query (string, lengths 9–9.05k) | document (string, lengths 10–222k) | metadata (dict) | negatives (sequence, length 30) | negative_scores (sequence, length 30) | document_score (string, lengths 4–10) | document_rank (string, 2 classes) |
---|---|---|---|---|---|---|
Maps a L{CODE} constant to a HTTP code. | def _mapErrorCodeToStatus(code):
if code == 103:
return http.NOT_FOUND
return http.INTERNAL_SERVER_ERROR | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_code():\n return jsonify({\"status\": \"0\", \"code\": code_status})",
"def setResponseCode(code, message=None):",
"def send_code(self, code: str) -> Dict:\n raise NotImplementedError",
"async def with_code_header():\n return jsonify(language=request.headers.get(\"Lang\")), 203, {\"X\": 233}",
"def set_code(self, code):\n self.set_payload(code)",
"def _get_request_code(self, data) -> int:\n return int(self._request_code)",
"def code(self, code: int):\n\n self._code = code",
"def http_return_code(res_data) -> (int, str):\n\n start = re.search(\"[0-9]{3}\", res_data).start()\n end_of_line = res_data.find(\"\\r\\n\")\n code = int(res_data[start:start+3])\n if end_of_line == -1:\n end_of_line = len(res_data)\n meaning = res_data[start+4:end_of_line]\n return code, meaning",
"def reply_with_code(self, code: int) -> None:",
"def code(self, value: str) -> None:\n self._code = value",
"def assign_message_code(success: bool):\n return (HTTPStatus.OK.phrase, HTTPStatus.OK) if success\\\n else (HTTPStatus.INTERNAL_SERVER_ERROR.phrase, HTTPStatus.INTERNAL_SERVER_ERROR)",
"def code(self, code):\n if code is None:\n raise ValueError(\"Invalid value for `code`, must not be `None`\")\n\n self._code = code",
"def ircode(self, code):\n if code.lower() in self.codes:\n self._sendCommand('IRCODE ' + self.codes[code.lower()])\n else:\n print 'No such code: %s' % code",
"def code(self, code: str):\n\n self._code = code",
"def code(self, code: \"str\"):\n if code is None:\n raise ValueError(\"Invalid value for `code`, must not be `None`\")\n self._attrs[\"code\"] = code",
"def view_status_code(codes):\n\n if \",\" not in codes:\n try:\n code = int(codes)\n except ValueError:\n return Response(\"Invalid status code\", status=400)\n return status_code(code)\n\n choices = []\n for choice in codes.split(\",\"):\n if \":\" not in choice:\n code = choice\n weight = 1\n else:\n code, weight = choice.split(\":\")\n\n try:\n choices.append((int(code), float(weight)))\n except ValueError:\n return Response(\"Invalid status code\", status=400)\n\n code = weighted_choice(choices)\n\n return status_code(code)",
"def get_code(self):\n return self.code",
"def get_code(self):\n return self.code",
"def get_code(self):\n return self.code",
"def get_code(self):\n return self.code",
"def set_status( code ):",
"def code(self, code):\n\n self._code = code",
"def code_to_name(code):\n upper_code = code.upper()\n if upper_code in code_dict:\n return code_dict[upper_code]\n else:\n return code",
"def update_code(self):\n print ('update code')\n self.query_dict.update({'code':code.value})",
"def decode(self, code):\n raise NotImplementedError",
"def _putCode(self, code):\n assert(type(code) == int)\n self.code[self.codeptr] = code\n self.codeptr += 1",
"def code(self):\n\t\treturn self.status_code",
"def get_status_code(status):\n return dict(const.STATUS_CODES).get(status)",
"def response(code):\n\n def decorator(func):\n func.wsgi_code = code\n return func\n return decorator",
"def map_marital_status(code):\n status = MaritalStatus\n mapping = {\n \"MARRIED\": status.Married.value,\n \"SINGLE\": status.Unmarried.value,\n \"WIDOWED\": status.Widowed.value,\n \"SEPARATED\": status.LegallySeparated.value,\n \"DIVORCED\": status.Divorced.value,\n \"UNKNOWN\": status.Unknown.value,\n }\n if code in mapping.keys():\n return mapping[code]\n elif utils.is_empty(code):\n return status.Unknown.value\n else:\n logging.warning(\"In {}, args {} not recognised\".format(\"marital_status\", code))\n return status.Unknown.value"
] | [
"0.6723069",
"0.6591856",
"0.65714204",
"0.6495424",
"0.6402399",
"0.63924485",
"0.6334081",
"0.6325058",
"0.6201443",
"0.6191107",
"0.616254",
"0.616247",
"0.6142737",
"0.61202556",
"0.6110832",
"0.6099238",
"0.6080944",
"0.6080944",
"0.6080944",
"0.6080944",
"0.60740554",
"0.59960765",
"0.5989822",
"0.59863806",
"0.5973325",
"0.5954982",
"0.5953482",
"0.5953162",
"0.5900504",
"0.5864886"
] | 0.77379066 | 0 |
Verify PayPal IPN data. | def verify(self, request):
paypalURL = 'https://www.sandbox.paypal.com/cgi-bin/webscr'
if not self.SANDBOX:
paypalURL = 'https://www.paypal.com/cgi-bin/webscr'
def _cb(response):
if response == 'INVALID':
raise PaypalError(
'IPN data invalid. data: %s', (data,))
elif response == 'VERIFIED':
return True
else:
raise PaypalError('Unrecognized verification response: %s', (response,))
data = request.content.read()
params = '?cmd=_notify-validate&' + data
d = getPage(paypalURL+params, method='POST')
d.addCallback(_cb)
return d | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def verify_ipn(data):\n data = dict(data)\n data['cmd'] = '_notify-validate'\n resp = requests.post(app.config['PAYPAL']['endpoint'], data=data)\n if resp.text == 'VERIFIED':\n return True\n return False",
"def validate_with_paypal(request, validate_type):\n if validate_type == 'PDT':\n # we are on return url\n # need to verify if payment is completed\n # MERCHANT_TXN_KEY is your PDT identity token\n params = {\n 'cmd': '_notify-synch',\n 'tx': request.GET.get('tx', ''),\n 'at': settings.MERCHANT_TXN_KEY\n }\n data = urllib.urlencode(params)\n\n # Sample response:\n # SUCCESS\n # first_name=Jane+Doe\n # last_name=Smith\n # payment_status=Completed payer_email=janedoesmith%40hotmail.com\n # payment_gross=3.99\n # mc_currency=USD custom=For+the+purchase+of+the+rare+book+Green+Eggs+%26+Ham\n\n # If the response is FAIL, PayPal recommends making sure that:\n # The Transaction token is not bad.\n # The ID token is not bad.\n # The tokens have not expired.\n\n else: # IPN\n data = 'cmd=_notify-validate&%s' % request.POST.urlencode()\n\n # The response is one single-word: VERIFIED or INVALID\n\n headers = {\"Content-type\": \"application/x-www-form-urlencoded\",\n 'encoding': 'utf-8',\n \"Accept\": \"text/plain\"}\n request = urllib2.Request(settings.PAYPAL_POST_URL,\n data,\n headers)\n response = urllib2.urlopen(request)\n data = response.read()\n\n if validate_type == 'PDT':\n return parse_pdt_validation(data)\n else:\n return data.strip('\\n').lower() == 'verified', None",
"def ipn(request, item_check_callable=None):\n flag = None\n ins_obj = None\n form = ClickBankINSForm(request.POST)\n if form.is_valid():\n try:\n ins_obj = form.save(commit=False)\n except Exception, e:\n flag = \"Exception while processing. (%s)\" % e\n else:\n flag = \"Invalid form. (%s)\" % form.errors\n\n if ins_obj is None:\n ins_obj = ClickBankINS()\n\n ins_obj.initialize(request)\n\n if flag is not None:\n ins_obj.set_flag(flag)\n else:\n if not ins_obj.verify_hash():\n return HttpResponse(\"INVALID HASH\")\n\n ins_obj.save()\n return HttpResponse(\"OKAY\")",
"def nexmo_verify(request):\n number = request.validated[\"querystring\"][\"number\"]\n\n sender_id = nexmo_conf(request, \"sender_id\")\n params = {\n \"api_key\": nexmo_conf(request, \"api_key\"),\n \"api_secret\": nexmo_conf(request, \"api_secret\"),\n \"sender_id\": sender_id,\n \"code_length\": nexmo_conf(request, \"code_length\"),\n \"pin_expiry\": nexmo_conf(request, \"state_ttl_seconds\"),\n \"number\": number,\n \"brand\": nexmo_conf(request, \"brand\"),\n }\n\n verify_url = \"{}/verify/json\".format(\n nexmo_conf(request, \"api_endpoint\").rstrip(\"/\")\n )\n\n try:\n resp = requests.get(verify_url, params=params)\n except requests.exceptions.ConnectionError:\n logger.exception(\n \"A connection error occured when starting the nexmo auth process\"\n )\n error_msg = \"The Nexmo API is not ready, please retry later.\"\n return http_error(\n httpexceptions.HTTPServiceUnavailable(),\n errno=ERRORS.BACKEND,\n message=error_msg,\n )\n\n try:\n resp.raise_for_status()\n except requests.exceptions.HTTPError:\n logger.exception(\"An error occured when starting the auth process\")\n error_msg = \"The Nexmo API is not ready, please retry later.\"\n return http_error(\n httpexceptions.HTTPServiceUnavailable(),\n errno=ERRORS.BACKEND,\n message=error_msg,\n )\n\n data = resp.json()\n\n if data[\"status\"] == \"10\":\n description = (\n f\"An authentication request is already in progress for this number. \"\n f\"{data['error_text']}\"\n )\n error_details = {\n \"name\": \"number\",\n \"location\": \"querystring\",\n \"description\": description,\n }\n raise_invalid(request, **error_details)\n elif data[\"status\"] != \"0\":\n if data[\"status\"] in [\"6\", \"16\", \"19\"]: # pragma: no cover\n logger.info(\"Nexmo Verify Request failed: {}\".format(data))\n else:\n logger.error(\"Nexmo Verify Request failed: {}\".format(data))\n description = \"Something went wrong when trying to authenticate this number.\"\n error_details = {\n \"name\": \"number\",\n \"location\": \"querystring\",\n \"description\": description,\n }\n raise_invalid(request, **error_details)\n\n state = persist_state(request, {\"request_id\": data[\"request_id\"], \"number\": number})\n\n return {\"state\": state, \"sender_id\": sender_id}",
"def has_paypal(self):\n from django.core.validators import validate_email\n try:\n validate_email(self.paypal_email)\n return True\n except ValidationError:\n return False",
"def is_unverified(self):\n return self.get_status() == self.STATUS_UNVERIFIED",
"def is_unverified(self):\n return self.get_status() == self.STATUS_UNVERIFIED",
"def test_check_nip(client):\n is_assigned, request_id = client.check_nip(\n \"8655104670\", \"41146786026458860703735932\"\n )\n\n assert is_assigned",
"def verify():",
"def verify(self, response):",
"def verify_payload():\n return True",
"def verify():\n if flask.request.method == 'POST':\n req = flask.request.get_json(force=True)\n phone = req.get('phone')\n code = req['code']\n return check_verification(phone, code)",
"def payment_verification(payload):\n response = requests.post(url, data=payload)\n return response.json()",
"def update_paypal(sender, **kwargs):\n ipn_obj = sender\n try:\n payment = json.loads(ipn_obj.custom)\n\n # try to get payment. if not exist, exception will be catched\n p = Payment.objects.filter(id=payment.get('id'), token=payment.get('token')).get()\n\n # update payment\n p.method = constants.PAYPAL\n p.ipn = ipn_obj\n p.save()\n\n # if payment is completed, so valid\n if ipn_obj.payment_status == ST_PP_COMPLETED:\n # check correct price , currency and mail\n if int(ipn_obj.mc_gross) == int(p.price.price) and \\\n ipn_obj.mc_currency == 'EUR' and \\\n ipn_obj.business == settings.PAYPAL_RECEIVER_EMAIL:\n # all is OK, update state\n p.state = True\n p.save()\n sendmail_payment_success(p)\n else:\n # TODO: send alert / mail\n return\n except Payment.DoesNotExist:\n # TODO: send alert / mail\n pass\n except:\n # TODO: send alert / mail\n pass",
"def verify_receipt(receipt_data, user=None):\n #data = json.dumps({'receipt-data': '{' + receipt_data + '}'})\n data = '{{\\n \"receipt-data\" : \"{}\" \\n}}'.format(receipt_data)\n\n def verify(url):\n tries = 3\n for try_ in range(1, tries + 1):\n try:\n req = urllib2.Request(url, data)\n resp = urllib2.urlopen(req, timeout=18) # app timeout is supposed to be 60\n return json.loads(resp.read())\n except (urllib2.URLError, socket_error) as e:\n if try_ == tries:\n raise e\n\n cleaned_data = verify(settings.IAP_VERIFICATION_URL)\n\n # See: http://developer.apple.com/library/ios/#technotes/tn2259/_index.html\n if cleaned_data['status'] == 21007:\n cleaned_data = verify(settings.IAP_VERIFICATION_SANDBOX_URL)\n\n if cleaned_data['status'] != 0:\n extra = {'status': cleaned_data['status']}\n if user is not None and user.is_authenticated():\n extra['username'] = user.username\n extra['response_from_apple'] = json.dumps(cleaned_data)\n client.captureMessage('IAP receipt validation failed', extra=extra)\n raise ValidationError(\"Your purchase went through, but there was an error processing it. Please contact support: [email protected]\")\n\n return cleaned_data['receipt']",
"def test_successful_verification(self):\n for i in (-2, -1, 0, 1, 2):\n\n description = \"TOTP not verified for `i={0}`\".format(i)\n calculated = self.algorithm.calculate(self.device.secret, drift=i)\n confirmed = self.relate.verify(calculated, save=False)\n\n self.assertTrue(confirmed, description)\n\n self.relate.confirm = False",
"def __verify(self):\r\n code = self.request.get('code')\r\n email = None\r\n error = False\r\n # resend if code is not given or in case of some error\r\n if code is not None and code != '':\r\n email = User.verify(code, self.request.remote_addr)\r\n if email is None:\r\n error = True\r\n\r\n if email is None:\r\n template_values = {\r\n 'user_email': self.user_email,\r\n 'error': error\r\n }\r\n template = self.jinja2_env.get_template('verification.html')\r\n self.response.out.write(template.render(template_values))\r\n\r\n # message\r\n template_values = {\r\n 'user_email': self.user_email,\r\n 'message': self.gettext('THANK_YOU')\r\n }\r\n template = self.jinja2_env.get_template('staticmessage.html')\r\n self.response.out.write(template.render(template_values))",
"def verify_payment(self, order_id, ref_id):\n try:\n client = Client(self.service_address)\n res = client.service.bpVerifyRequest(terminalId=self.terminalId,\n userName=self.userName,\n userPassword=self.userPassword,\n orderId=order_id,\n saleOrderId=order_id,\n saleReferenceId=ref_id)\n return True, res\n except WebFault as f:\n return False, f.fault.faultstring\n except Exception as e:\n return False, e.message",
"def paypal_notification(request, payment_mode='paypal'):\r\n try:\r\n data = request.POST\r\n _log.debug(\"PayPal IPN data: %s\", repr(data))\r\n\r\n if not paypal.verify_ipn_request(request):\r\n return HttpResponse()\r\n\r\n if data.get('payment_status', None) != \"Completed\":\r\n # Do not insert payments whose status is not \"Completed\".\r\n _log.debug(\"Ignored IPN data for incomplete payment.\")\r\n return HttpResponse()\r\n\r\n currency = data.get('mc_currency', settings.CURRENCY_DEFAULT)\r\n if currency.upper() not in settings.CURRENCIES_SUPPORTED:\r\n # We do not support anything other than USD.\r\n _log.debug(\"Ignored IPN data for unsupported currency %s\", currency)\r\n return HttpResponse()\r\n\r\n pending_contribution_id, username = data['custom'].split('~') # pending_contrib_id~buyer's_username\r\n is_anon = username == 'anonymous'\r\n transaction_id = data['txn_id']\r\n qty = data['quantity']\r\n artist_email = data['receiver_email']\r\n campaign_id = data['item_number']\r\n amount = data['mc_gross']\r\n is_test = data.get('test_ipn', 0) == 1\r\n\r\n contribs = Contribution.objects.filter(transaction_id=transaction_id, payment_mode=payment_mode).count()\r\n if not contribs:\r\n # This transaction hasn't already been processed.\r\n # Process it and update the ``memo`` field if it has been provided by the buyer.\r\n if is_anon:\r\n _log.debug(\"Processing anonymous contribution\")\r\n contributor = User.objects.get(username='anonymous')\r\n campaign = Campaign.objects.get(pk=campaign_id)\r\n contrib = campaign.contribution_set.create(\r\n contributor=contributor,\r\n amount=amount,\r\n qty=qty,\r\n payment_mode=payment_mode,\r\n transaction_id=transaction_id,\r\n memo=data.get('memo', '')\r\n )\r\n _log.info(\"PayPal (tx: %s) anonymous contribution recorded: %s\", transaction_id, contrib)\r\n else:\r\n pending_contrib = PendingContribution.objects.get(pk=pending_contribution_id,\r\n contributor__username=username,\r\n campaign=campaign_id,\r\n amount=amount,\r\n qty=qty,\r\n payment_mode=payment_mode)\r\n if pending_contrib:\r\n contrib = pending_contrib.process_payment_notification(transaction_id, data.get('memo', ''))\r\n _log.info(\"PayPal transaction %s resolved. Contribution recorded: %s\", transaction_id, contrib)\r\n else:\r\n _log.error(\"PayPal transaction %s could not be resolved.\", transaction_id)\r\n except:\r\n _log.exception(''.join(format_exception(*exc_info())))\r\n return HttpResponse()",
"def verify_email(uid, token):\n return True",
"def test_verify_email(live_server):\n user = get_user_model().objects.create_user(username=\"test\")\n email = models.EmailAddress.objects.create(\n address=\"[email protected]\", user=user\n )\n verification = models.EmailVerification.objects.create(email=email)\n\n data = {\"token\": verification.token}\n url = f\"{live_server}/rest/email-verifications/\"\n response = requests.post(url, data)\n\n assert response.status_code == 201\n assert response.json() == {}",
"def verified(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"verified\")",
"def verify(self, timeout=15):\n processed_host = (self.host.replace('sftp://', '')\n .replace('ftp://', '')\n #.replace('www.', '')\n .replace('https://', '')\n .replace('http://', '')\n .strip())\n protocol = self.protocol\n if protocol in ('ftp', 'ftps'):\n f = self._verify_ftp\n elif protocol == 'sftp':\n f = self._verify_sftp\n else:\n f = self._verify_spurious\n\n self.verified, self.verification_message = f(processed_host, timeout)\n self.last_verified = timezone.now()\n self.save(update_fields=['verified', 'verification_message',\n 'last_verified'])",
"def verify_postcode_api(self):\n\n assert type(self.postcodes) == str, \"To use this method, the postcode cannot be an iterable.\"\n request_path = requests.get(self.path + self.postcodes, verify=False)\n response_code = str(request_path)\n\n if response_code == '<Response [200]>':\n verification_status = 'Verified'\n elif response_code == '<Response [404]>':\n verification_status = 'Invalid Postcode'\n elif response_code == '<Response [400]':\n verification_status = 'No Postcode Submitted'\n elif response_code == '<Response [500]':\n verification_status = 'Server error'\n else:\n verification_status = 'Invalid Postcode'\n return verification_status",
"def verify_token(vial_http: urllib3.connectionpool.ConnectionPool) -> bool:\n verify_resp = vial_http.request(\"GET\", \"/api/verifyToken\")\n return verify_resp.status == 200",
"def do_verify(self, args):\n\n pn = \\\n self._get_choice_(\"pn\", self.promissory_notes, \"Which promissory note needs to be verified?\")\n\n try:\n verify_promissory_note(pn)\n except Exception as e:\n self._print_exception_(e)\n return\n\n print(\"Promissory note is correct.\\n\")",
"def verify(self):\n data = [\"rfc\", \"tel\", \"email\", \"name\", \"use\"]\n state = False\n for item in data:\n if getattr(self, item + \"Input\").text() != \"\":\n state = True\n else:\n return False\n return state",
"def verify(self):\n if self.geturl():\n return True\n return False",
"def send_verification(self):\n pass",
"def test_process_postpay_not_accepted(self):\r\n student1 = UserFactory()\r\n student1.save()\r\n\r\n order1 = Order.get_cart_for_user(student1)\r\n params = {\r\n 'card_accountNumber': '1234',\r\n 'card_cardType': '001',\r\n 'billTo_firstName': student1.first_name,\r\n 'orderNumber': str(order1.id),\r\n 'orderCurrency': 'usd',\r\n 'decision': 'REJECT',\r\n 'ccAuthReply_amount': '0.00',\r\n 'reasonCode': '207'\r\n }\r\n result = process_postpay_callback(params)\r\n self.assertFalse(result['success'])\r\n self.assertEqual(result['order'], order1)\r\n self.assertEqual(order1.status, 'cart')\r\n self.assertIn(REASONCODE_MAP['207'], result['error_html'])"
] | [
"0.8481473",
"0.6472825",
"0.5787242",
"0.5769118",
"0.5757081",
"0.5683456",
"0.5683456",
"0.5624004",
"0.5467733",
"0.54500043",
"0.544273",
"0.5417485",
"0.54154587",
"0.5396169",
"0.5375768",
"0.53615075",
"0.5322293",
"0.53124595",
"0.52979654",
"0.5271814",
"0.5205165",
"0.5192838",
"0.5158461",
"0.51520795",
"0.514784",
"0.51356816",
"0.5104729",
"0.51040757",
"0.5098069",
"0.5095029"
] | 0.75808173 | 1 |
Retrieve a list of recent donations. | def recent(self, limit):
def _cb(players, donations):
donators = []
for donation in donations:
player = players[donation.donator.steamID].copy()
player['date'] = donation.date.asPOSIXTimestamp()
player['amount'] = str(donation.amount)
donators.append(player)
return donators
donations = []
steamids = set()
for donation in self.store.query(Donation,
AND(Donation.donator == Donator.storeID,
Donator.anonymous == False,
Donator.steamID != None),
limit=limit,
sort=Donation.date.descending):
steamids.add(donation.donator.steamID)
donations.append(donation)
d = self.getPlayerSummaries(steamids)
d.addCallback(_cb, donations)
return d | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_list_of_donations():\n try:\n logger.info('opening get_list_of_donations database call')\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n query_results = (Donations.select(Donations.id, Donations.donation_date,\n Donations.donation_amount, Donations.donated_by_id.alias('fullname')))\n return query_results\n except Exception as e:\n logger.info(f'Error getting list of donors')\n logger.info(e)\n\n finally:\n logger.info('closing get_list_of_donations database call')\n database.close()",
"def donations(self):\n return self.caller.player.Dominion.assets.donations.all().order_by(\"amount\")",
"async def api_get_donations(g: WalletTypeInfo = Depends(get_key_type)):\n user = await get_user(g.wallet.user)\n wallet_ids = user.wallet_ids if user else []\n donations = []\n for wallet_id in wallet_ids:\n new_donations = await get_donations(wallet_id)\n donations += new_donations if new_donations else []\n return [donation.dict() for donation in donations] if donations else []",
"def get(self, request):\n concerts = Concert.objects.order_by('date_time')\n if concerts:\n last_updated = concerts[0].date_scraped\n # TODO handle empty DB\n else:\n last_updated = datetime(1900,1,1)\n context = {\n 'concert_list': concerts,\n 'last_updated': last_updated,\n }\n return render(request, 'concerts/concert_list.html', context)",
"def GetAllDateOfPaymentOfCost():\n\n logs.logger.debug(\n \"Start to get back all payment date of Cost objects from database.\")\n try:\n searchedCostsItems = session.query(Cost.Cost).all()\n logs.logger.info(\n \"Get back all payment date of Cost objects from database.\")\n return [CostItems.dateOfPayment for CostItems in searchedCostsItems]\n except Exception as e:\n logs.logger.error(e, exc_info=True)",
"def get_list_of_donors():\n try:\n logger.info('opening get_list_of_donors database call')\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n return Donors.select()\n\n except Exception as e:\n logger.info(e)\n\n finally:\n database.close()",
"def list_donations(self, caller):\n msg = \"{wDonations:{n\\n\"\n table = PrettyTable([\"{wGroup{n\", \"{wTotal{n\"])\n for donation in self.donations:\n table.add_row([str(donation.receiver), donation.amount])\n msg += str(table)\n caller.msg(msg)",
"def GetAllDifferentDateOfPaymentOfCost():\n\n logs.logger.debug(\n \"Start to get back all different payment date of \"\n \"Cost objects from database.\")\n try:\n ListOfAllDifferentDateOfPaymentOfCost = []\n searchedCostsItems = GetAllDateOfPaymentOfCost()\n for item in searchedCostsItems:\n if item not in ListOfAllDifferentDateOfPaymentOfCost:\n ListOfAllDifferentDateOfPaymentOfCost.append(item)\n logs.logger.info(\n \"Get back all different payment date of \"\n \"Cost objects from database.\")\n return ListOfAllDifferentDateOfPaymentOfCost\n except Exception as e:\n logs.logger.error(e, exc_info=True)",
"def getDonates(id):\n contributor = db.find_one({'_id': ObjectId(id)})\n print(contributor)\n return jsonify({\n '_id': str(ObjectId(contributor['_id'])),\n 'name': contributor['name'],\n 'amount': contributor['amount']\n })",
"def get_max_donation_date_list():\n try:\n logger.info('opening get_max_donation_date_list database call')\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n\n query_max_date = (Donations\n .select(Donations.donated_by_id.alias('fullname'),\n fn.MAX(Donations.donation_date).alias(\n 'last_donation_date'),\n Donations.donation_amount.alias('last_donation'))\n .group_by(Donations.donated_by_id)\n )\n return query_max_date\n\n except Exception as e:\n logger.info(e)\n\n finally:\n database.close()\n logger.info('closing get_max_donation_date_list database call')",
"def GetAllRegistrationDateOfCost():\n\n logs.logger.debug(\"Start to get back all registration date of\\\n Cost objects from database.\")\n try:\n searchedCostsItems = session.query(Cost.Cost).all()\n logs.logger.info(\n \"Get back all registration date of Cost objects from database.\")\n return [CostItems.registrationDate for CostItems in searchedCostsItems]\n except Exception as e:\n logs.logger.error(e, exc_info=True)",
"def fetch_review(self):\n c = self.db.cursor()\n c.execute(\"\"\"SELECT * FROM cards\n WHERE date_last_reviewed < (DATETIME('now', 'localtime', '-8 hours'))\n OR correct = 0\"\"\")\n rows = c.fetchall()\n cards = [\n Card(\n id=id,\n card_type=card_type,\n text=text,\n created=created,\n uri=uri,\n updated=updated,\n difficulty=difficulty,\n days_between=days_between,\n date_last_reviewed=date_last_reviewed,\n correct=correct,\n )\n for id, card_type, text, uri, created, updated, difficulty, days_between, date_last_reviewed, correct in rows\n ]\n cards = filter(lambda card: card.percent_overdue >= 1, cards)\n cards = sorted(cards, key=lambda card: card.percent_overdue)\n\n return cards[:20]",
"def print_donor_list():\n print('Below are the existing donors: ')\n for donor in donors_data:\n print('\\t- ', donor[\"name\"], ' ', donor[\"donations\"])",
"def GetAllDifferentRegistrationDateOfCost():\n\n logs.logger.debug(\n \"Start to get back all different registration date of \"\n \"Cost objects from database.\")\n try:\n ListOfAllDifferentRegistrationDateOfCost = []\n searchedCostsItems = GetAllRegistrationDateOfCost()\n for item in searchedCostsItems:\n if item not in ListOfAllDifferentRegistrationDateOfCost:\n ListOfAllDifferentRegistrationDateOfCost.append(item)\n logs.logger.info(\n \"Get back all different registration date of \"\n \"Cost objects from database.\")\n return ListOfAllDifferentRegistrationDateOfCost\n except Exception as e:\n logs.logger.error(e, exc_info=True)",
"def getPurchaseDates(self):\n\t\treturn self.dateList",
"def last_donation(self):\n return self._donations[-1]",
"def print_all_donor_donations():\n print(\"\\nList of Donors and Donations\")\n print(\"\\nDonor Name - Donation Date - Donation Amount:\")\n print(\"-\"*40)\n for donation in donor_donations_list:\n print(f'{donation.fullname} - {donation.donation_date} - ${donation.donation_amount:,.2f}')\n print()",
"def all_donors(self):\n return [item for item in self.r.keys()]",
"def recent(self):\n return self.filter(\n start_date__lte=self.current().end_date + timezone.timedelta(days=1),\n end_date__gte=self.current().start_date - timezone.timedelta(days=1),\n )",
"def last_contribution_date(self):\n from kitsune.customercare.models import Reply\n from kitsune.questions.models import Answer\n from kitsune.wiki.models import Revision\n\n dates = []\n\n # Latest Army of Awesome reply:\n try:\n aoa_reply = Reply.objects.filter(\n user=self.user).latest('created')\n dates.append(aoa_reply.created)\n except Reply.DoesNotExist:\n pass\n\n # Latest Support Forum answer:\n try:\n answer = Answer.objects.filter(\n creator=self.user).latest('created')\n dates.append(answer.created)\n except Answer.DoesNotExist:\n pass\n\n # Latest KB Revision edited:\n try:\n revision = Revision.objects.filter(\n creator=self.user).latest('created')\n dates.append(revision.created)\n except Revision.DoesNotExist:\n pass\n\n # Latest KB Revision reviewed:\n try:\n revision = Revision.objects.filter(\n reviewer=self.user).latest('reviewed')\n # Old revisions don't have the reviewed date.\n dates.append(revision.reviewed or revision.created)\n except Revision.DoesNotExist:\n pass\n\n if len(dates) == 0:\n return None\n\n return max(dates)",
"def list_donors(self):\n return [donor.name for donor in self.donors]",
"def get(self, request):\n\n matched_concert_ids = list(ConcertMatch.objects.values_list('concert', flat=True))\n matches = Concert.objects.filter(id__in=matched_concert_ids).order_by('date_time')\n if Concert.objects.count():\n last_updated = Concert.objects.latest('date_scraped').date_scraped\n # TODO handle empty DB\n else:\n last_updated = datetime(1900,1,1)\n context = {\n 'matches': matches,\n 'last_updated': last_updated,\n }\n\n return render(request, 'concerts/upcoming_concerts.html', context)",
"def select(self):\n last_results = self.database.query('''SELECT *\n FROM History\n ORDER BY request_date DESC\n LIMIT 10''')\n return last_results",
"def get(self):\n try:\n tasks = tasks_overdue(get_db())\n return list(map(task_to_dict, tasks))\n except ValueError:\n api.abort(422, \"Invalid Date\")",
"def add_donations():\n done = False\n while not done:\n name = input(\"Enter donor name (or \\\"list\\\" for list): \")\n if name == \"list\":\n # list donor names\n for d in donor_history: print(d.name)\n continue\n for thisdonor in donor_history:\n if name == thisdonor.name:\n break\n if thisdonor == None:\n thisdonor = donor(name)\n donor_history.append(thisdonor)\n print(\"Adding new donor: \" + name)\n moredonations = True\n while moredonations:\n value = input(\"Enter donation amount or -1 when finished: \")\n try:\n donation_amount = int(value)\n except ValueError:\n print(\"Invalid input, reenter.\")\n continue\n if donation_amount == -1: break\n thisdonor.donations.append(donation_amount)\n done = True\n if thisdonor: print(f\"Thank you, {name}, for your donation(s)!\")\n print()\n return",
"def getCitationsData():\n # Follows https://github.com/simonw/irma-scrapers/issues/1\n citationsResponse = requests.get(\"https://api.github.com/repos/greenelab/covid19-review/git/trees/output\", headers=headers).json()\n treeEntry = [t for t in citationsResponse[\"tree\"] if t[\"path\"] == \"references.json\"][0] \n citations = json.loads(base64.b64decode(requests.get(treeEntry[\"url\"]).json()[\"content\"]))\n\n citationsDF = pd.DataFrame(citations)\n citationsDF[\"Covid19-review_paperLink\"] = citationsDF.id.apply(lambda x: \"https://greenelab.github.io/covid19-review/#ref-\" + x)\n citationsDF = citationsDF[[\"DOI\", \"title\", \"issued\", \"container-title\", \"URL\", \"Covid19-review_paperLink\"]]\n citationsDF.rename(columns={\"DOI\": \"doi\", \"issued\": \"date\", \"container-title\": \"publication\"}, inplace=True)\n\n # Convert date to string\n def dateStringFromDateParts(row):\n try:\n dateParts = row['date']['date-parts'][0]\n if len(dateParts) == 3:\n return \"-\".join([str(dateParts[1]), str(dateParts[2]), str(dateParts[0])])\n elif len(dateParts) == 2:\n return \"-\".join([str(dateParts[1]), str(dateParts[0])])\n elif len(dateParts) == 1:\n return str(dateParts[0])\n else:\n return\n except:\n return\n\n citationsDF.date = citationsDF.apply(dateStringFromDateParts, axis=1)\n\n citationsDF.set_index(\"doi\", inplace=True)\n return citationsDF",
"def get_queryset(self):\n return Person.objects.filter(expiry_date__gt=timezone.now())",
"def populate_donations():\n logger.info('Starting Donations table population')\n\n DONATION_DATE = 0\n DONATION_AMOUNT = 1\n DONATED_BY = 2\n\n d = datetime.today() - timedelta(days=random.randint(1, 301))\n\n try:\n database.connect()\n database.execute_sql('PRAGMA foreign_keys = ON;')\n\n for donor in Donors:\n # Randomly generated number of donations\n #donation_times = random.randint(1, 10)\n for donation in range(random.randint(1, 10)):\n with database.transaction():\n # random date in last year\n # random donation amount converted to decimal\n # pulling donor fullname as id\n new_donation = Donations.create(\n donation_date=datetime.today() - timedelta(days=random.randint(1, 301)),\n donation_amount=decimal.Decimal(\n random.randrange(1, 9999999))/100,\n donated_by=donor.fullname,\n )\n new_donation.save()\n logger.info('Database add successful')\n\n logger.info('Print the Donors records we saved...')\n for don in Donations:\n logger.info(f'donation: {don.id} : {don.donation_date} : {don.donation_amount} : '\n + f' donor_id: {don.donated_by} has been added to the Donations table ')\n except Exception as e:\n logger.info(f'Error creating = {donation[DONATION_DATE]} {donation[DONATION_AMOUNT]}'\n + f'{donation[DONATED_BY]}')\n logger.info(e)\n logger.info('See how the database protects our data')\n finally:\n logger.info('closing database')\n database.close()",
"def __get_unique_due_date_list(self) -> List[str]:\n return self.tasks.get_due_date_list()",
"def all_donors_all_donation(self):\n for name in self.all_donors:\n person = self.r.hgetall(name)\n print(f\"Person: {name}\")\n for key, value in person.items():\n print(f\"{key}: {value}\")"
] | [
"0.65837014",
"0.65390664",
"0.64193267",
"0.5857776",
"0.5843226",
"0.56542754",
"0.565169",
"0.56222814",
"0.56117177",
"0.55442196",
"0.55310655",
"0.5512683",
"0.5447088",
"0.54198706",
"0.53970146",
"0.5342934",
"0.5318437",
"0.5283673",
"0.52812785",
"0.52525187",
"0.52378017",
"0.51988626",
"0.5181239",
"0.5178459",
"0.5177728",
"0.51682246",
"0.5164721",
"0.516058",
"0.5152485",
"0.50993836"
] | 0.668418 | 0 |
Checks that certain pipeline files are not modified from template output. Iterates through the pipeline's directory content and compares specified files against output from the template using the pipeline's metadata. File content should not be modified / missing. | def files_unchanged(self):
passed = []
failed = []
ignored = []
fixed = []
could_fix = False
# Check that we have the minimum required config
required_pipeline_config = {"manifest.name", "manifest.description", "manifest.author"}
missing_pipeline_config = required_pipeline_config.difference(self.nf_config)
if missing_pipeline_config:
return {"ignored": [f"Required pipeline config not found - {missing_pipeline_config}"]}
try:
prefix, short_name = self.nf_config["manifest.name"].strip("\"'").split("/")
except ValueError:
log.warning(
"Expected manifest.name to be in the format '<repo>/<pipeline>'. Will assume it is <pipeline> and default to repo 'nf-core'"
)
short_name = self.nf_config["manifest.name"].strip("\"'")
prefix = "nf-core"
# NB: Should all be files, not directories
# List of lists. Passes if any of the files in the sublist are found.
files_exact = [
[".gitattributes"],
[".prettierrc.yml"],
["CODE_OF_CONDUCT.md"],
["LICENSE", "LICENSE.md", "LICENCE", "LICENCE.md"], # NB: British / American spelling
[os.path.join(".github", ".dockstore.yml")],
[os.path.join(".github", "CONTRIBUTING.md")],
[os.path.join(".github", "ISSUE_TEMPLATE", "bug_report.yml")],
[os.path.join(".github", "ISSUE_TEMPLATE", "config.yml")],
[os.path.join(".github", "ISSUE_TEMPLATE", "feature_request.yml")],
[os.path.join(".github", "PULL_REQUEST_TEMPLATE.md")],
[os.path.join(".github", "workflows", "branch.yml")],
[os.path.join(".github", "workflows", "linting_comment.yml")],
[os.path.join(".github", "workflows", "linting.yml")],
[os.path.join("assets", "email_template.html")],
[os.path.join("assets", "email_template.txt")],
[os.path.join("assets", "sendmail_template.txt")],
[os.path.join("assets", f"nf-core-{short_name}_logo_light.png")],
[os.path.join("docs", "images", f"nf-core-{short_name}_logo_light.png")],
[os.path.join("docs", "images", f"nf-core-{short_name}_logo_dark.png")],
[os.path.join("docs", "README.md")],
[os.path.join("lib", "nfcore_external_java_deps.jar")],
[os.path.join("lib", "NfcoreTemplate.groovy")],
]
files_partial = [
[".gitignore", ".prettierignore", "pyproject.toml"],
]
# Only show error messages from pipeline creation
logging.getLogger("nf_core.create").setLevel(logging.ERROR)
# Generate a new pipeline with nf-core create that we can compare to
tmp_dir = tempfile.mkdtemp()
# Create a template.yaml file for the pipeline creation
template_yaml = {
"name": short_name,
"description": self.nf_config["manifest.description"].strip("\"'"),
"author": self.nf_config["manifest.author"].strip("\"'"),
"prefix": prefix,
}
template_yaml_path = os.path.join(tmp_dir, "template.yaml")
with open(template_yaml_path, "w") as fh:
yaml.dump(template_yaml, fh, default_flow_style=False)
test_pipeline_dir = os.path.join(tmp_dir, f"{prefix}-{short_name}")
create_obj = nf_core.create.PipelineCreate(
None, None, None, no_git=True, outdir=test_pipeline_dir, template_yaml_path=template_yaml_path
)
create_obj.init_pipeline()
# Helper functions for file paths
def _pf(file_path):
"""Helper function - get file path for pipeline file"""
return os.path.join(self.wf_path, file_path)
def _tf(file_path):
"""Helper function - get file path for template file"""
return os.path.join(test_pipeline_dir, file_path)
# Files that must be completely unchanged from template
for files in files_exact:
# Ignore if file specified in linting config
ignore_files = self.lint_config.get("files_unchanged", [])
if any([f in ignore_files for f in files]):
ignored.append(f"File ignored due to lint config: {self._wrap_quotes(files)}")
# Ignore if we can't find the file
elif not any([os.path.isfile(_pf(f)) for f in files]):
ignored.append(f"File does not exist: {self._wrap_quotes(files)}")
# Check that the file has an identical match
else:
for f in files:
try:
if filecmp.cmp(_pf(f), _tf(f), shallow=True):
passed.append(f"`{f}` matches the template")
else:
if "files_unchanged" in self.fix:
# Try to fix the problem by overwriting the pipeline file
shutil.copy(_tf(f), _pf(f))
passed.append(f"`{f}` matches the template")
fixed.append(f"`{f}` overwritten with template file")
else:
failed.append(f"`{f}` does not match the template")
could_fix = True
except FileNotFoundError:
pass
# Files that can be added to, but that must contain the template contents
for files in files_partial:
# Ignore if file specified in linting config
ignore_files = self.lint_config.get("files_unchanged", [])
if any([f in ignore_files for f in files]):
ignored.append(f"File ignored due to lint config: {self._wrap_quotes(files)}")
# Ignore if we can't find the file
elif not any([os.path.isfile(_pf(f)) for f in files]):
ignored.append(f"File does not exist: {self._wrap_quotes(files)}")
# Check that the file contains the template file contents
else:
for f in files:
try:
with open(_pf(f), "r") as fh:
pipeline_file = fh.read()
with open(_tf(f), "r") as fh:
template_file = fh.read()
if template_file in pipeline_file:
passed.append(f"`{f}` matches the template")
else:
if "files_unchanged" in self.fix:
# Try to fix the problem by overwriting the pipeline file
with open(_tf(f), "r") as fh:
template_file = fh.read()
with open(_pf(f), "w") as fh:
fh.write(template_file)
passed.append(f"`{f}` matches the template")
fixed.append(f"`{f}` overwritten with template file")
else:
failed.append(f"`{f}` does not match the template")
could_fix = True
except FileNotFoundError:
pass
# cleaning up temporary dir
shutil.rmtree(tmp_dir)
return {"passed": passed, "failed": failed, "ignored": ignored, "fixed": fixed, "could_fix": could_fix} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def files_exist(self):\n\n passed = []\n warned = []\n failed = []\n ignored = []\n\n # NB: Should all be files, not directories\n # List of lists. Passes if any of the files in the sublist are found.\n #: test autodoc\n try:\n _, short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n except ValueError:\n log.warning(\"Expected manifest.name to be in the format '<repo>/<pipeline>'. Will assume it is '<pipeline>'.\")\n short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n\n files_fail = [\n [\".gitattributes\"],\n [\".gitignore\"],\n [\".nf-core.yml\"],\n [\".editorconfig\"],\n [\".prettierignore\"],\n [\".prettierrc.yml\"],\n [\"CHANGELOG.md\"],\n [\"CITATIONS.md\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"LICENSE\", \"LICENSE.md\", \"LICENCE\", \"LICENCE.md\"], # NB: British / American spelling\n [\"nextflow_schema.json\"],\n [\"nextflow.config\"],\n [\"README.md\"],\n [os.path.join(\".github\", \".dockstore.yml\")],\n [os.path.join(\".github\", \"CONTRIBUTING.md\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"config.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.yml\")],\n [os.path.join(\".github\", \"PULL_REQUEST_TEMPLATE.md\")],\n [os.path.join(\".github\", \"workflows\", \"branch.yml\")],\n [os.path.join(\".github\", \"workflows\", \"ci.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting_comment.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting.yml\")],\n [os.path.join(\"assets\", \"email_template.html\")],\n [os.path.join(\"assets\", \"email_template.txt\")],\n [os.path.join(\"assets\", \"sendmail_template.txt\")],\n [os.path.join(\"assets\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"conf\", \"modules.config\")],\n [os.path.join(\"conf\", \"test.config\")],\n [os.path.join(\"conf\", \"test_full.config\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_dark.png\")],\n [os.path.join(\"docs\", \"output.md\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"docs\", \"usage.md\")],\n [os.path.join(\"lib\", \"nfcore_external_java_deps.jar\")],\n [os.path.join(\"lib\", \"NfcoreTemplate.groovy\")],\n [os.path.join(\"lib\", \"Utils.groovy\")],\n [os.path.join(\"lib\", \"WorkflowMain.groovy\")],\n ]\n\n files_warn = [\n [\"main.nf\"],\n [os.path.join(\"assets\", \"multiqc_config.yml\")],\n [os.path.join(\"conf\", \"base.config\")],\n [os.path.join(\"conf\", \"igenomes.config\")],\n [os.path.join(\".github\", \"workflows\", \"awstest.yml\")],\n [os.path.join(\".github\", \"workflows\", \"awsfulltest.yml\")],\n [os.path.join(\"lib\", f\"Workflow{short_name[0].upper()}{short_name[1:]}.groovy\")],\n [\"modules.json\"],\n [\"pyproject.toml\"],\n ]\n\n # List of strings. 
Fails / warns if any of the strings exist.\n files_fail_ifexists = [\n \"Singularity\",\n \"parameters.settings.json\",\n \".nf-core.yaml\", # yml not yaml\n os.path.join(\"bin\", \"markdown_to_html.r\"),\n os.path.join(\"conf\", \"aws.config\"),\n os.path.join(\".github\", \"workflows\", \"push_dockerhub.yml\"),\n os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.md\"),\n os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.md\"),\n os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo.png\"),\n \".markdownlint.yml\",\n \".yamllint.yml\",\n os.path.join(\"lib\", \"Checks.groovy\"),\n os.path.join(\"lib\", \"Completion.groovy\"),\n os.path.join(\"lib\", \"Workflow.groovy\"),\n ]\n files_warn_ifexists = [\".travis.yml\"]\n\n # Remove files that should be ignored according to the linting config\n ignore_files = self.lint_config.get(\"files_exist\", [])\n\n def pf(file_path):\n return os.path.join(self.wf_path, file_path)\n\n # First - critical files. Check that this is actually a Nextflow pipeline\n if not os.path.isfile(pf(\"nextflow.config\")) and not os.path.isfile(pf(\"main.nf\")):\n failed.append(\"File not found: nextflow.config or main.nf\")\n raise AssertionError(\"Neither nextflow.config or main.nf found! Is this a Nextflow pipeline?\")\n\n # Files that cause an error if they don't exist\n for files in files_fail:\n if any([f in ignore_files for f in files]):\n continue\n if any([os.path.isfile(pf(f)) for f in files]):\n passed.append(f\"File found: {self._wrap_quotes(files)}\")\n else:\n failed.append(f\"File not found: {self._wrap_quotes(files)}\")\n\n # Files that cause a warning if they don't exist\n for files in files_warn:\n if any([f in ignore_files for f in files]):\n continue\n if any([os.path.isfile(pf(f)) for f in files]):\n passed.append(f\"File found: {self._wrap_quotes(files)}\")\n else:\n warned.append(f\"File not found: {self._wrap_quotes(files)}\")\n\n # Files that cause an error if they exist\n for file in files_fail_ifexists:\n if file in ignore_files:\n continue\n if os.path.isfile(pf(file)):\n failed.append(f\"File must be removed: {self._wrap_quotes(file)}\")\n else:\n passed.append(f\"File not found check: {self._wrap_quotes(file)}\")\n\n # Files that cause a warning if they exist\n for file in files_warn_ifexists:\n if file in ignore_files:\n continue\n if os.path.isfile(pf(file)):\n warned.append(f\"File should be removed: {self._wrap_quotes(file)}\")\n else:\n passed.append(f\"File not found check: {self._wrap_quotes(file)}\")\n\n # Files that are ignoed\n for file in ignore_files:\n ignored.append(f\"File is ignored: {self._wrap_quotes(file)}\")\n\n return {\"passed\": passed, \"warned\": warned, \"failed\": failed, \"ignored\": ignored}",
"def checkAllFilesGenerated(self):\n root = get_exhale_root(self)\n containmentFolder = self.getAbsContainmentFolder()\n for node in root.all_nodes:\n if node.kind in [\"enumvalue\", \"group\"]:\n continue\n gen_file_path = os.path.join(containmentFolder, node.file_name)\n self.assertTrue(\n os.path.isfile(gen_file_path),\n \"File for {kind} node with refid=[{refid}] not generated to [{gen_file_path}]!\".format(\n kind=node.kind, refid=node.refid, gen_file_path=gen_file_path\n )\n )",
"def validate_files(dir, files_to_merge):\r\n for path in files_to_merge:\r\n pathname = dir.joinpath(path)\r\n if not pathname.exists():\r\n raise Exception(\"I18N: Cannot generate because file not found: {0}\".format(pathname))",
"def check_generated_files(out_dir, output_list_file):\n xcpd_dir = os.path.join(out_dir, \"xcp_d\")\n found_files = sorted(glob(os.path.join(xcpd_dir, \"**/*\"), recursive=True))\n found_files = [os.path.relpath(f, out_dir) for f in found_files]\n\n # Ignore figures\n found_files = [f for f in found_files if \"figures\" not in f]\n\n with open(output_list_file, \"r\") as fo:\n expected_files = fo.readlines()\n expected_files = [f.rstrip() for f in expected_files]\n\n if sorted(found_files) != sorted(expected_files):\n expected_not_found = sorted(list(set(expected_files) - set(found_files)))\n found_not_expected = sorted(list(set(found_files) - set(expected_files)))\n\n msg = \"\"\n if expected_not_found:\n msg += \"\\nExpected but not found:\\n\\t\"\n msg += \"\\n\\t\".join(expected_not_found)\n\n if found_not_expected:\n msg += \"\\nFound but not expected:\\n\\t\"\n msg += \"\\n\\t\".join(found_not_expected)\n raise ValueError(msg)",
"def test_yaml_file_watch(self):\n # Set initial data\n _setup_template_value('yaml_file_test_values.tmp.yml', 'yaml_file_test_values_1.yml')\n\n with TemplateRenderThread('yaml_file_test.t', 'yaml_file_test.tmp.out') as renderer:\n self.assertStringEqualToTemplateFileWithIterations(renderer.output_data_getter,\n 'yaml_file_test_values_expected_1.out')\n\n # Set updated data\n print('Updating file..')\n _setup_template_value('yaml_file_test_values.tmp.yml', 'yaml_file_test_values_2.yml')\n self.assertStringEqualToTemplateFileWithIterations(renderer.output_data_getter,\n 'yaml_file_test_values_expected_2.out')",
"def test_duo_yaml_files_watch(self):\n # Set initial data\n _setup_template_value('yaml_file_test_values_first.tmp.yml', 'yaml_file_test_values_1.yml')\n _setup_template_value('yaml_file_test_values_second.tmp.yml', 'yaml_file_test_values_2.yml')\n\n with TemplateRenderThread('yaml_file_test_duo.t', 'yaml_file_test_duo.tmp.out') as renderer:\n self.assertStringEqualToTemplateFileWithIterations(renderer.output_data_getter,\n 'yaml_file_test_duo_expected_1.out')\n\n # Set updated data\n print('Updating first file..')\n _setup_template_value('yaml_file_test_values_first.tmp.yml', 'yaml_file_test_values_2.yml')\n self.assertStringEqualToTemplateFileWithIterations(renderer.output_data_getter,\n 'yaml_file_test_duo_expected_2.out')\n\n # Set updated data\n print('Updating second file..')\n _setup_template_value('yaml_file_test_values_second.tmp.yml', 'yaml_file_test_values_1.yml')\n self.assertStringEqualToTemplateFileWithIterations(renderer.output_data_getter,\n 'yaml_file_test_duo_expected_3.out')",
"def test_files(self):\r\n\r\n for path in self.get_files():\r\n self.assertTrue(datetime.fromtimestamp(os.path.getmtime(path)) > self.start_time,\r\n msg='File not recently modified: %s' % os.path.basename(path))",
"def check_out_files_exist(self):\n for filetype in self.filetypes:\n filename = self.out_filename(filetype)\n if not filename.is_file():\n log.error('MISSING: {}'.format(filename))\n return False\n\n return True",
"def check_out_files_exist(self):\n for filetype in self.filetypes:\n filename = self.out_filename(filetype)\n if not filename.is_file():\n log.error('MISSING: {}'.format(filename))\n return False\n\n return True",
"def check_out_files_exist(self):\n for filetype in self.filetypes:\n filename = self.out_filename(filetype)\n if not filename.is_file():\n log.error('MISSING: {}'.format(filename))\n return False\n\n return True",
"def process_all_files():\n src_files = get_doc_files()\n\n for src_pathname in src_files:\n if src_pathname.suffix in MARKDOWN_EXTENSIONS:\n process_file_markdown(src_pathname)\n elif src_pathname.suffix in STATIC_ASSET_EXTENSIONS:\n process_file_copytodest(src_pathname)",
"def output_files_exist(self):\n return all([split.exists() for split in self.split_files])",
"def lint_every_rendered_component_has_a_fixture(files_to_lint):\n files_to_lint = lintutil.filter(files_to_lint, suffix='.html')\n\n for f in files_to_lint:\n contents_of_f = lintutil.file_contents(f)\n for m in RENDER_REACT_RE.finditer(contents_of_f):\n component_file = m.group(1)\n # To be server-side renderable, the fixture file has to be\n # a javascript file, not jsx or something else.\n fixture_file = component_file + '.fixture.js'\n if not os.path.exists(ka_root.join(fixture_file)):\n linenum = contents_of_f.count('\\n', 0, m.start()) + 1\n yield (f, linenum,\n '%s must have an associated fixture file %s'\n % (component_file, fixture_file))",
"def file_checker():\n\n PATH_RELEASE1_IDEN = os.getcwd()+'/archive_all_2014-10/'\n PATH_RELEASE1_UNIDE = None\n #PATH_RELEASE1_UNIDE = os.getcwd()+'/archive_all_2014-10/'\n\n PATH_RELEASE2_IDEN = os.getcwd()+'/archive_all_2016-10/archive_identified_2016-10/'\n PATH_RELEASE2_UNIDE = os.getcwd() + '/archive_all_2016-10/archive_unidentified_2016-10/'\n\n\n #From here don't change anything.\n #This global function finds the .mgf files in paths\n list_of_files_release1_ide = glob.glob(PATH_RELEASE1_IDEN+'*.mgf')\n list_of_files_release1_unide = None #REMOVE THIS PART AND UNCOMMENT NEXT LINE IN NEXT RELEASES.\n\n #list_of_files_release1_unid = glob.glob(PATH_RELEASE1_UNID'+*.mgf')\n\n list_of_files_release2_ide = glob.glob(PATH_RELEASE2_IDEN+'*.mgf')\n list_of_files_release2_unide = glob.glob(PATH_RELEASE2_UNIDE+'*.mgf')\n\n\n #Check if exist cache folder. If not will make it. \n #RELEASE 1 \n if not os.path.exists(PATH_RELEASE1_IDEN+'cache'):\n os.makedirs(PATH_RELEASE1_IDEN+'cache')\n\n # if not os.path.exists(PATH_RELEASE1_UNIDE'+cache'):\n # os.makedirs(PATH_RELEASE1_UNIDE'+cache')\n\n #RELEASE2\n if not os.path.exists(PATH_RELEASE2_IDEN+'cache'):\n os.makedirs(PATH_RELEASE2_IDEN+'cache')\n\n if not os.path.exists(PATH_RELEASE2_UNIDE+'cache'):\n os.makedirs(PATH_RELEASE2_UNIDE+'cache')\n \n\n return PATH_RELEASE1_IDEN, \\\n PATH_RELEASE2_IDEN, \\\n PATH_RELEASE2_UNIDE, \\\n list_of_files_release1_ide, \\\n list_of_files_release2_ide, \\\n list_of_files_release2_unide",
"def test_matched_pairs():\n template_filelist = listdir(RTEMPLATE_PATH)\n\n R_files = []\n json_files = []\n orphan_files = []\n for file in template_filelist:\n if '.r' in file:\n file = file.replace('.r', '')\n R_files.append(file)\n elif '.json' in file:\n file = file.replace('.json', '')\n json_files.append(file)\n else:\n orphan_files.append(file)\n\n ## make sure there are no non R/json files\n assert not bool(orphan_files) #file in path isn't .json or .R\n\n ## make sure every R file has a json pair\n assert not bool(\n set(R_files) - set(json_files)\n )",
"def _assert_correct_files_are_present(outputdir: Path) -> None:\n for plane in PLANES:\n assert (outputdir / f\"{AMP_BETA_NAME}{plane.lower()}.tfs\").is_file()\n assert (outputdir / f\"{BETA_NAME}{plane.lower()}.tfs\").is_file()\n assert (outputdir / f\"{PHASE_NAME}{plane.lower()}.tfs\").is_file()\n assert (outputdir / f\"{TOTAL_PHASE_NAME}{plane.lower()}.tfs\").is_file()\n assert (outputdir / f\"{ORBIT_NAME}{plane.lower()}.tfs\").is_file()\n assert (outputdir / f\"{DISPERSION_NAME}x.tfs\").is_file()\n assert (outputdir / f\"{NORM_DISP_NAME}x.tfs\").is_file() # no norm disp in Y plane\n\n for rdt in [\"1001\", \"1010\"]:\n assert (outputdir / f\"f{rdt}.tfs\").is_file()",
"def test_filecompare(self):\n cmp = filecmp.dircmp(self.root_gold, self.root_target, ignore=[])\n self.recursive_dircmp(cmp)",
"def CheckFilesMatch(config):\n\n diff_errors = []\n\n file_pairs = _GetFilePairs(config)\n missing_files, stale_files = _GetMissingAndStaleFiles(file_pairs)\n\n for pair in missing_files:\n diff_errors.append(\"File %s does not exist\" % pair.target)\n continue\n\n for pair in stale_files:\n diff_errors.append(\"File %s is out of date\" % pair.target)\n\n if diff_errors:\n error_msg = \"Files out of date!\\n\\n\"\n error_msg += \"To fix run THIS command:\\n\"\n error_msg += \" bazel-bin/%s/%s --fix\\n\\n\" % (config.package_name,\n config.target_name)\n error_msg += \"Errors:\\n\"\n error_msg += \" \" + \"\\n \".join(diff_errors)\n return error_msg\n else:\n return None",
"def __render_templates(files_to_render, dest_location, jinja_env):\n errors = []\n\n from jinja2.exceptions import TemplateNotFound\n\n for template_file in files_to_render:\n filename = os.path.abspath(os.path.join(dest_location, template_file))\n\n print(\"Pillar template_file: {} --> {}\".format(template_file, filename))\n\n if not os.path.isdir(os.path.dirname(filename)):\n os.makedirs(os.path.dirname(filename))\n\n try:\n print(\"Attempting to load template_file: {}\".format(template_file))\n template_rendered = jinja_env.get_template(template_file).render(env=env)\n print(green(\"Pillar template_file rendered: {} --> {}\".format(template_file, filename)))\n\n # Only write the template file if we can actually render it\n with open(os.path.join(dest_location, template_file), 'w') as f:\n f.write(template_rendered)\n\n except TemplateNotFound:\n errors.append(template_file)\n print(red(\"Pillar template_file not found: {} --> {}\".format(template_file, filename)))\n\n if not len(errors):\n print(green(\"Pillar was successfully rendered in: {}\".format(dest_location)))\n else:\n print(red(\"Pillar could not compile the following templates:\"))\n for error in errors:\n print(red(\" - {}\").format(error))\n\n return len(errors) == 0",
"def build(self) -> None:\n def do_process(fname) -> bool:\n for sfx in skip_suffixes:\n if fname.endswith(sfx):\n return False\n return True\n\n for dirpath, _, fnames in os.walk(self.template_dir):\n for fname in fnames:\n if do_process(fname):\n self.process(dirpath, fname)",
"def check_comps(root, comps):\n for key, comp in comps.items():\n\n filename = os.path.join(root, comp['filename'])\n if not os.path.isfile(filename):\n warnings.warn(\n 'The file {0} could not be found'.format(filename))",
"def _check_file_not_used(self):\n module_files = set(self._get_module_files())\n referenced_files = set(self._get_manifest_referenced_files()).union(\n set(self._get_xml_referenced_files())\n )\n excluded_dirs = ['static', 'test', 'tests', 'migrations']\n no_referenced_files = [\n f for f in (module_files - referenced_files)\n if f.split(os.path.sep)[0] not in excluded_dirs\n ]\n self.msg_args = no_referenced_files\n return not no_referenced_files",
"def comp_files(cfg, atom_id_dict, type_dicts):\n first_content, first_section_order = proc_data_file(cfg, cfg[DATA_FILE], atom_id_dict, type_dicts,)\n second_content, second_section_order = proc_data_file(cfg, cfg[DATA_COMP], atom_id_dict, type_dicts,)\n\n for section in second_section_order:\n if section not in first_section_order:\n warning(\"Skipping section '{}'; section found in the file: {}\\n\"\n \" but not in file: {}\".format(section, cfg[DATA_COMP], cfg[DATA_FILE]))\n\n diffs = [\"Differences in head section:\"]\n compare_heads(first_content[SEC_HEAD], second_content[SEC_HEAD], diffs)\n\n for section in first_section_order:\n if section not in second_section_order:\n warning(\"Skipping section '{}'; section found in the file: {}\\n\"\n \" but not in file: {}\".format(section, cfg[DATA_FILE], cfg[DATA_COMP]))\n elif section in [SEC_VELOS]:\n diffs.append(\"\\nSkipping section '{}'\".format(section))\n elif section in COMP_ORD_SEC_COL_DICT:\n diffs.append(\"\\nDifferences in section '{}':\".format(section))\n num_col_to_compare = COMP_ORD_SEC_COL_DICT[section]\n compare_lists(first_content[section], second_content[section], 0, num_col_to_compare, diffs,\n SEC_FORMAT_DICT[section][0], SEC_FORMAT_DICT[section][1])\n elif section in NUM_SEC_DICT:\n diffs.append(\"\\nDifferences in section '{}':\".format(section))\n num_col_to_compare = NUM_SEC_DICT[section][1]\n compare_lists(first_content[section], second_content[section], 1, num_col_to_compare, diffs,\n SEC_FORMAT_DICT[section][0], SEC_FORMAT_DICT[section][1])\n else:\n print(\"Encountered unexpected section '{}'\".format(section))\n\n f_name = create_out_fname(cfg[DATA_COMP], prefix='diffs_', ext='.txt')\n list_to_file(diffs, f_name)\n print('Completed writing {}'.format(f_name))",
"def test_input_files(self):\n files = list_files_folder(data_dir + \"build-custom/files/\", ext=\"fna.gz\")\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_files\"\n params[\"input\"] = files\n params[\"input_extension\"] = \"\"\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")\n\n self.assertTrue(res[\"target\"][\"file\"].isin(files).all(), \"Files missing from target\")\n self.assertEqual(len(files), res[\"target\"].shape[0], \"Wrong number of files on target\")\n self.assertTrue(res[\"info\"][\"file\"].isin(files).all(), \"Files missing from info\")\n self.assertEqual(len(files), res[\"info\"].shape[0], \"Wrong number of files on info\")\n\n # All files are invalid\n files = [f+\".xxx\" for f in files]\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_files_invalid\"\n params[\"input\"] = files\n params[\"input_extension\"] = \"\"\n cfg = Config(\"build-custom\", **params)\n self.assertFalse(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom ran but it should fail\")",
"def test_verify_corrupt_archive_compare_data(self):\n self.backup(u\"full\", u\"testfiles/various_file_types\", options=[])\n output_files = os.listdir(\"testfiles/output\")\n archives = [elem for elem in output_files if \"vol\" in elem]\n for archive in archives:\n # Edit source file\n with open(\"testfiles/output/\" + archive, 'r+') as f:\n f.write('This writes text into each archive file to corrupt it.')\n # Test verify for the file\n try:\n self.verify(u'testfiles/various_file_types/executable', file_to_verify=u'executable',\n options=[u\"--compare-data\"])\n except CmdError as e:\n # Should return a 21 error code for \"hash mismatch\"\n self.assertEqual(e.exit_status, 21, str(e))\n else:\n self.fail('Expected Hash Mismatch Error not thrown')",
"def precheck(self):\n if (not dfs.exists(self.outputpath)):\n logger.debug(\"precheck(%s): outputpath %s does not exist, ready to run.\" \n % (self, self.outputpath))\n return 'ready'\n inTSs = [dfs.modtime(file) for file in self.inputpaths]\n outTS = dfs.modtime(self.outputpath)\n newer = reduce(lambda x,y: x or y, [(inTS>outTS) for inTS in inTSs])\n logger.debug(\"Input timestamps: %s\" % inTSs)\n logger.debug(\"Output timestamp: %s\" % outTS)\n if newer:\n logger.debug(\"At least one input file is newer than outputfile, ready to run.\")\n dfs.delete(self.outputpath)\n return 'ready'\n else:\n logger.debug(\"All input files are newer than outputfile, skipping.\")\n return 'skip'",
"def check_all_files_and_dirs(self):\n err = 0\n err_m = ''\n warning = 0\n warning_m = ''\n # Check the pdb file for refinement\n if self.refine_pdb_in == None:\n err = 1\n err_m += '\\nPdb file should be supplied'\n else:\n if self.check_single_file(self.refine_pdb_in):\n self.refine_pdb_in = os.path.abspath(self.refine_pdb_in)\n else:\n err = 1\n err_m += '\\nFile not found: %s' %(self.refine_pdb_in)\n\n # Check the pdb file for distance analysis\n if self.check_single_file(self.X8_pdb_in):\n self.X8_pdb_in = os.path.abspath(self.X8_pdb_in)\n else:\n self.X8_pdb_in != None\n warning = 1\n warning_m += '\\nXtrapol8 pdb_in not found. No additional analysis will be applied'\n\n # Check additional files and append them to a string\n additional = \"\"\n for fle in self.additional:\n if len(fle)>0:\n if self.check_single_file(fle):\n new_add = os.path.abspath(fle)\n additional = additional + \"%s \" % (new_add)\n else:\n err = 1\n err_m += '\\nFile not found: %s' %(fle)\n self.additional = additional\n\n #Check the output directory\n if os.path.isdir(self.outdir):\n self.outdir = os.path.abspath(self.outdir)\n else:\n err = 1\n err_m += \"\\nXtrapol8 output directory cannot be found.\" \\\n \"Please run this from the same directory from which you ran Xtrapol8.\"\n\n #Check the phil file for reciprocal space refinement\n if self.check_single_file(self.reciprocal_space_phil):\n self.reciprocal_space_phil = os.path.abspath(self.reciprocal_space_phil)\n else:\n self.reciprocal_space_phil = ''\n warning = 1\n warning_m += '\\nPhil for reciprocal space refinement not found. Refinement will use default parameters.'\n\n\n #Check the phil file for real space refinement\n if self.check_single_file(self.real_space_phil):\n self.real_space_phil = os.path.abspath(self.real_space_phil)\n else:\n self.real_space_phil = ''\n warning = 1\n warning_m += '\\nPhil for real space refinement not found. Refinement will use default parameters.'\n\n #Check the residue list for distance analysis\n if self.check_single_file(self.residue_list):\n self.residue_list = os.path.abspath(self.residue_list)\n else:\n self.residue_list = None\n warning = 1\n warning_m += '\\nResidue list not found. Distance analysis (if required) will be performed without residue list.'\n\n return err, err_m, warning, warning_m",
"def test_filter_file_exceptions_early_dupes():\n exceptions = Exceptions(os.path.join(os.path.dirname(__file__),\n 'early_exceptions.yaml'))\n\n package = Package('test', os.path.dirname(__file__))\n files = [os.path.join(os.path.dirname(__file__),\n 'unlikelystring'),\n os.path.join(os.path.dirname(__file__),\n 'unlikelystring')]\n\n filtered_files = exceptions.filter_file_exceptions_early(package, files)\n\n assert not filtered_files",
"def should_run(self):\n # from IPython.html.tasks.py\n\n css_targets = [pjoin(static, 'css', 'style.min.css')]\n css_maps = [t + '.map' for t in css_targets]\n targets = css_targets + css_maps\n if not all(os.path.exists(t) for t in targets):\n # some generated files don't exist\n return True\n earliest_target = sorted(mtime(t) for t in targets)[0]\n\n # check if any .less files are newer than the generated targets\n for dirpath, dirnames, filenames in os.walk(static):\n for f in filenames:\n if f.endswith('.less'):\n path = pjoin(static, dirpath, f)\n timestamp = mtime(path)\n if timestamp > earliest_target:\n return True\n\n return False",
"def affected_testfiles(files_changed: Iterable[Text],\n skip_dirs: Optional[Set[Text]] = None,\n manifest_path: Optional[Text] = None,\n manifest_update: bool = True\n ) -> Tuple[Set[Text], Set[Text]]:\n if skip_dirs is None:\n skip_dirs = {\"conformance-checkers\", \"docs\", \"tools\"}\n affected_testfiles = set()\n # Exclude files that are in the repo root, because\n # they are not part of any test.\n files_changed = [f for f in files_changed if not _in_repo_root(f)]\n nontests_changed = set(files_changed)\n wpt_manifest = load_manifest(manifest_path, manifest_update)\n\n test_types = [\"crashtest\", \"print-reftest\", \"reftest\", \"testharness\", \"wdspec\"]\n support_files = {os.path.join(wpt_root, path)\n for _, path, _ in wpt_manifest.itertypes(\"support\")}\n wdspec_test_files = {os.path.join(wpt_root, path)\n for _, path, _ in wpt_manifest.itertypes(\"wdspec\")}\n test_files = {os.path.join(wpt_root, path)\n for _, path, _ in wpt_manifest.itertypes(*test_types)}\n\n interface_dir = os.path.join(wpt_root, 'interfaces')\n interfaces_files = {os.path.join(wpt_root, 'interfaces', filename)\n for filename in os.listdir(interface_dir)}\n\n interfaces_changed = interfaces_files.intersection(nontests_changed)\n nontests_changed = nontests_changed.intersection(support_files)\n\n tests_changed = {item for item in files_changed if item in test_files}\n\n nontest_changed_paths = set()\n rewrites: Dict[Text, Text] = {\"/resources/webidl2/lib/webidl2.js\": \"/resources/WebIDLParser.js\"}\n for full_path in nontests_changed:\n rel_path = os.path.relpath(full_path, wpt_root)\n path_components = rel_path.split(os.sep)\n top_level_subdir = path_components[0]\n if top_level_subdir in skip_dirs:\n continue\n repo_path = \"/\" + os.path.relpath(full_path, wpt_root).replace(os.path.sep, \"/\")\n if repo_path in rewrites:\n repo_path = rewrites[repo_path]\n full_path = os.path.join(wpt_root, repo_path[1:].replace(\"/\", os.path.sep))\n nontest_changed_paths.add((full_path, repo_path))\n\n interfaces_changed_names = [os.path.splitext(os.path.basename(interface))[0]\n for interface in interfaces_changed]\n\n def affected_by_wdspec(test: Text) -> bool:\n affected = False\n if test in wdspec_test_files:\n for support_full_path, _ in nontest_changed_paths:\n # parent of support file or of \"support\" directory\n parent = os.path.dirname(support_full_path)\n if os.path.basename(parent) == \"support\":\n parent = os.path.dirname(parent)\n relpath = os.path.relpath(test, parent)\n if not relpath.startswith(os.pardir):\n # testfile is in subtree of support file\n affected = True\n break\n return affected\n\n def affected_by_interfaces(file_contents: Text) -> bool:\n if len(interfaces_changed_names) > 0:\n if 'idlharness.js' in file_contents:\n for interface in interfaces_changed_names:\n regex = '[\\'\"]' + interface + '(\\\\.idl)?[\\'\"]'\n if re.search(regex, file_contents):\n return True\n return False\n\n for root, dirs, fnames in os.walk(wpt_root):\n # Walk top_level_subdir looking for test files containing either the\n # relative filepath or absolute filepath to the changed files.\n if root == wpt_root:\n for dir_name in skip_dirs:\n dirs.remove(dir_name)\n for fname in fnames:\n test_full_path = os.path.join(root, fname)\n # Skip any file that's not a test file.\n if test_full_path not in test_files:\n continue\n if affected_by_wdspec(test_full_path):\n affected_testfiles.add(test_full_path)\n continue\n\n with open(test_full_path, \"rb\") as fh:\n raw_file_contents: bytes = fh.read()\n if 
raw_file_contents.startswith(b\"\\xfe\\xff\"):\n file_contents: Text = raw_file_contents.decode(\"utf-16be\", \"replace\")\n elif raw_file_contents.startswith(b\"\\xff\\xfe\"):\n file_contents = raw_file_contents.decode(\"utf-16le\", \"replace\")\n else:\n file_contents = raw_file_contents.decode(\"utf8\", \"replace\")\n for full_path, repo_path in nontest_changed_paths:\n rel_path = os.path.relpath(full_path, root).replace(os.path.sep, \"/\")\n if rel_path in file_contents or repo_path in file_contents or affected_by_interfaces(file_contents):\n affected_testfiles.add(test_full_path)\n continue\n\n return tests_changed, affected_testfiles"
] | [
"0.6298096",
"0.5958212",
"0.5917722",
"0.5899919",
"0.5821627",
"0.58071446",
"0.57569844",
"0.5753009",
"0.5753009",
"0.5753009",
"0.57331693",
"0.5726181",
"0.56889504",
"0.5665329",
"0.5652075",
"0.56505686",
"0.5650157",
"0.561327",
"0.56093055",
"0.5601862",
"0.5555765",
"0.55554396",
"0.5544284",
"0.55367583",
"0.55271",
"0.5514008",
"0.55031884",
"0.5490609",
"0.5489855",
"0.5482246"
] | 0.68884873 | 0 |
generates initial hidden states for each agent | def generate_initial_hidden_states(self, batch_size, test_mode=False, caller=None):
# Set up hidden states for all levels - and propagate through the runner!
hidden_dict = {}
hidden_dict["level1"] = th.stack([Variable(th.zeros(batch_size, 1, self.args.agents_hidden_state_size)) for _
in range(self.n_agents if self.is_obs_noise(test_mode) and caller != "learner" else 1)])
hidden_dict["level2"] = th.stack([Variable(th.zeros(batch_size, 1, self.args.agents_hidden_state_size)) for _
in range(len(sorted(combinations(list(range(self.n_agents)), 2)))*2
if self.is_obs_noise(test_mode) and caller != "learner" else
len(sorted(combinations(list(range(self.n_agents)), 2))))])
hidden_dict["level3"] = th.stack([Variable(th.zeros(batch_size, 1, self.args.agents_hidden_state_size)) for _
in range(self.n_agents)])
if self.args.use_cuda:
hidden_dict = {_k:_v.cuda() for _k, _v in hidden_dict.items()}
return hidden_dict, "?*bs*v*t" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initial_state(self):\n # Network details elided.\n return self.agent.initial_state()",
"def initial_state(self):\n # Network details elided.\n return self.agent.initial_state()",
"def initial_state(self):\n # Network details elided.\n return self.agent.initial_state()",
"def registerInitialState(self, gameState):\n\n ''' \n Make sure you do not delete the following line. If you would like to\n use Manhattan distances instead of maze distances in order to save\n on initialization time, please take a look at\n CaptureAgent.registerInitialState in captureAgents.py. \n '''\n CaptureAgent.registerInitialState(self, gameState)\n self.opponents = self.getOpponents(gameState)\n self.distributions = []\n self.legalPositions = [p for p in gameState.getWalls().asList(False) if p[1] > 1]\n print self.legalPositions\n\n #initializing beleif distribution of opponents\n for i in range(0, gameState.getNumAgents()):\n if i in self.opponents:\n beliefs = util.Counter()\n for p in self.legalPositions: beliefs[p] = 1.0\n beliefs.normalize()\n self.distributions.append(beliefs)\n else:\n self.distributions.append(None)\n\n\n ''' \n Your initialization code goes here, if you need any.\n '''",
"def init_hidden(self):\n # TODO ========================\n # initialize the hidden states to zero\n\n initial_hidden = torch.zeros(self.num_layers, self.batch_size, self.hidden_size)\n return initial_hidden # a parameter tensor of shape (self.num_layers, self.batch_size, self.hidden_size)",
"def _init_episode(self):\n # get states - one-hots\n self._states = np.zeros((self._size_state, self._size_state))\n\n # to_ones = np.random.permutation(self._size_state)[0:3]\n for x in xrange(self._size_state):\n # self._states[x][to_ones[x]] = 1\n self._states[x][x] = 1\n\n self._prob_transition = np.array([[.8,.2]])\n self._randomize()\n self._current_state = 0\n self._last_state = 0\n self._stage = 0\n self._since_flipped = 0",
"def initial_state(self):\n # Network details elided.\n return self._agent.initial_state()",
"def initialize_hidden_state(self):\n initializer = tf.keras.initializers.Zeros()\n rnnten = initializer(shape=(self.batch, self.units))\n return rnnten",
"def fetch_initial_states(self):\n for agent_id, agent_obj in self.__registered_agents.items():\n # given the agent's capabilities, get everything the agent can perceive\n state = self.__get_agent_state(agent_obj)\n\n # filter other things from the agent state\n filtered_agent_state = agent_obj.filter_observations(state)\n\n # save the current agent's state for the API\n api.add_state(agent_id=agent_id, state=filtered_agent_state,\n agent_inheritence_chain=agent_obj.class_inheritance,\n world_settings=api.MATRX_info)\n\n # add god state\n api.add_state(agent_id=\"god\", state=self.__get_complete_state(), agent_inheritence_chain=\"god\",\n world_settings=api.MATRX_info)\n\n # initialize the message manager\n self.message_manager.agents = self.__registered_agents.keys()\n self.message_manager.teams = self.__teams\n\n # make the information of this tick available via the API, after all\n # agents have been updated\n api.next_tick()",
"def reset(self):\r\n \r\n self.done = False\r\n self.t = 0\r\n self.episode = random.choice(episodes)\r\n\r\n # initiate agent\r\n self.agent = self.create_agent(Agent)\r\n \r\n # initiate state at time zero\r\n self.state = (self.episode[self.t]['ST Relative Indicator'], \r\n self.episode[self.t]['ST Relative Indicator'], \r\n self.agent.stock,\r\n self.t)\r\n \r\n return self.state",
"def setup_initial_state(self):\n # collect the ids of vehicles in the network\n self.ids = self.vehicles.get_ids()\n self.controlled_ids = self.vehicles.get_controlled_ids()\n self.sumo_ids = self.vehicles.get_sumo_ids()\n self.rl_ids = self.vehicles.get_rl_ids()\n\n # dictionary of initial observations used while resetting vehicles after\n # each rollout\n self.initial_observations = dict.fromkeys(self.ids)\n\n # create the list of colors used to different between different types of\n # vehicles visually on sumo's gui\n #TODO: Get these colors working!\n # self.colors = {(255,0,0), (0,255,0),(0,0,255),(255,255,255)}\n self.colors = {}\n key_index = 1\n color_choice = np.random.choice(len(COLORS))\n for i in range(self.vehicles.num_types):\n self.colors[self.vehicles.types[i]] = \\\n COLORS[(color_choice + key_index) % len(COLORS)]\n key_index += 1\n\n for veh_id in self.ids:\n # set the colors of the vehicles based on their unique types\n veh_type = self.vehicles.get_state(veh_id, \"type\")\n self.traci_connection.vehicle.setColor(veh_id,\n self.colors[veh_type])\n\n # add the initial states to the vehicles class\n self.vehicles.set_edge(\n veh_id, self.traci_connection.vehicle.getRoadID(veh_id))\n self.vehicles.set_position(\n veh_id, self.traci_connection.vehicle.getLanePosition(veh_id))\n self.vehicles.set_lane(\n veh_id, self.traci_connection.vehicle.getLaneIndex(veh_id))\n self.vehicles.set_speed(\n veh_id, self.traci_connection.vehicle.getSpeed(veh_id))\n self.vehicles.set_route(\n veh_id, self.available_routes[self.vehicles.get_edge(veh_id)])\n self.vehicles.set_absolute_position(\n veh_id, self.get_x_by_id(veh_id))\n # the time step of the last lane change is always present in\n # the environment,but only used by sub-classes that apply lane\n # changing\n self.vehicles.set_state(veh_id, \"last_lc\",\n -1 * self.lane_change_duration)\n # some constant vehicle parameters\n self.vehicles.set_state(\n veh_id, \"length\",\n self.traci_connection.vehicle.getLength(veh_id))\n self.vehicles.set_state(veh_id, \"max_speed\", self.max_speed)\n\n # import initial state data to initial_observations dict\n self.initial_observations[veh_id] = dict()\n self.initial_observations[veh_id][\"type\"] = veh_type\n self.initial_observations[veh_id][\"edge\"] = \\\n self.traci_connection.vehicle.getRoadID(veh_id)\n self.initial_observations[veh_id][\"position\"] = \\\n self.traci_connection.vehicle.getLanePosition(veh_id)\n self.initial_observations[veh_id][\"lane\"] = \\\n self.traci_connection.vehicle.getLaneIndex(veh_id)\n self.initial_observations[veh_id][\"speed\"] = \\\n self.traci_connection.vehicle.getSpeed(veh_id)\n self.initial_observations[veh_id][\"route\"] = \\\n self.available_routes[self.initial_observations[veh_id][\"edge\"]]\n self.initial_observations[veh_id][\"absolute_position\"] = \\\n self.get_x_by_id(veh_id)\n\n # set speed mode\n self.set_speed_mode(veh_id)\n\n # set lane change mode\n self.set_lane_change_mode(veh_id)\n\n # save the initial state. 
This is used in the _reset function\n #\n route_id = \"route\" + self.initial_observations[veh_id][\"edge\"]\n pos = self.traci_connection.vehicle.getPosition(veh_id)\n\n self.initial_state[veh_id] = \\\n (self.initial_observations[veh_id][\"type\"], route_id,\n self.initial_observations[veh_id][\"lane\"],\n self.initial_observations[veh_id][\"position\"],\n self.initial_observations[veh_id][\"speed\"], pos)\n\n # collect list of sorted vehicle ids\n self.sorted_ids, self.sorted_extra_data = self.sort_by_position()\n\n # collect headway, leader id, and follower id data\n for veh_id in self.ids:\n headway = self.traci_connection.vehicle.getLeader(veh_id, 2000)\n if headway is None:\n self.vehicles.set_leader(veh_id, None)\n self.vehicles.set_headway(veh_id, 9e9)\n else:\n self.vehicles.set_leader(veh_id, headway[0])\n self.vehicles.set_headway(veh_id, headway[1])\n self.vehicles.set_follower(headway[0], veh_id)\n\n # contains the last lc before the current step\n self.prev_last_lc = dict()\n for veh_id in self.ids:\n self.prev_last_lc[veh_id] = self.vehicles.get_state(veh_id,\n \"last_lc\")\n\n # subscribe the requested states for traci-related speedups\n for veh_id in self.ids:\n self.traci_connection.vehicle.subscribe(\n veh_id, [tc.VAR_LANE_INDEX, tc.VAR_LANEPOSITION,\n tc.VAR_ROAD_ID, tc.VAR_SPEED])\n self.traci_connection.vehicle.subscribeLeader(veh_id, 2000)",
"def init_hidden(self, batch_size):\r\n \r\n self.hidden_state = (\r\n torch.zeros(((1+self.bidirectional)*self.num_layers,\r\n batch_size,\r\n self.hidden_size)).to(self.device),\r\n torch.zeros(((1+self.bidirectional)*self.num_layers, \r\n batch_size, \r\n self.hidden_size)).to(self.device))",
"def init_states(self):\n self.filtered_state_means = None\n self.filtered_state_covariances = None\n self.predicted_state_means = None\n self.predicted_state_covariances = None\n self.smoothed_state_means = None\n self.smoothed_state_covariances = None",
"def init_hidden_state(self, batch_size):\n hidden_state = tf.tile(self.initial_hidden_state[None, ...], [batch_size, 1])\n cell_state = tf.tile(self.initial_cell_state[None, ...], [batch_size, 1])\n return hidden_state, cell_state",
"def generate_initial_states(env, max_steps=10000):\n\n initial_state, _ = env.reset()\n\n n_steps = 0\n seen_states = set([initial_state])\n frontier = [initial_state]\n while frontier and n_steps < max_steps:\n state = frontier.pop()\n valid_actions = sorted(list(env.action_space.all_ground_literals(state)))\n for action in valid_actions:\n env.set_state(state)\n next_state = env.step(action)[0]\n n_steps += 1\n if next_state not in seen_states:\n seen_states.add(next_state)\n frontier.append(next_state)\n if n_steps >= max_steps:\n break\n\n seen_states.remove(initial_state)\n # Sort states using the One True Ordering\n states = sorted(list(seen_states), key=lambda x: sorted(list(x.literals)))\n old_rng_st = random.getstate()\n random.seed(0)\n random.shuffle(states)\n random.setstate(old_rng_st)\n\n return states",
"def __init__(self):\n \"\"\" action_ space : pick up location , Drop location\n state_space : location , time (hours) , day\n state_init : random pick from the state_space \"\"\"\n self.action_space = [(i,j) for i in range(m) for j in range(m) if i!=j or i==0]\n # Total states (Xi Tj Dk)\n self.state_space = [[x, y, z] for x in range(m) for y in range(t) for z in range(d)]\n # random Initialize of state (location, hours, day)\n self.state_init = random.choice(self.state_space)\n # Start the first round\n self.reset()",
"def init_hidden_state(self, encoder_out: torch.Tensor):\n pass",
"def make_alternative_states(self) -> np.ndarray:\n states = []\n for agent in range(self.agents):\n agent_state = []\n\n # Own distance\n r, c = self.game.get_agent_pos(agent)\n agent_state.append(r / 6)\n agent_state.append(c / 6)\n\n # Distances to others\n distances_r = [\n (r - pos[0]) / 12\n for key, pos in self.game.agent_positions.items()\n if key != agent\n ]\n distances_c = [\n (c - pos[1]) / 12\n for key, pos in self.game.agent_positions.items()\n if key != agent\n ]\n agent_state += distances_r\n agent_state += distances_c\n\n # Goal distances\n distances_goal_r = [(r - pos[0]) / 12 for pos in self.payoff_fields]\n distances_goal_c = [(c - pos[1]) / 12 for pos in self.payoff_fields]\n agent_state += distances_goal_r\n agent_state += distances_goal_c\n\n if agent < self.num_informed:\n agent_state.append((r - self.special_payoff_fields[0][0]) / 12)\n agent_state.append((c - self.special_payoff_fields[0][1]) / 12)\n else:\n agent_state += [0, 0]\n agent_state.append(self.max_turns - self.turns_count)\n states.append(np.array(agent_state))\n\n states = np.stack(states, axis=0)\n return states",
"def init_states(batch_size, num_lstm_layer, num_hidden):\n init_c = [('l%d_init_c' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]\n init_h = [('l%d_init_h' % l, (batch_size, num_hidden)) for l in range(num_lstm_layer)]\n return init_c + init_h",
"def initialize_state(self):\n super(InverseChain, self).initialize_state()",
"def initial_states(self):\n return list(self.iter_initial_states())",
"def agent_init(self, agent_info):\n\n # First, we get the relevant information from agent_info \n # NOTE: we use np.random.RandomState(seed) to set the two different RNGs\n # for the planner and the rest of the code\n try:\n self.num_states = agent_info[\"num_states\"]\n self.num_actions = agent_info[\"num_actions\"]\n except:\n print(\"You need to pass both 'num_states' and 'num_actions' in agent_info to initialize the action-value table\")\n self.gamma = agent_info.get(\"discount\", 0.95)\n self.step_size = agent_info.get(\"step_size\", 0.1)\n self.epsilon = agent_info.get(\"epsilon\", 0.1)\n self.planning_steps = agent_info.get(\"planning_steps\", 10)\n\n self.rand_generator = np.random.RandomState(agent_info.get('random_seed', 42))\n self.planning_rand_generator = np.random.RandomState(agent_info.get('planning_random_seed', 42))\n\n # Next, we initialize the attributes required by the agent, e.g., q_values, model, etc.\n # A simple way to implement the model is to have a dictionary of dictionaries, \n # mapping each state to a dictionary which maps actions to (reward, next state) tuples.\n self.q_values = np.zeros((self.num_states, self.num_actions))\n self.actions = list(range(self.num_actions))\n self.past_action = -1\n self.past_state = -1\n self.model = {} # model is a dictionary of dictionaries, which maps states to actions to \n # (reward, next_state) tuples",
"def states_initial(self):\n return self.states(\"Initial = YES\")",
"def make_initial_state(self):\n return {\n 'h_rec':Variable(np.zeros((1, self.n_hidden), dtype=np.float32)),\n 'c_rec':Variable(np.zeros((1, self.n_hidden), dtype=np.float32)),\n 'h_gen':Variable(np.zeros((1, self.n_hidden), dtype=np.float32)),\n 'c_gen':Variable(np.zeros((1, self.n_hidden), dtype=np.float32))\n }",
"def initial_state():\n\treturn [[EMPTY, EMPTY, EMPTY],\n\t\t\t[EMPTY, EMPTY, EMPTY],\n\t\t\t[EMPTY, EMPTY, EMPTY]]",
"def _reset(self):\r\n \r\n airgym.reset()\r\n self.stepN = 0\r\n self.episodeN += 1\r\n \r\n self.allLogs = { 'reward': [0] }\r\n self.allLogs['distance'] = [221]\r\n self.allLogs['action'] = [1]\r\n \r\n print(\"\")\r\n \r\n #self.sensors = airgym.getSensorStates()\r\n \r\n # Initial state\r\n self.state = airgym.getScreenDepthVis()\r\n \r\n \r\n return self.state",
"def init_hidden_state(self,batch_size):\n h = torch.zeros(batch_size,self.decoder_dim).to(device) # (batch_size, decoder_dim)\n c = torch.zeros(batch_size,self.decoder_dim).to(device)\n return h, c",
"def initialize_hidden_state(self):\n return tf.zeros(shape=(self.batch_size, self.enc_units))",
"def reset(self, *agents):\n # initialize the state to [0, 0, ..., 0] (length D+1) + [1, 1]\n for i in range(len(agents)):\n D_state = np.hstack((np.zeros(shape=(agents[i].D + 1)), [1, 1]))\n if i == 0:\n self.state = D_state\n else:\n self.state = np.hstack((self.state, D_state))\n\n self.k = 1\n\n # price\n self.S = np.zeros(shape=(self.N,))\n self.S[ind(self.k)] = self.initial_market_price\n self.S_tilde = np.zeros(shape=(self.N,))\n self.S_tilde[ind(self.k)] = self.initial_market_price\n\n for agent in agents:\n agent.reset()\n\n return self.state",
"def initial_state():\n return [[EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY],\n [EMPTY, EMPTY, EMPTY]]"
] | [
"0.6404829",
"0.6404829",
"0.6404829",
"0.6275053",
"0.62659883",
"0.62305397",
"0.6203651",
"0.6194984",
"0.61897767",
"0.6183285",
"0.60839427",
"0.6075158",
"0.6037105",
"0.6035622",
"0.60091317",
"0.59804654",
"0.59778255",
"0.59626013",
"0.5937987",
"0.5926602",
"0.59236103",
"0.5922435",
"0.5919982",
"0.59192294",
"0.5919091",
"0.5906496",
"0.5899491",
"0.5886812",
"0.58807594",
"0.58801347"
] | 0.73379165 | 0 |
Initializes and returns an LSL outlet | def initializeOutlet(interface):
info = StreamInfo('OpenBCI_EEG', 'EEG', 4, 256, 'float32', 'openbci12345')
outlet = StreamOutlet(info)
return outlet | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _add_lamp_outlet(self, model):\r\n\r\n # Create a new CameraItem and set the model\r\n item = LampOutletItem()\r\n item.setModel(model)\r\n\r\n # Create a new CameraInfoWidget and set the model\r\n widget = LampOutletInfoWidget()\r\n widget.setModel(model)\r\n\r\n item.double_clicked.connect(widget.show)\r\n item.deleteSocketAction.connect(model.prepare_for_deletion)\r\n\r\n self.scene().addItem(item)\r\n proxy = self.scene().addWidget(widget)\r\n widget.setProxy(proxy)",
"def _add_lamp_outlets(self):\r\n lst = self.model.get_all_lamp_outlets()\r\n\r\n for itm in lst:\r\n self._add_lamp_outlet(itm)",
"def __init__(self) -> None:\n ptr = lib.wlr_output_layout_create()\n self._ptr = ffi.gc(ptr, lib.wlr_output_layout_destroy)\n\n self.add_event = Signal(ptr=ffi.addressof(ptr.events.add))\n self.change_event = Signal(ptr=ffi.addressof(ptr.events.change))\n self.destroy_event = Signal(ptr=ffi.addressof(ptr.events.destroy))",
"def connect_ls_to_lr(ls, lr, rp, rp_ip, rp_mac, db):\n ovn_nbctl(\"-- --id=@lrp create Logical_Router_port name=%s network=%s \"\n \"mac=%s -- add Logical_Router %s ports @lrp -- lsp-add %s \"\n \"rp-%s\" % (rp, rp_ip, rp_mac, lr, ls, rp), db)\n ovn_nbctl(\"set Logical-Switch-Port rp-%s type=router \"\n \"options:router-port=%s addresses=%s\" % (rp, rp, rp_mac), db)",
"def __init__(self):\n self.server_name = 'Binary Light Device'\n self.device = None",
"def __init__(self):\n self._ll = LowLevelLibs()\n self._lib = self._ll.foundation",
"def __init__(self):\n self._ll = LowLevelLibs()\n self._lib = self._ll.pythia",
"def __init__ (self, scHandle):\n Greenlet.__init__(self)\n\n self.scHandle = scHandle",
"def __init__(self):\n self._ll = LowLevelLibs()\n self._lib = self._ll.ratchet",
"def __init__(self):\n self._ll = LowLevelLibs()\n self._lib = self._ll.ratchet",
"def __init__(self, *args):\n this = _ida_hexrays.new_lvar_locator_t(*args)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, mpls_ttl=None):\n super().__init__()\n self.mpls_ttl = mpls_ttl",
"def __init__(self):\n self._ll = LowLevelLibs()\n self._lib = self._ll.phe",
"def __init__(self, als, cfg=None):\n\n self.als = als\n if cfg is None:\n cfg = AlsDEMCfg()\n self.cfg = cfg",
"def __init__(self, name: str, hw_device: KnauerDAD):\n super().__init__(name, hw_device)\n self.lamp = name\n self.add_api_route(\"/lamp_status\", self.get_lamp, methods=[\"GET\"])\n self.add_api_route(\"/status\", self.get_status, methods=[\"GET\"])",
"def __init__(self, window):\n self._ptr = lib.SDL_GL_CreateContext(window._ptr)",
"def __init__(self, ns=None):\n this = _libSALOME_LifeCycleCORBA.new_SALOME_LifeCycleCORBA(ns)\n try: self.this.append(this)\n except: self.this = this",
"def __init__(self, *args, **kwargs):\n super(LinlLis, self).__init__(\n ('linl', Bits(maxlen=4)),\n ('lis', Bits(maxlen=4)),\n *args, **kwargs\n )",
"def __init__(self, label, LEDStrips, colors):\n\n self._label = label\n self._LEDStrips = LEDStrips\n self._colors = colors",
"def setupLL_Native(self):\n self.LLN_Selector = slicer.qMRMLNodeComboBox()\n self.LLN_Selector.nodeTypes = ['vtkMRMLMultiVolumeNode']\n self.LLN_Selector.noneEnabled = True\n self.LLN_Selector.setMRMLScene(slicer.mrmlScene)\n self.LLN_Selector.addEnabled = 0\n self.LLN_SelectorLabel = qt.QLabel('Native Look Locker')\n self.LLN_Selector.setToolTip(\"Select the pre contrast Look Locker to create the T1 Mapping\")\n self.InputOutput_Layout.addRow(self.LLN_SelectorLabel, self.LLN_Selector)",
"def __init__(self):\n self.raw_wires = PyWires.WireNetwork();\n self.__initialize_wires();",
"def setupLL_Enhanced(self):\n self.LLE_Selector = slicer.qMRMLNodeComboBox()\n self.LLE_Selector.nodeTypes = ['vtkMRMLMultiVolumeNode']\n self.LLE_Selector.noneEnabled = True\n self.LLE_Selector.setMRMLScene(slicer.mrmlScene)\n self.LLE_Selector.addEnabled = 0\n self.LLE_SelectorLabel = qt.QLabel('Enhanced Look Locker')\n self.LLE_Selector.setToolTip(\"Select the post contrast Look Locker to create the T1 Mapping\")\n self.InputOutput_Layout.addRow(self.LLE_SelectorLabel, self.LLE_Selector)",
"def IBOutlet(name=None):\n if name is None:\n return ivar(isOutlet=1)\n else:\n return ivar(name, isOutlet=1)",
"def __init__(self):\n _hypre.HypreILU_swiginit(self, _hypre.new_HypreILU())",
"def __init__(self):\n import visa\n\n rm = visa.ResourceManager()\n target = 'Agilent Technologies,8163B,MY48208514,V5.25(72637)'\n\n for dev in rm.list_resources():\n try:\n inst = rm.open_resource(dev)\n name = inst.query('*IDN?') # Agilent Technologies,8163B,MY48208514,V5.25(72637)\n if target in name:\n # TODO: check that the slot contains the correct module\n self._inst = inst\n except:\n continue\n\n if self._inst is None:\n raise RuntimeError(\"Target resource {} cannot be found in the VISA resource manager\".format(target))\n print(\"Connected to \" + self.id())",
"def __init__(self, config, ll=None, osimmodel=None, landmarks=None):\n self.config = config\n self.ll = ll\n self.trcdata = landmarks\n self.gias_osimmodel = None\n if osimmodel is not None:\n self.set_osim_model(osimmodel)\n self._unit_scaling = dim_unit_scaling(\n self.config['in_unit'], self.config['out_unit']\n )",
"def __init__(self, *args, **kwargs):\n self.args = args\n self.kwargs = kwargs\n\n config = kwargs.get(\"config\", kwargs)\n self.connection_type = config.get(\"connection_type\", None)\n self.connection = connection_decider.connection(device=self,\n conn_type=self.connection_type,\n **kwargs)\n self.connection.connect()\n self.consoles = [self]\n super(PrplMeshStation, self).__init__(*args, **kwargs)\n self.iface_dut = self.iface_wifi = self.kwargs.get(\n 'iface', 'wlan0')\n self.driver_name = config.get(\"driver\", \"nl80211,wext\")\n self.mac = self.get_mac()\n\n # kill all wpa_supplicant relevant to active interface\n self.wifi_disconnect()\n # Turn on and off wlan iface just in case\n self.disable_and_enable_wifi()",
"def __init__( self, owner, shoulderindex, wristindex, ctrlindex=0 ):\n\t\tself.shoulder = ServoJoint( owner, shoulderindex, ctrlindex ) \n\t\tself.wrist = ServoJoint( owner, wristindex, ctrlindex )",
"def __init__(self):\n self.new_dll = DLinkedList()",
"def __init__(self, layout=None):\n self.presentation_ended = False\n self.presentation = Presentation()\n self.layout = layout\n self.master_connection = None\n self.source = ''\n self.beacon = Beacon()\n self.beacon.start_beaconing()"
] | [
"0.5946556",
"0.5688111",
"0.55620337",
"0.55175424",
"0.53838944",
"0.5277127",
"0.52567214",
"0.52559364",
"0.5208461",
"0.5208461",
"0.51927763",
"0.51246226",
"0.50718874",
"0.5064975",
"0.5049424",
"0.5043597",
"0.50424546",
"0.50129515",
"0.5001687",
"0.49760997",
"0.49708754",
"0.49548873",
"0.49347582",
"0.4921544",
"0.4921092",
"0.49127913",
"0.48999384",
"0.4887002",
"0.48794746",
"0.48781332"
] | 0.58739966 | 1 |
This function recursively builds a string of manager to employee relationships starting from the managers that do not have managers. | def findHierarchy(self):
def __recursiveHelper(key_name, output, indent):
if key_name in self.relations:
for employee in self.relations[key_name].employees:
output += " " * indent + str(employee) +"\n"
# return __recursiveHelper(employee, output, indent+1)
__recursiveHelper(employee, output, indent+1)
else:
print(output)
return output
#experimenting with Iter() and next() iterators/generators
#and a while loop in the recursive function:
# def __recursiveHelper(key_name, output, indent):
# if key_name in self.relations:
# employees = iter(self.relations[key_name].employees)
# employee = next(employees, "stop")
# while employees and employee != 'stop':
# output += " " * indent + str(employee) +"\n"
# __recursiveHelper(next(employees, "stop"), output, indent+1)
# else:
# employee = next(employees, "stop")
#
# else:
# return output
output = ""
indent = -1
# self.relations is a dictionary of manager-name string keys.
# The employees of None are the top-ranking managers.
# only issue:
# having trouble returning the concatenated output
# from the recursive function:
return __recursiveHelper(None, output, indent+1) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def buildHierarchy(self, test_input):\n for entry in test_input:\n if entry['manager']not in self.relations:\n self.relations[entry['manager']] = Node(entry['manager'], entry['name'])\n else:\n self.relations[entry['manager']].employees.append(entry['name'])",
"def generate_full_chain(chain):\n list_of_subchains = [extract_amino_acids(subchain) for subchain in chain]\n # Join list into single string separated by spaces\n return ' '.join(list_of_subchains)",
"def str_recursive(node):\n\n if node == None:\n return \"\"\n else:\n return str(node.item) + \" \" + LinkedList.str_recursive(node.next)",
"def phone_dir_nav_eager():\n\n emps = Employee.query.options(db.joinedload('dept')).all()\n\n for emp in emps: # [<Emp>, <Emp>]\n if emp.dept is not None:\n print(emp.name, emp.dept.dept_code, emp.dept.phone)\n else:\n print(emp.name, \"-\", \"-\")",
"def phone_dir_nav():\n\n emps = Employee.query.all()\n\n for emp in emps: # [<Emp>, <Emp>]\n if emp.dept is not None:\n print(emp.name, emp.dept.dept_code, emp.dept.phone)\n else:\n print(emp.name, \"-\", \"-\")",
"def get_movie_people_relation(title, people_dict, movie_people_dict):\n for item in title:\n for key in people_dict.keys():\n for movie_title in people_dict[key]:\n if item == movie_title:\n if item in movie_people_dict.keys():\n if key not in movie_people_dict[item]:\n movie_people_dict[item] += ',' + key\n else:\n movie_people_dict[item] = key\n else:\n if item not in movie_people_dict.keys():\n movie_people_dict[item] = ''\n return movie_people_dict",
"def expand_paths_by_nodes(self, paths):\n paths_formatted = set()\n # Expand each path\n for path in paths:\n if len(path) < 2:\n continue\n expanded_paths = set()\n if self.include_entity:\n relations_for_each_step = [[path[0]]]\n else:\n relations_for_each_step = []\n for index in range(1, len(path)):\n node1 = path[index-1]\n node2 = path[index]\n if (node1, node2) in self.pair_to_relations:\n relations = self.pair_to_relations[(node1, node2)]\n else:\n print(node1, node2)\n relations_for_each_step.append(relations)\n if self.include_entity:\n relations_for_each_step.append([node2])\n expanded_paths.update(list(itertools.product(*relations_for_each_step)))\n paths_formatted.update(expanded_paths)\n return paths_formatted",
"def _intermediary_to_dot(tables, relationships):\n t = '\\n'.join(t.to_dot() for t in tables)\n r = '\\n'.join(r.to_dot() for r in relationships)\n return '{}\\n{}\\n{}\\n}}'.format(GRAPH_BEGINNING, t, r)",
"def _generate_hierarchy_string(self, skeleton):\n hierarchy_string = \"HIERARCHY\\n\"\n hierarchy_string += self._generate_joint_string(skeleton.root, skeleton, 0)\n return hierarchy_string",
"def asngen(pool):\n pool = AssociationPool.read(pool)\n rules = AssociationRegistry()\n (asns, orphaned) = generate(pool, rules)\n result = []\n result.append('There where {:d} associations found.'.format(len(asns)))\n result.append('There where {:d} orphaned exposures.'.format(len(orphaned)))\n for assocs in asns:\n result.append(assocs.__str__())\n\n return '\\n'.join(result)",
"def _return_string_all_descendants_rec(self, node, string, level):\n if len(node.get_children()) == 0:\n return string\n else:\n level += 1\n for child in node.get_children():\n string += \"| \"*level\n string += \"|---\" + str(child) + \"\\n\"\n string = self._return_string_all_descendants_rec(child, string, level)\n return string",
"def get_tree_str(self, depth: int = 0) -> str:\n temp = \" \" * depth + str(self.head) + \"\\n\"\n for son in self.sons:\n temp += son.get_tree_str(depth + 1)\n return temp",
"def __str__(self):\n stringRepresentation = []\n for node in self.getNodes():\n stringRepresentation.append(\"->\".join(\n (str(node), str(self.graph[node]))))\n\n return str(stringRepresentation)",
"def phone_dir_join_outerjoin():\n\n emps = (db.session.query(Employee, Department)\n .outerjoin(Department).all())\n\n for emp, dept in emps: # [(<E>, <D>), (<E>, <D>)]\n if dept:\n print(emp.name, dept.dept_name, dept.phone)\n else:\n print(emp.name, \"-\", \"-\")",
"def __str__(self):\n s = ''\n for node in self.nodes:\n s += '\\n\\n'+str(node)+'\\n\\t'\n edges = node.getChildren()\n keys = edges.keys()\n keys.sort()\n for key in keys:\n bounds = edges[key].getSuffix()\n s += str(edges[key])+' '\n for i in xrange(bounds[0], bounds[1]):\n s += self.target[i]\n s += '\\n\\t'\n return s",
"def make_to_string(front, mid, back, empty_repr):\n \"*** YOUR CODE HERE ***\"\n def printer(lnk):\n if lnk == Link.empty:\n return empty_repr\n else:\n return front + str(lnk.first) + mid + printer(lnk.rest) + back\n return printer",
"def str_reverse_recur(node):\n\n if node == None:\n return \"\"\n else:\n return LinkedList.str_reverse_recur(node.next) + \" \" + str(node.item)",
"def _generate_sql_parts(self, node,i=0,colNames=None,sql=None):\n\t\treferencesPersonFact = False\n\t\tif i == 0:\n\t\t\tsql=[]\n\t\t\tcolNames=[]\n\t\t\t# print('\\nSELECT *\\nFROM {}'.format(node))\n\t\tfor edge in self.DiG.out_edges(node):\n\t\t\t# print('\\tedge: {}->{} {}'.format(*edge,self.DiG.get_edge_data(*edge)))\n\t\t\tcolNames.append('{}.{}'.format(edge[1],self.DiG.get_edge_data(*edge)['Column']))\n\t\t\t# print('{}LEFT JOIN {}\\n{}ON {}.{}={}.{}'.format('\\t'*i,edge[1],'\\t'*i,edge[1],self.DiG.get_edge_data(*edge)['Column'],edge[0],self.DiG.get_edge_data(*edge)['Column']))\n\t\t\tsql.append('{}LEFT JOIN {}\\n{}ON {}.{}={}.{}'.format('\\t'*i,edge[1],'\\t'*i,edge[1],self.DiG.get_edge_data(*edge)['Column'],edge[0],self.DiG.get_edge_data(*edge)['Column']))\n\t\t\tself._generate_sql_parts(edge[1],i+1,colNames,sql)\n\t\t\t# if 'dbo.PersonFact' in edge[0] or 'dbo.PersonFact' in edge[1]:\n\t\t\t\t# referencesPersonFact = True\n\t\t# print('_generate_sql_parts')\n\t\t# print(colNames)\n\t\t# if referencesPersonFact and 'CommunityMart.dbo.PersonFact.PatientID' not in colNames:\n\t\t\t# colNames.append('CommunityMart.dbo.PersonFact.PatientID')\n\t\tnet_new_colNames = []\n\t\t# remove colNames of already in leaf table\n\t\tfor colName in colNames:\n\t\t\tif node not in colName:\n\t\t\t\tnet_new_colNames.append(colName)\n\t\treturn net_new_colNames,sql",
"def format_relation(relation: list):\n pattern = \"%1s%6s%8i%1i%1s%10.2f%10.2f%1i%5i%5i%1i%10.2f%10.2f%10.2f%1i\"\n return pattern % (\n relation[0],\n relation[1] if relation[1] is not None else 0,\n relation[2],\n relation[3],\n relation[4],\n relation[5],\n relation[6],\n relation[7],\n relation[8],\n relation[9],\n relation[10],\n relation[11],\n relation[12],\n relation[13],\n relation[14]\n )",
"def get_personnel():\r\n if len(man) == 0:\r\n print(\"There are no managers\")\r\n else:\r\n for i in man:\r\n print(str(i))",
"def _generate_subgraph_sql_parts(self, node, subgraph, i=0,colNames=None,sql=None):\n\t\treferencesPersonFact = False\n\t\tif i == 0:\n\t\t\tsql=[]\n\t\t\tcolNames=[]\n\t\t\t# print('\\nSELECT *\\nFROM {}'.format(node))\n\t\tfor edge in subgraph.out_edges(node):\n\t\t\t# print('\\tedge: {}->{} {}'.format(*edge,subgraph.get_edge_data(*edge)))\n\t\t\tcolNames.append('{}.{}'.format(edge[0],subgraph.get_edge_data(*edge)['Column']))\n\t\t\t# print('{}LEFT JOIN {}\\n{}ON {}.{}={}.{}'.format('\\t'*i,edge[1],'\\t'*i,edge[1],subgraph.get_edge_data(*edge)['Column'],edge[0],subgraph.get_edge_data(*edge)['Column']))\n\t\t\tsql.append('{}LEFT JOIN {}\\n{}ON {}.{}={}.{}'.format('\\t'*i,edge[1],'\\t'*i,edge[1],subgraph.get_edge_data(*edge)['Column'],edge[0],subgraph.get_edge_data(*edge)['Column']))\n\t\t\tself._generate_subgraph_sql_parts(edge[1],subgraph,i+1,colNames,sql)\n\t\t\tif 'dbo.PersonFact' in edge[0] or 'dbo.PersonFact' in edge[1]:\n\t\t\t\treferencesPersonFact = True\n\t\t# print('_generate_subgraph_sql_parts')\n\t\t# if referencesPersonFact and 'CommunityMart.dbo.PersonFact.PatientID' not in colNames:\n\t\t\t# colNames.append('CommunityMart.dbo.PersonFact.PatientID')\n\t\t# remove colNames of already in leaf table\n\t\tnet_new_colNames = []\n\t\tfor colName in colNames:\n\t\t\tif node not in colName:\n\t\t\t\tnet_new_colNames.append(colName)\n\t\treturn net_new_colNames,sql",
"def get_programs(e: str, ans: str, all_paths_around_e: List[List[str]]):\n all_programs = []\n for path in all_paths_around_e:\n for l, (r, e_dash) in enumerate(path):\n if e_dash == ans:\n # get the path till this point\n all_programs.append([x for (x, _) in path[:l + 1]]) # we only need to keep the relations\n return all_programs",
"def get_relations(self):\n triples = list(self.get_triples())\n\n for s, p, o in triples:\n if not p.startswith(\"rel\"):\n s, o = int(s.id), int(o.id)\n yield {\"predicate\": p,\n \"subject\": s,\n \"subject_nodes\": list(self.get_descendants(s, triples)),\n \"object\": o,\n \"object_nodes\": list(self.get_descendants(o, triples)),\n }",
"def _find_relations(self, node, depth=0):\n depth += 1\n\n model = node.model\n opts = model._meta\n\n # determine relational fields to determine paths\n forward_fields = opts.fields\n reverse_fields = opts.get_all_related_objects()\n\n forward_o2o = filter(self._filter_one2one, forward_fields)\n reverse_o2o = filter(self._filter_related_one2one, reverse_fields)\n\n forward_fk = filter(self._filter_fk, forward_fields)\n reverse_fk = filter(self._filter_related_fk, reverse_fields)\n\n forward_m2m = filter(self._filter_m2m, opts.many_to_many)\n reverse_m2m = filter(self._filter_related_m2m,\n opts.get_all_related_many_to_many_objects())\n\n # iterate m2m relations\n for f in forward_m2m:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'manytomany',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related m2m fields\n for r in reverse_m2m:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'manytomany',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over one2one fields\n for f in forward_o2o:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'onetoone',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': False,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related one2one fields\n for r in reverse_o2o:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'onetoone',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': False,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over fk fields\n for f in forward_fk:\n kwargs = {\n 'parent': node,\n 'model': f.rel.to,\n 'relation': 'foreignkey',\n 'reverse': False,\n 'related_name': f.name,\n 'accessor_name': f.name,\n 'nullable': f.null,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n # iterate over related foreign keys\n for r in reverse_fk:\n kwargs = {\n 'parent': node,\n 'model': r.model,\n 'relation': 'foreignkey',\n 'reverse': True,\n 'related_name': r.field.related_query_name(),\n 'accessor_name': r.get_accessor_name(),\n 'nullable': True,\n 'depth': depth,\n }\n self._add_node(**kwargs)\n\n return node",
"def _parse_relators(rels):\n return rels",
"def phone_dir_join():\n\n emps = (db.session.query(Employee.name,\n Department.dept_name,\n Department.phone)\n .join(Department).all())\n\n for name, dept, phone in emps: # [(n, d, p), (n, d, p)]\n print(name, dept, phone)",
"def __str__(self):\n left = ''\n right = ''\n for i in range(len(self.ant)):\n left += Prop.__str__(self.ant[i]) + \", \"\n \n for i in range(len(self.con)):\n right += Prop.__str__(self.con[i]) + \", \"\n return left[:-2] + '|-- ' + right[:-2]",
"def __str__(self):\n _str = \"\"\n current_node = self._head\n while(current_node != None):\n _str += str(current_node.value)\n _str += \" -> \"\n current_node = current_node.next\n _str += \"None\"\n return _str",
"def macs_to_str(self, reached_max_depth: bool) -> str:\n if self.num_params > 0 and (\n reached_max_depth or not any(self.module.children())\n ):\n return f\"{self.macs:,}\"\n return \"--\"",
"def get_relations(char):\n\n def parse_name(relation):\n \"\"\"Helper function for outputting string display of character name\"\"\"\n if relation.player:\n char_ob = relation.player.char_ob\n return \"%s %s\" % (char_ob.key, char_ob.item_data.family)\n else:\n return str(relation)\n\n try:\n dom = char.player_ob.Dominion\n parents = []\n uncles_aunts = []\n for parent in dom.all_parents:\n parents.append(parent)\n for sibling in parent.siblings:\n uncles_aunts.append(sibling)\n for spouse in sibling.spouses.all():\n uncles_aunts.append(spouse)\n\n unc_or_aunts = set(uncles_aunts)\n relations = {\n \"parents\": [parse_name(ob) for ob in parents],\n \"siblings\": list(parse_name(ob) for ob in dom.siblings),\n \"uncles_aunts\": list(parse_name(ob) for ob in unc_or_aunts),\n \"cousins\": list(parse_name(ob) for ob in dom.cousins),\n }\n return relations\n except AttributeError:\n return {}"
] | [
"0.5634923",
"0.53542036",
"0.5284192",
"0.5087596",
"0.49705473",
"0.4939793",
"0.49350137",
"0.48989594",
"0.48973182",
"0.48740613",
"0.485191",
"0.48157832",
"0.47859207",
"0.47704506",
"0.47661456",
"0.4745172",
"0.47298232",
"0.47289705",
"0.4718572",
"0.46928048",
"0.4681505",
"0.4680061",
"0.46481207",
"0.46174118",
"0.45920128",
"0.45801604",
"0.45766482",
"0.45643035",
"0.45582384",
"0.45357338"
] | 0.6343025 | 0 |
Extract zipfile to a directory if password is correct. | def extractfile(file, passwd):
try:
zipf = zipfile.ZipFile(file)
zipf.extractall(path=os.path.join(file[:-4]), pwd=str.encode(passwd))
print('Password: {}'.format(passwd))
except:
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unzipArchives(zip_file, password):\n with ZipFile(zip_file) as archive:\n archive.extractall(pwd=bytes(password, \"utf8\"))",
"def unzip_item(source_path, destination_path, password):\n\n if not destination_path:\n destination_path = source_path.replace(\".zip\", \"\")\n if not os.path.isdir(destination_path):\n os.makedirs(destination_path)\n else:\n destination_path += \"_unzipped\"\n if not os.path.isdir(destination_path):\n os.makedirs(destination_path)\n\n try:\n with pyzipper.AESZipFile(source_path) as z:\n members = z.infolist()\n for i, member in enumerate(members):\n z.extract(member, destination_path, pwd=password)\n print(f\"Unpacked {member.filename} from archive.\")\n print(f\"{source_path} unpacked successfully to {destination_path}.\")\n except Exception:\n tb = traceback.format_exc()\n print(\"Something went wrong\")\n print(tb)",
"def unzipfile(filename, passcode):\n # Password is SHA-256 hash of the pass code received\n password = hashlib.sha256(passcode.encode('utf-8')).hexdigest()\n # Unzip with password\n with ZipFile(filename) as zf:\n zf.extractall(pwd=bytes(password, 'utf-8'))",
"def extract_zip_contents(zip_file, destination):\n logging.info(\"Extracting ZIP File\")\n if os.path.isfile(zip_file):\n with zipfile.ZipFile(zip_file, \"r\") as zip_ref:\n zip_ref.extractall(destination)\n else:\n logging.error(\"%s not found.\", zip_file)\n sys.exit(\"ZIP is not the filesystem.\")",
"def extract_zip(zip_path, target_folder):\n with zipfile.ZipFile(zip_path) as archive:\n archive.extractall(target_folder)",
"def extract_and_clean(zipper, zip_path, filename):\n zipper.extract(zip_path)\n if \"/\" in zip_path :\n os.rename(zip_path, filename)\n shutil.rmtree(zip_path.split('/')[0])",
"def extract_zip(file, extract_location):\n\n with zipfile.ZipFile(file, \"r\") as zip_ref:\n zip_ref.extractall(extract_location)\n\n print(f\"Extracted file to {extract_location}\")",
"def extract_file(self):\n# path_destination = os.path.join(\n# self.root, self.resources.replace(\".zip\", \"\"))\n# os.makedirs(path_destination, exist_ok=True)\n shutil.unpack_archive(os.path.join(\n self.root, self.resources), self.root)\n os.remove(os.path.join(self.root, self.resources))",
"def _extract_file(dest_path, root_dir):\n logger.info(\"Unzipping the dataset file.\")\n with zipfile.ZipFile(dest_path, \"r\") as zip_dir:\n zip_dir.extractall(root_dir)",
"def unzip_file(path_to_zip_file, directory_to_extract_to):\n \n with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:\n zip_ref.extractall(directory_to_extract_to)\n\n return",
"def SshExtractZip(host, zipname, dst):\n command = ['ssh', host, 'unzip', '-o', '-d', dst, zipname]\n result = RunCommand(command)\n if result:\n raise ExternalError('Failed to ssh unzip -o -d \"%s\" \"%s\" on \"%s\" (%s)' %\n (dst, zipname, host, result))\n\n # unzip will create directories with access 700, which is not often what we\n # need. Fix the permissions for the whole archive.\n command = ['ssh', host, 'chmod', '-R', '755', dst]\n result = RunCommand(command)\n if result:\n raise ExternalError('Failed to ssh chmod -R 755 \"%s\" on \"%s\" (%s)' %\n (dst, host, result))",
"def __extract_zip(self):\n archive_binaries_dir = None\n zip_file = zipfile.ZipFile(self.archive)\n try:\n extract_dir = tempfile.mkdtemp()\n archive_binaries_dir = self.__create_extraction_dir(\n zip_file.namelist(), extract_dir, zip_file.extract)\n finally:\n zip_file.close()\n return archive_binaries_dir, extract_dir",
"def unzip_file(zip_path, directory_to_extract_to):\n ensure_dir(directory_to_extract_to)\n with zipfile.ZipFile(file=zip_path) as zip_file:\n # Loop over each file\n for file in tqdm(iterable=zip_file.namelist(), total=len(zip_file.namelist())):\n try:\n zip_file.extract(member=file, path=directory_to_extract_to)\n except BadZipFile as e:\n print(e)",
"def zip_folder(source_path, destination_path, password):\n\n source_path = os.path.abspath(source_path)\n\n if not destination_path:\n destination_path = source_path + \".zip\"\n\n if not destination_path.endswith(\".zip\"):\n destination_path += \".zip\"\n\n try:\n parent_folder = os.path.dirname(source_path)\n contents = os.walk(source_path)\n\n if password:\n z = pyzipper.AESZipFile(destination_path + \"\\\\\", 'w', compression=pyzipper.ZIP_LZMA, encryption=pyzipper.WZ_AES)\n z.setpassword(password)\n else:\n z = pyzipper.ZipFile(destination_path + \"\\\\\", 'w', compression=pyzipper.ZIP_LZMA)\n\n try:\n for root, folders, files in contents:\n # Include all subfolders, including empty ones.\n for folder_name in folders:\n absolute_path = os.path.join(root, folder_name)\n relative_path = absolute_path.replace(parent_folder + '\\\\', '')\n print(f\"Adding {absolute_path} to archive.\")\n z.write(absolute_path, relative_path)\n for file_name in files:\n absolute_path = os.path.join(root, file_name)\n relative_path = absolute_path.replace(parent_folder + '\\\\', '')\n print(f\"Adding {absolute_path} to archive.\")\n z.write(absolute_path, relative_path)\n print(f\"{destination_path} created successfully.\")\n\n except Exception:\n tb = traceback.format_exc()\n print(\"Something went wrong\")\n print(tb)\n\n finally:\n z.close()\n\n except Exception:\n tb = traceback.format_exc()\n print(\"Something went wrong\")\n print(tb)",
"def unzip_file(zip_file: str) -> None:\n destination = tempfile.mkdtemp(prefix='gaelo_pross_unzip_')\n with ZipFile(zip_file) as my_zip:\n for member in my_zip.namelist():\n filename = os.path.basename(member)\n # skip directories\n if not filename:\n continue\n # copy file (taken from zipfile's extract)\n source = my_zip.open(member)\n target = open(os.path.join(destination, filename), \"wb\")\n with source, target:\n shutil.copyfileobj(source, target)\n # return destination",
"def fromZip(self, zip_location,extract_location):\n zip_file = zipfile.ZipFile(zip_location,'r')\n zip_file.extractall(extract_location)",
"def ExtractZip(zip_path, dest_dir):\n zip_path = GetWindowsPathWithUNCPrefix(zip_path)\n dest_dir = GetWindowsPathWithUNCPrefix(dest_dir)\n with zipfile.ZipFile(zip_path) as zf:\n for info in zf.infolist():\n zf.extract(info, dest_dir)\n # UNC-prefixed paths must be absolute/normalized. See\n # https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file#maximum-path-length-limitation\n file_path = os.path.abspath(os.path.join(dest_dir, info.filename))\n # The Unix st_mode bits (see \"man 7 inode\") are stored in the upper 16\n # bits of external_attr. Of those, we set the lower 12 bits, which are the\n # file mode bits (since the file type bits can't be set by chmod anyway).\n attrs = info.external_attr >> 16\n if attrs != 0: # Rumor has it these can be 0 for zips created on Windows.\n os.chmod(file_path, attrs & 0o7777)",
"def _unzip_file(zip_file_path: str, unzip_dir: str = \"\") -> None:\n if not unzip_dir:\n unzip_dir = os.path.dirname(zip_file_path)\n op_desc = f\"Extracting: {os.path.basename(zip_file_path)}\"\n try:\n with ZipFile(file=zip_file_path) as zip_file:\n for member_name in tqdm(zip_file.namelist(), desc=op_desc):\n file_name = os.path.basename(member_name)\n if not file_name:\n continue\n target_path = os.path.join(unzip_dir, file_name)\n target_path = open(target_path, \"wb\")\n source_file = zip_file.open(member_name)\n with source_file, target_path:\n shutil.copyfileobj(source_file, target_path)\n os.remove(zip_file_path)\n except Exception as zip_error:\n zip_file_str = os.path.basename(zip_file_path)\n zip_file_str = os.path.splitext(zip_file_str)[0]\n for file_name in os.listdir(unzip_dir):\n if zip_file_str in file_name:\n os.remove(os.path.join(unzip_dir, file_name))\n raise zip_error",
"def ExtractZip(filename, output_dir, verbose=True):\n MaybeMakeDirectory(output_dir)\n\n # On Linux and Mac, we use the unzip command as it will\n # handle links and file bits (executable), which is much\n # easier then trying to do that with ZipInfo options.\n #\n # On Windows, try to use 7z if it is installed, otherwise fall back to python\n # zip module and pray we don't have files larger than 512MB to unzip.\n unzip_cmd = None\n if IsLinux():\n unzip_cmd = ['unzip', '-o']\n elif IsMac():\n # The Mac version of unzip does not have LARGE_FILE_SUPPORT until\n # macOS 10.12, so use ditto instead. The Python ZipFile fallback\n # used on Windows does not support symbolic links, which makes it\n # unsuitable for Mac builds.\n unzip_cmd = ['ditto', '-x', '-k']\n elif IsWindows() and os.path.exists('C:\\\\Program Files\\\\7-Zip\\\\7z.exe'):\n unzip_cmd = ['C:\\\\Program Files\\\\7-Zip\\\\7z.exe', 'x', '-y']\n\n if unzip_cmd:\n # Make sure path is absolute before changing directories.\n filepath = os.path.abspath(filename)\n saved_dir = os.getcwd()\n os.chdir(output_dir)\n command = unzip_cmd + [filepath]\n # When using ditto, a destination is required.\n if command[0] == 'ditto':\n command += ['.']\n result = RunCommand(command)\n os.chdir(saved_dir)\n if result:\n raise ExternalError('unzip failed: %s => %s' % (str(command), result))\n else:\n assert IsWindows()\n zf = zipfile.ZipFile(filename)\n # TODO(hinoka): This can be multiprocessed.\n for name in zf.namelist():\n if verbose:\n print 'Extracting %s' % name\n zf.extract(name, output_dir)\n if IsMac():\n # Restore permission bits.\n os.chmod(os.path.join(output_dir, name),\n zf.getinfo(name).external_attr >> 16L)",
"def extract(cls, path, outdir):\r\n with open_zip(path) as zip:\r\n for path in zip.namelist():\r\n # While we're at it, we also perform this safety test.\r\n if path.startswith('/') or path.startswith('..'):\r\n raise ValueError('Zip file contains unsafe path: %s' % path)\r\n # Ignore directories. extract() will create parent dirs as needed.\r\n if not path.endswith('/'):\r\n zip.extract(path, outdir)",
"def unzip_to_temp_dir(zip_file_name):\n if not zip_file_name or not os.path.exists(zip_file_name):\n return None\n\n zf = zipfile.ZipFile(zip_file_name)\n\n if zf.testzip() is not None:\n return None\n\n # Unzip the files into a temporary directory\n LOGGER.info(\"Extracting zipped file: %s\" % zip_file_name)\n tempdir = tempfile.mkdtemp()\n\n try:\n # Create directories that don't exist\n for zip_name in zf.namelist():\n # We have no knowledge on the os where the zipped file was\n # created, so we restrict to zip files with paths without\n # charactor \"\\\" and \"/\".\n name = (zip_name.replace(\"\\\\\", os.path.sep).\n replace(\"/\", os.path.sep))\n dest = os.path.join(tempdir, name)\n if (name.endswith(os.path.sep) and not os.path.exists(dest)):\n os.mkdir(dest)\n LOGGER.debug(\"Directory %s created.\" % dest)\n\n # Copy files\n for zip_name in zf.namelist():\n # We have no knowledge on the os where the zipped file was\n # created, so we restrict to zip files with paths without\n # charactor \"\\\" and \"/\".\n name = (zip_name.replace(\"\\\\\", os.path.sep).\n replace(\"/\", os.path.sep))\n dest = os.path.join(tempdir, name)\n if not (name.endswith(os.path.sep)):\n LOGGER.debug(\"Copying file %s......\" % dest)\n outfile = open(dest, 'wb')\n outfile.write(zf.read(zip_name))\n outfile.close()\n LOGGER.debug(\"File %s copied.\" % dest)\n\n LOGGER.info(\"Unzipped file can be found at %s\" % tempdir)\n return tempdir\n\n except IOError as err:\n LOGGER.error(\"Error in extracting webdriver.xpi: %s\" % err)\n return None",
"def unzip(input_filename, extract_dir):\n if not zipfile.is_zipfile(input_filename):\n raise ValueError(\"%s is not a zip file\" % (input_filename))\n zip_ds = zipfile.ZipFile(input_filename)\n zip_ds.extractall(path=extract_dir)\n zip_ds.close()",
"def unzip(zfile, md=False):\n\tbasedir = ''\n\tcount = -1\n\tif md:\n\t\tbasedir = prepareBaseDir(zfile)\n\t\n\tzfile = zipfile.ZipFile(zfile, 'r')\n\tfor name in zfile.namelist():\n\t\tcount+=1\n\t\tuname = name.decode('gbk')\n\t\tif uname.endswith('.DS_Store'):\n\t\t\tcontinue\n\t\t\n\t\t#prepare directory\n\t\tdirs = os.path.dirname(uname)\n\t\tif basedir:\n\t\t\tdirs = os.path.join(basedir, dirs)\n\t\tprint 'Extracting: ' + uname\n\t\tif dirs and not os.path.exists(dirs):\n\t\t\tprint 'Prepare directories: ', dirs\n\t\t\tos.makedirs(dirs)\n\t\tif (count == 0):\n\t\t\thomeDir = uname[:-1]\n\t\t#ready to unzip file\n\t\tdata = zfile.read(name)\n\t\tif basedir:\n\t\t\tuname = os.path.join(basedir, uname)\n\t\tif not os.path.exists(uname):\n\t\t\tfo = open(uname, 'w')\n\t\t\tfo.write(data)\n\t\t\tfo.close()\n\tzfile.close()\n\treturn homeDir",
"def getzip(url, zipfile, unzipdir):\n done_file = os.path.join(unzipdir, '.'+os.path.basename(zipfile)+'.done')\n if file_exists(done_file):\n print('{} already downloaded and extracted; skipping. To reinstall \"rm {}\"'.format(os.path.basename(zipfile), done_file))\n else:\n print('Downloading {} as {}.'.format(url, zipfile))\n urlretrieve(url, zipfile)\n print('Extracting {} into {}.'.format(zipfile, unzipdir))\n with ZipFile(zipfile, 'r') as zip:\n zip.extractall(unzipdir)\n os.remove(zipfile)\n with open(done_file, 'w'):\n pass",
"def ZipExtract(zipname, filename, path=os.getcwd()):\n try:\n zpf = zipfile.ZipFile(zipname)\n zpf.extract(filename, path)\n zpf.close()\n return True\n except KeyError:\n logging.warning('Could not find %s to extract from %s.',\n (filename, zipname))\n return False",
"def _extract_if_zip(tmpdir: str, config: CSCConfig) -> str:\n if os.path.isdir(config.reads):\n return config.reads\n else:\n extracted_dir = os.path.join(tmpdir, f\"{config.input_format}s\")\n os.makedirs(extracted_dir)\n with zipfile.ZipFile(config.reads) as zip_file:\n files = [finfo for finfo in zip_file.infolist() if finfo.filename.endswith(f\".{config.input_format}\")]\n for extract_file in files:\n zip_file.extract(extract_file, extracted_dir)\n return extracted_dir",
"def unzip_file(data_zip, path_unzip):\r\n with zipfile.ZipFile(data_zip, \"r\") as zip_temp:\r\n zip_temp.extractall(path_unzip)",
"def _do_unzip(zipped_file, output_directory):\n z = zipfile.ZipFile(zipped_file)\n for path in z.namelist():\n relative_path = os.path.join(output_directory, path)\n dirname, dummy = os.path.split(relative_path)\n try:\n if relative_path.endswith(os.sep) and not os.path.exists(dirname):\n os.makedirs(relative_path)\n elif not os.path.exists(relative_path):\n dirname = os.path.join(output_directory, os.path.dirname(path))\n if os.path.dirname(path) and not os.path.exists(dirname):\n os.makedirs(dirname)\n fd = open(relative_path, \"w\")\n fd.write(z.read(path))\n fd.close()\n except IOError, e:\n raise e\n return output_directory",
"def unzip(file_loc, extract_loc=None):\n try:\n with zipfile.ZipFile(\n file_loc, \"r\"\n ) as file: # opening the zip file using 'zipfile.ZipFile' class\n print(\"Ok\")\n # ZipFile.infolist() returns a list containing all the members of an archive file\n print(file.infolist())\n\n # ZipFile.namelist() returns a list containing all the members with names of an archive file\n print(file.namelist())\n\n # ZipFile.getinfo(path = filepath) returns the information about a member of Zip file.\n # It raises a KeyError if it doesn't contain the mentioned file\n print(file.getinfo(file.namelist()[-1]))\n\n # If extraction directory not given, extracted to 'data/processed/file_name'\n if extract_loc == None:\n base = os.path.dirname(file_loc)\n folder_name = os.path.basename(base)\n extract_loc = \"data/processed/\" + folder_name\n\n # ZipFile.extractall(path = filepath, pwd = password) extracts all\n # the files to current directory\n file.extractall(path=extract_loc)\n # after executing check the directory to see extracted files\n\n except zipfile.BadZipFile: # if the zip file has any errors then it prints the\n # error message which you wrote under the 'except' block\n print(\"Error: Zip file is corrupted\")\n\n except zipfile.LargeZipFile:\n print(\"Error: File size if too large\") # if the file size is too large to\n # open it prints the error you have written\n except FileNotFoundError:\n print(\"Error: File not found\")",
"def _maybe_download_and_extract(self, filename):\n if not os.path.exists(self.work_dir):\n os.mkdir(self.work_dir)\n filepath = os.path.join(self.work_dir, filename)\n if not os.path.exists(filepath):\n filepath, _ = urllib.urlretrieve(self.url + filename, filepath)\n statinfo = os.stat(filepath)\n log.info('Successfully downloaded', filename, statinfo.st_size,\n 'bytes.')\n log.info('Extracting zip file ... ')\n f = zipfile.ZipFile(filepath)\n f.extractall(path=self.work_dir)\n log.info('Extraction finished ... ')"
] | [
"0.73367697",
"0.68281114",
"0.6682046",
"0.66589713",
"0.6608535",
"0.65792376",
"0.65135366",
"0.65102696",
"0.6478845",
"0.64773226",
"0.64197737",
"0.6360199",
"0.6358647",
"0.6311184",
"0.62914145",
"0.6283126",
"0.6278718",
"0.62752926",
"0.62581104",
"0.62555015",
"0.62402403",
"0.6229527",
"0.62201023",
"0.61923414",
"0.6169135",
"0.61554027",
"0.6123011",
"0.6120836",
"0.610842",
"0.6105338"
] | 0.7807078 | 0 |
Calculate Profit of Order | def calculate_profit(self): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def profit(self):\n retail_value = 0\n wholesale_value = 0\n for bike in self.sold:\n retail_value += bike.total_cost() + (\n self.retail_margin * bike.total_cost())\n wholesale_value += bike.total_cost()\n return retail_value - wholesale_value",
"def profit_per_item(self, pk=None):\n total_profit = 0\n total_cost = self.item_cost + self.shipping_cost + self.listing_fee + self.final_value_fee\n total_paid = self.shipping_paid + self.item_paid\n total_profit = total_paid - total_cost\n return total_profit",
"def get_profit(self):\n # Profit from previous transactions\n values = [t['value'] for t in self.transactions]\n\n profits = []\n base = None\n for v in values:\n if not base:\n base = v\n profit = v - base\n profits.append(profit)\n base = v\n\n return np.array(profits).sum()\n\n # Get all values to get profit\n #return np.array([ s['value'] for s in self.states ]).mean()",
"def potential_profit(self):\n potential_profit = (_House.closing_cost*self.vars['after_repair_value']) - self.vars['purchase_price'] - self.total_spent() - _House.broker_fee\n return round(potential_profit, 2)",
"def calc_profit(self, assignment):\n return sum([self.profit(agent, task)\n for agent, tasks in assignment.items() \n for task in tasks])",
"def envisaged_profit(self):\n profit = round(\n self.calcul_buy_nb_action() * self.take_profit - self.investment_price(),\n 2,\n )\n percent_profit = round(profit * 100 / self.capital, 2)\n return profit, percent_profit",
"def maxProfit(self, prices):\r\n\t\tprofit = 0",
"def calc_earning(self, data=None):\n result = Result()\n if data is None:\n data = self.security\n self.calcDecision()\n first_purchase_method = self.check_first_purchase_method()\n for i in np.arange(len(data['Close'])):\n if data['FinalDecision'].iloc[i] is None:\n pass\n elif data['FinalDecision'].iloc[i] == TransactionType.BUY:\n if data['FinalDecision'].iloc[i-1] == TransactionType.BUY:\n pass\n else:\n if (self.buys_made + self.sells_made) == 0:\n if first_purchase_method == FirstTransactionType.INIT_CAPITAL:\n self.shares_own = int((self.init_capital/data['Close'].iloc[i]))\n self.buys_made += 1\n elif first_purchase_method == FirstTransactionType.STOCK_QUANTITY:\n self.shares_own = self.stock_quantity\n self.buys_made += 1\n else:\n self.shares_own = int(self.final_capital / data['Close'].iloc[i])\n self.final_capital = self.final_capital % data['Close'].iloc[i]\n #print(self.shares_own)\n\n elif data['FinalDecision'].iloc[i] == TransactionType.SELL:\n if data['FinalDecision'].iloc[i-1] == TransactionType.SELL:\n pass\n else:\n if (self.buys_made + self.sells_made) == 0:\n pass\n else:\n self.final_capital += self.shares_own * data['Close'].iloc[i]\n self.shares_own = 0\n self.sells_made +=1\n #Checar si es el momento mas alto o bajo de ganancias\n if self.shares_own == 0:\n if (self.highest_point is None\n or self.highest_point < self.final_capital):\n self.highest_point = self.final_capital\n if (self.lowest_point is None\n or self.lowest_point > self.final_capital\n or self.lowest_point == 0):\n self.lowest_point = self.final_capital\n else:\n if (self.highest_point is None\n or self.highest_point < (self.shares_own * data['Close'].iloc[i])):\n self.highest_point = self.final_capital\n if (self.lowest_point is None\n or self.lowest_point > (self.shares_own * data['Close'].iloc[i])\n or self.lowest_point == 0):\n self.lowest_point = self.final_capital\n self.calcRealFinalCapital()\n self.calcDiferencePercentage()",
"def curProfit(curPrice, prevPrice, demandIntcpt, k1, k2, a, b, unitCost, coff):\n\treturn curDemand(curPrice, prevPrice, demandIntcpt, k1, k2, a, b, coff) * (curPrice - unitCost)",
"def _calc_return(self, order_original, perf_df):\r\n\r\n order = order_original.copy()\r\n no_sec = len(self.perf_data)\r\n price_names = np.array(['price_' + str(i) for i in xrange(1, no_sec + 1)])\r\n ret = np.zeros((np.shape(order)[0], no_sec))\r\n\r\n transaction_cost = 0\r\n\r\n # buy_list vs sell_list contains order bought vs sold that cannot be matched yet to determine the return\r\n # For example when something has been bought, but nothing or not enough has been sold yet, the residue will be\r\n # listed in these lists.\r\n buy_shares = np.zeros((np.shape(order)[0], no_sec))\r\n buy_price = np.zeros((np.shape(order)[0], no_sec))\r\n sell_shares = np.zeros((np.shape(order)[0], no_sec))\r\n sell_price = np.zeros((np.shape(order)[0], no_sec))\r\n\r\n # bl_first vs sl_first indicates which row in buy_list vs sell_list can be used to \"match\" bought/sold shares.\r\n # It automatically points to the oldest row with still outstanding shares. Initial value is -1\r\n # bl_last vs sl_last indicates which row in buy_list vs sell_list can be used to write outstanding shares to.\r\n bl_first = np.ones(no_sec).astype(int) * -1\r\n bl_last = np.zeros(no_sec).astype(int)\r\n sl_first = np.ones(no_sec).astype(int) * -1\r\n sl_last = np.zeros(no_sec).astype(int)\r\n\r\n for ind in range(0, np.shape(order)[0]):\r\n bl_first[(bl_first == -1) & (bl_last > 0)] = 0\r\n sl_first[(sl_first == -1) & (sl_last > 0)] = 0\r\n\r\n # Three situations, per type: buy, sell, nothing\r\n # If nothing, skip to next day\r\n # Only returns made on one day are determined, later they will be accumulated.\r\n\r\n # Situation A.A: Sell order & outstanding buys larger than sell order\r\n col_to_change = (order[ind, :] < 0) & (np.sum(buy_shares, 0) > -order[ind, :])\r\n if sum(col_to_change) != 0:\r\n share_cumsum = np.cumsum(buy_shares, 0)\r\n share_compl = (share_cumsum < -order[ind, :]) & col_to_change\r\n numb_shares = sum(buy_shares * share_compl, 0)[col_to_change]\r\n ret[ind, col_to_change] += numb_shares * perf_df.loc[ind, price_names[col_to_change]] \\\r\n - sum(buy_shares * buy_price * share_compl, 0)[col_to_change]\r\n buy_shares[share_compl] = 0\r\n bl_first += sum(share_compl)\r\n order[col_to_change] += numb_shares\r\n\r\n ret[ind, col_to_change] += perf_df.loc[ind, price_names[col_to_change]] * -order[ind, col_to_change] * (1 - transaction_cost) \\\r\n - buy_price[bl_first[col_to_change], col_to_change] \\\r\n * -order[ind, col_to_change] * (1 + transaction_cost)\r\n buy_shares[bl_first[col_to_change], col_to_change] += order[ind, col_to_change]\r\n order[ind, col_to_change] = 0\r\n\r\n # Situation A.B: Sell order & outstanding buys smaller than or equal to sell order\r\n # --> just fill out all outstanding buys, and change order. 
This order will be added to sell list in A.C\r\n col_to_change = (order[ind, :] < 0) & (np.sum(buy_shares, 0) > 0) \\\r\n & (np.sum(buy_shares, 0) <= -order[ind, :])\r\n if sum(col_to_change) != 0:\r\n numb_shares = buy_shares[:, col_to_change]\r\n price_shares = buy_price[:, col_to_change]\r\n ret[ind, col_to_change] += np.sum(numb_shares, 0) * \\\r\n perf_df.loc[ind, price_names[col_to_change]].values * (1 - transaction_cost) \\\r\n - np.sum(numb_shares * price_shares, 0) * (1 + transaction_cost)\r\n order[ind, col_to_change] += np.sum(numb_shares, 0)\r\n buy_shares[:, col_to_change] = 0\r\n bl_first[col_to_change] = bl_last[col_to_change] - 1\r\n\r\n # Situation A.C: Sell order & no outstanding buys\r\n col_to_change = (order[ind, :] < 0) & (np.sum(buy_shares, 0) == 0)\r\n if sum(col_to_change) != 0:\r\n row_to_change = bl_last[col_to_change]\r\n sell_shares[row_to_change, col_to_change] = -order[ind, col_to_change]\r\n sell_price[row_to_change, col_to_change] = perf_df.loc[ind, price_names[col_to_change]]\r\n sl_last[col_to_change] += 1\r\n\r\n # Situation B.A: Buy order & outstanding sells larger than buy order\r\n col_to_change = (order[ind, :] > 0) & (np.sum(sell_shares, 0) > order[ind, :])\r\n if sum(col_to_change) != 0:\r\n share_cumsum = np.cumsum(sell_shares, 0)\r\n share_compl = (share_cumsum < order[ind, :]) & col_to_change\r\n numb_shares = sum(sell_shares * share_compl, 0)[col_to_change]\r\n ret[ind, col_to_change] += sum(sell_shares * sell_price * share_compl, 0)[col_to_change] * (1 - transaction_cost)\\\r\n - numb_shares * perf_df.loc[ind, price_names[col_to_change]] * (1 + transaction_cost)\r\n sell_shares[share_compl] = 0\r\n sl_first += sum(share_compl)\r\n order[col_to_change] += -numb_shares\r\n\r\n ret[ind, col_to_change] += sell_price[sl_first[col_to_change], col_to_change] * order[ind, col_to_change] * (1 - transaction_cost)\\\r\n - perf_df.loc[ind, price_names[col_to_change]] * order[ind, col_to_change] * (1 + transaction_cost)\r\n sell_shares[sl_first[col_to_change], col_to_change] += -order[ind, col_to_change]\r\n order[ind, col_to_change] = 0\r\n\r\n # Situation B.B: Buy order & outstanding sells smaller than buy order\r\n # --> just fill out all outstanding sells, and change order. This order will be added to buy list in B.C\r\n col_to_change = (order[ind, :] > 0) & \\\r\n (np.sum(sell_shares, 0) > 0) & (np.sum(sell_shares, 0) <= order[ind, :])\r\n if sum(col_to_change) != 0:\r\n numb_shares = sell_shares[:, col_to_change]\r\n price_shares = sell_price[:, col_to_change]\r\n ret[ind, col_to_change] += np.sum(numb_shares * price_shares, 0) * (1 - transaction_cost) \\\r\n - np.sum(numb_shares, 0) * perf_df.loc[ind, price_names[col_to_change]] * (1 + transaction_cost)\r\n order[ind, col_to_change] += -np.sum(numb_shares, 0)\r\n sell_shares[:, col_to_change] = 0\r\n sl_first[col_to_change] = sl_last[col_to_change] - 1\r\n\r\n # Situation B.C: Buy order & no outstanding sells\r\n col_to_change = (order[ind, :] > 0) & (np.sum(sell_shares, 0) == 0)\r\n if sum(col_to_change) != 0:\r\n row_to_change = bl_last[col_to_change]\r\n buy_shares[row_to_change, col_to_change] = order[ind, col_to_change]\r\n buy_price[row_to_change, col_to_change] = perf_df.loc[ind, price_names[col_to_change]]\r\n bl_last[col_to_change] += 1\r\n\r\n ret_abs = np.array([sum(ret[:r]) for r in range(1, len(ret) + 1)])\r\n returns_abs = np.sum(ret_abs, 1)\r\n returns_rel = [i / self.context['max_notional'] + 1 for i in returns_abs]\r\n\r\n return returns_rel, returns_abs, ret_abs",
"def expected_policy_profit(targeting_decision, g, observed_profit, prob_treatment):\n return np.sum(((1-targeting_decision) * (1-g) * observed_profit)/(1-prob_treatment) +\\\n (targeting_decision * g * observed_profit)/(prob_treatment))",
"def profit_per_item_percentage(self, pk=None):\n total_profit_percentage = 0\n total_cost = self.item_cost + self.shipping_cost + self.listing_fee + self.final_value_fee\n total_paid = self.shipping_paid + self.item_paid\n total_profit_percentage = round(100*((total_paid - total_cost) / total_cost), 2)\n return total_profit_percentage",
"def totalProfit(name,sortlist, max):\n result= \"go to \"+ name+\" and buy\"\n tp=0\n for i in range(len(sortlist)):\n if sortlist[i][1][2]>0 :\n if sortlist[i][1][0]<=max:\n max= max-sortlist[i][1][0]\n t=(sortlist[i][1][2] * sortlist[i][1][0])\n result= result+\"\\n\"+str(sortlist[i][1][0])+ \" \"+str(sortlist[i][0])+\" for profit of \"+str(t)\n tp= tp+t\n else:\n t=(sortlist[i][1][2]*max)\n result = result+\"\\n\"+ str(max)+\" \"+str(sortlist[i][0])+\" for profit of \"+str(t)\n tp=tp+t\n max=0\n if max==0:\n break\n if tp==0:\n result= result+ \"\\nno profit\"\n return result,tp",
"def calculate(self, order):\n pass",
"def calculate_profit_pod(location, destination):\n _profit = []\n for key in destination.price_slip.keys():\n if location.price_slip[key] != 0 and destination.price_slip[key] != 0 and location.price_slip[key][1] != 0 and location.price_slip[key][2] != 0:\n benefit = destination.price_slip[key][0] - location.price_slip[key][1]\n _profit.append([f'{benefit:.2f}'])\n else:\n _profit.append([f'0.00'])\n\n return _profit",
"def get_contribution(self):\n salary = self._get_salary()\n if not salary:\n return 0\n # Class 1 NIC.\n contribution = 0\n st = 702\n if salary > st:\n contribution = (salary - st) * 0.138\n return contribution",
"def curProfitResponse(curPrice, prevPrice, coff):\n\treturn curProfit(curPrice, prevPrice, demandIntcpt, k1, k2, aPrInc, bPrDec, unitCost, coff)",
"def determine_profit(self):\n sqrt_delta_sigma = math.sqrt(self.brownian_delta) * self.brownian_sigma\n brownian_motion = nrand.normal(loc=0, scale=sqrt_delta_sigma)\n sigma_pow_mu_delta = (self.drift_mu - 0.5 * math.pow(self.brownian_sigma, 2.0)) * self.brownian_delta\n geometric_brownian_motion_log_return = brownian_motion + sigma_pow_mu_delta\n retur = np.exp(geometric_brownian_motion_log_return)\n next_profit = self.profit_history[-1] * retur\n return next_profit",
"def findAShin(self):\n #return reduce(lambda x, y: x*y, [self.DoS[key].get_price() for key in self.DoS] )\n a = array([self.DoS[key].get_Price() for key in self.DoS])\n return a.prod()**(1.0/len(a))",
"def cash_flow(self):\n _cash_flow = self.after_tax_profit() + self.depreciation()\n return _cash_flow",
"def maxProfit(self, prices):\n profit = 0\n for i in range(1,len(prices)):\n if prices[i] > prices[i-1]:\n profit += prices[i] - prices[i-1]\n return profit",
"def total_profit(knapsack, items, weight):\n return knapsack[items][weight]",
"def add_profit(df_gimmes, bet_size):\n df_gimmes['Bet_on_A'] = bet_size * \\\n (df_gimmes['best_ML_B']/100 + 1) / \\\n (df_gimmes['best_ML_A']/100.0 +\n df_gimmes['best_ML_B']/100.0 + 2)\n\n df_gimmes['Bet_on_B'] = bet_size - df_gimmes['Bet_on_A']\n\n df_gimmes['Profit_A'] = df_gimmes['Bet_on_A'] * \\\n df_gimmes['best_ML_A'] / 100.0 + \\\n df_gimmes['Bet_on_A'] - bet_size\n\n df_gimmes['Profit_B'] = df_gimmes['Bet_on_B'] * \\\n df_gimmes['best_ML_B'] / 100.0 + \\\n df_gimmes['Bet_on_B'] - bet_size\n\n return df_gimmes",
"def best_ask_price(orders: pandas.DataFrame):\n return best_ask_order(orders).price",
"def investment_price(self):\n invest = self.max_loss / (self.buy_price - self.stop_loss) * self.buy_price\n if invest > self.capital:\n return round(self.capital, 2)\n else:\n return round(invest, 2)",
"def calcul_risk(self):\n if (self.take_profit - self.buy_price) >= (\n self.buy_price - self.stop_loss\n ) * self.risk:\n return True\n else:\n return False",
"def INVITE_COST(sent, isNonProfit=False):\n cost = 0\n if sent > 100:\n cost = 500 # $5\n if sent > 500:\n cost = 1000 # $10\n if sent > 1000:\n cost = 1500 # $15\n if sent > 2000:\n cost = 2000 # $20\n if sent > 10000:\n cost = 2500 # $25\n if isNonProfit:\n cost = cost * .75\n return int(round(cost))",
"def mxprofit(array):\n\n #initialize variables\n minimum_val = 10000\n profit = 0\n # edge cases\n if len(array) <= 1:\n return 0\n # iterate through list and store minimum value\n for i in range(len(array)):\n if array[i] < minimum_val:\n minimum_val = array[i]\n # subtract from minimum value and store profit\n for j in array[i:len(array)]:\n if (j-minimum_val) > profit:\n profit = j - minimum_val\n return profit",
"def gross_profit():\n sales_revenue = float(input(\"Expected sales revenue: \"))\n cogs = float(input(\"Cost of goods sold: \"))\n gross_profit = sales_revenue - cogs\n print(\"Sales Revenue: {}\\nCOGS: {}\\nGross Profit: {}\".format(sales_revenue, cogs, gross_profit))\n return gross_profit, sales_revenue",
"def GetSpeculated(self):\n return self.money + sum([self.share[i] * self.price[i][0] * (1 + self.taxe) for i in self.price])"
] | [
"0.73407346",
"0.70521134",
"0.69925046",
"0.66937786",
"0.664156",
"0.6609153",
"0.660143",
"0.64814395",
"0.63270825",
"0.62858367",
"0.62036735",
"0.6141628",
"0.6069652",
"0.6067097",
"0.6044266",
"0.60409504",
"0.602659",
"0.6025829",
"0.6020886",
"0.6019667",
"0.6008011",
"0.59903014",
"0.59768826",
"0.59436584",
"0.5936405",
"0.59282374",
"0.5919609",
"0.5914812",
"0.5902267",
"0.5887112"
] | 0.79486245 | 0 |
Removes a service from a list of existing services. | def RemoveServiceFromEndpoints(service_name, services):
new_services = []
if not isinstance(services, list):
return new_services
# TODO(user): Consider throwing an exception if the service is not
# already configured in the list of endpoints.
for service in services:
if not isinstance(service, dict) or 'name' not in service:
raise exceptions.ToolException(ValueError(
'Services are expected to be service dicts!'))
if service['name'] != service_name:
new_services.append(service)
return new_services | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def DeleteServices(self):\n for service in self.services.values():\n service.Delete()",
"def delete_service(self, service):\n # type: (LoadBalancerService) -> List[BoundAction]\n return self._client.delete_service(self, service)",
"def remove(self, service):\n os.remove(os.path.join(self.directory, service))",
"def remove_service(self, zeroconf, service_type, name):",
"def terminate_services(self, services):\n services = self._filter_cid(services)\n for service in services:\n ctr = self.check_service_running(service,\n raise_on=['terminated'])\n logger.info(\"Stopping and \"\n \"removing docker instance : %s\" % service)\n self.driver.stop_container(ctr['Id'], remove=True)\n if service not in self._dirty_service:\n self._dirty_service[service] = {\"ctr\": ctr,\n \"terminated\": True}\n else:\n self._dirty_service[service][\"terminated\"] = True\n return services",
"def delete_service(self, service_id):\n raise exception.NotImplemented() # pragma: no cover",
"def service_delete(service):\n db = model.Session()\n service = _must_find(db, model.Service, service)\n db.delete(service)\n db.commit()\n\n\n # API Code #\n ############",
"def service_delete(container, sysdir=constants.SYSTEMD_DIR, log=None):\n log = log or common.configure_logging(__name__)\n # prefix is explained in the service_create().\n service = 'tripleo_' + container\n\n sysd_unit_f = systemctl.format_name(service)\n sysd_health_f = systemctl.format_name(service + '_healthcheck')\n sysd_timer_f = service + '_healthcheck.timer'\n sysd_health_req_d = sysd_unit_f + '.requires'\n\n for sysd_f in sysd_unit_f, sysd_health_f, sysd_timer_f:\n if os.path.isfile(sysdir + sysd_f):\n log.debug('Stopping and disabling systemd service for %s' %\n service)\n try:\n systemctl.stop(sysd_f)\n systemctl.disable(sysd_f)\n except systemctl.SystemctlException:\n log.exception(\"systemctl failed\")\n raise\n log.debug('Removing systemd unit file %s' % sysd_f)\n os.remove(sysdir + sysd_f)\n else:\n log.info('No systemd unit file was found for %s' % sysd_f)\n\n # Now that the service is removed, we can remove its \".requires\"\n if os.path.exists(os.path.join(sysdir, sysd_health_req_d)):\n log.info('Removing healthcheck require for %s' % service)\n shutil.rmtree(os.path.join(sysdir, sysd_health_req_d))",
"def delete_service(self, service_description, host_name):\n\t\tfor item in self.data['all_service']:\n\t\t\tif (item['service_description'] == service_description) and (host_name in self._get_active_hosts(item)):\n\t\t\t\tself.data['all_service'].remove(item)\n\t\t\t\titem['meta']['delete_me'] = True\n\t\t\t\titem['meta']['needs_commit'] = True\n\t\t\t\tself.data['all_service'].append(item)\n\n\t\t\t\treturn True",
"def unproxy_service(self, *service_ids) -> None:\n\n for service_id in service_ids:\n router_key = self._router_key(self._router_id(service_id))\n middleware_key = self._middleware_key(self._middleware_id(service_id))\n tservice_key = self._tservice_key(self._tservice_id(service_id))\n\n self._zk.delete(router_key, recursive=True)\n self._zk.delete(middleware_key, recursive=True)\n self._zk.delete(tservice_key, recursive=True)\n\n # prevents \"KV connection error: middlewares cannot be a standalone element\"\n middlewares_key = f\"/{self._prefix}/http/middlewares\"\n if not self._zk.get_children(middlewares_key):\n self._zk.delete(middlewares_key)\n\n self._trigger_configuration_update()",
"def unregister(self, service_name, service_addr, addr_cls=None):\n addr_cls = addr_cls or PlainAddress\n etcd_delete = True\n if addr_cls != PlainAddress:\n etcd_delete = False\n\n for service_name in service_name:\n key = self._form_service_key(service_name, service_addr)\n if etcd_delete:\n self._client.delete(key)\n else:\n self._client.put(addr_cls(service_addr).delete_value())\n\n self._services.get(service_addr, {}).discard(service_name)",
"def service_remove(path, service_name):\n compose_result, err = __load_docker_compose(path)\n if err:\n return err\n services = compose_result[\"compose_content\"][\"services\"]\n if service_name not in services:\n return __standardize_result(\n False, \"Service {} did not exists\".format(service_name), None, None\n )\n del services[service_name]\n return __dump_compose_file(\n path,\n compose_result,\n \"Service {} is removed from {}\".format(service_name, path),\n already_existed=True,\n )",
"def update_services(self, new_services_list):\n to_stop = [service for service in self if service not in new_services_list]\n for service_id in to_stop:\n self[service_id].stop()\n del self[service_id]\n\n for service_id in new_services_list:\n if service_id not in self:\n self[service_id] = ServiceManager(self.zk_client, self.project_id,\n service_id, self.callback)",
"def delService(self):\n self.__selected.delete()\n row = self.currentRow()\n if row >= 1:\n self.__service_list.setCurrentRow(row - 1, QtCore.QItemSelectionModel.Select)\n self.refresh()",
"def removeService(self, interfaceClass: java.lang.Class, service: object) -> None:\n ...",
"def stop_services(self, services):\n services = self._filter_cid(services)\n for service in services:\n ctr = self.check_service_running(service, raise_on=['terminated'])\n logger.info(\"Stopping docker instance : %s\" % service)\n self.driver.stop_container(ctr['Id'])\n if service not in self._dirty_service:\n self._dirty_service[service] = {\"ctr\": ctr,\n \"terminated\": False}\n\n # self.store.update_service_map()\n return services",
"def remove_pilot_compute_service(self, pjs):\n self.pilot_job_services.remove(pjs)\n CoordinationAdaptor.update_cds(self.url, self)",
"def delete_service(self, load_balancer, service):\n # type: (Union[LoadBalancer, BoundLoadBalancer], LoadBalancerService) -> List[BoundAction]\n data = {\n \"listen_port\": service.listen_port,\n }\n\n response = self._client.request(\n url=\"/load_balancers/{load_balancer_id}/actions/delete_service\".format(load_balancer_id=load_balancer.id),\n method=\"POST\", json=data)\n return BoundAction(self._client.actions, response['action'])",
"def delete_service(self, service_id):\n service_name = self.fastly_cache[service_id]['service_name']\n del(self.fastly_cache[service_id])\n del(self.fastly_cache[service_name])\n\n return {'status': 'ok'}",
"def unregister_service(self, name):\n self._services.remove(name)",
"def remove_sp(self, date_limit):\n for provider in ServiceProvider.objects.filter(end_at__lt=date_limit, history=None):\n # Check for history versions\n for sp in ServiceProvider.objects.filter(history=provider.pk):\n self.output(\"Removing service provider (history): \" + sp.entity_id)\n if not self.list_only:\n sp.delete()\n self.output(\"Removing service provider: \" + provider.entity_id)\n if not self.list_only:\n provider.delete()",
"def delete_service(self, project_id, service_id):\n service_obj = self.storage_controller.get_service(\n project_id, service_id)\n\n # get provider details for this service\n provider_details = self._get_provider_details(project_id, service_id)\n\n # change each provider detail's status to delete_in_progress\n for provider in service_obj.provider_details:\n service_obj.provider_details[provider].status = (\n u'delete_in_progress')\n\n self.storage_controller.update_service(\n project_id,\n service_id,\n service_obj\n )\n\n kwargs = {\n \"provider_details\": json.dumps(\n dict([(k, v.to_dict()) for k, v in provider_details.items()])),\n \"project_id\": project_id,\n \"service_id\": service_id,\n 'time_seconds': self.determine_sleep_times(),\n 'context_dict': context_utils.get_current().to_dict()\n }\n\n self.distributed_task_controller.submit_task(\n delete_service.delete_service, **kwargs)\n\n return",
"def stop_services(self):\n logger.info(\"Stopping services: %s\", self.services)\n for service in self.services:\n with hide(*fab_quiet):\n sudo('service %s stop' % service)",
"def delete_service_entry(service_name, service_type):\n manager = get_manager()\n service_id = manager.resolve_service_id(service_name, service_type)\n if service_id:\n manager.api.services.delete(service_id)\n log(\"Deleted service entry '%s'\" % service_name, level=DEBUG)",
"async def remove_orphaned_services(\n registry: RedisResourceRegistry, app: web.Application\n) -> None:\n logger.info(\"Starting orphaned services removal...\")\n currently_opened_projects_node_ids = set()\n alive_keys, _ = await registry.get_all_resource_keys()\n for alive_key in alive_keys:\n resources = await registry.get_resources(alive_key)\n if \"project_id\" not in resources:\n continue\n\n project_uuid = resources[\"project_id\"]\n node_ids = await get_workbench_node_ids_from_project_uuid(app, project_uuid)\n currently_opened_projects_node_ids.update(node_ids)\n\n running_interactive_services = await get_running_interactive_services(app)\n logger.info(\n \"Will collect the following: %s\",\n [x[\"service_host\"] for x in running_interactive_services],\n )\n for interactive_service in running_interactive_services:\n # if not present in DB or not part of currently opened projects, can be removed\n node_id = interactive_service[\"service_uuid\"]\n if (\n not await is_node_id_present_in_any_project_workbench(app, node_id)\n or node_id not in currently_opened_projects_node_ids\n ):\n logger.info(\"Will remove service %s\", interactive_service[\"service_host\"])\n try:\n await stop_service(app, node_id)\n except (ServiceNotFoundError, DirectorException) as e:\n logger.warning(\"Error while stopping service: %s\", e)\n\n logger.info(\"Finished orphaned services removal\")",
"def delete(self, *args, **kwargs):\n\n if args:\n self.service.remove(EtherAddress(args[0]))\n else:\n self.service.remove_all()",
"def rm(path, service_names=None):\n\n project = __load_project(path)\n if isinstance(project, dict):\n return project\n else:\n try:\n project.remove_stopped(service_names)\n except Exception as inst: # pylint: disable=broad-except\n return __handle_except(inst)\n return __standardize_result(\n True, \"Removing stopped containers via docker-compose\", None, None\n )",
"async def services_delete(request):\r\n LOG.debug('DELETE /services received.')\r\n # Tap into the database pool\r\n db_pool = request.app['pool']\r\n\r\n # Send request for processing\r\n await delete_services(request, db_pool)\r\n\r\n # Notify aggregators of changed service catalogue\r\n await invalidate_aggregator_caches(request, db_pool)\r\n\r\n # Return confirmation\r\n return web.HTTPNoContent()",
"def delete_TestService(test_case, override_service_name=null, override_headers=null, override_cookies=null):\n # type: (AnyMagpieTestCaseType, Optional[Str], Optional[HeadersType], Optional[CookiesType]) -> None\n app_or_url = get_app_or_url(test_case)\n service_name = override_service_name if override_service_name is not null else test_case.test_service_name\n services_info = TestSetup.get_RegisteredServicesList(test_case,\n override_headers=override_headers,\n override_cookies=override_cookies)\n test_service = list(filter(lambda r: r[\"service_name\"] == service_name, services_info))\n # delete as required, skip if non-existing\n if len(test_service) > 0:\n path = \"/services/{svc_name}\".format(svc_name=service_name)\n resp = test_request(app_or_url, \"DELETE\", path,\n headers=override_headers if override_headers is not null else test_case.json_headers,\n cookies=override_cookies if override_cookies is not null else test_case.cookies)\n check_val_equal(resp.status_code, 200)\n TestSetup.check_NonExistingTestService(test_case, override_service_name=service_name)",
"def filter_services(self, services):\n ret = []\n matchers = [re.compile(b) for b in self.service_blacklist_re]\n for s in services:\n if not any([m.match(s) for m in matchers]):\n ret.append(s)\n return set(ret)"
] | [
"0.7037286",
"0.675924",
"0.6610748",
"0.6590831",
"0.62848717",
"0.6255595",
"0.61463314",
"0.6130574",
"0.611379",
"0.6080032",
"0.6072836",
"0.6069888",
"0.60036",
"0.5951796",
"0.5946897",
"0.59276074",
"0.59024787",
"0.58957607",
"0.58896816",
"0.5826269",
"0.57981753",
"0.5736189",
"0.57276124",
"0.56832755",
"0.56461155",
"0.56343794",
"0.561863",
"0.5609318",
"0.5520186",
"0.54270095"
] | 0.7142464 | 0 |
Return distance of two keys in qwerty keyboard based on manhattan or euclidean distance. | def key_distance(self, x, y, type="manhattan"):
if type == "manhattan":
return self.manhattan_dist_matrix[self.keys.index(x), self.keys.index(y)]
elif type == "euclidean":
return self.euclidean_dist_matrix[self.keys.index(x), self.keys.index(y)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def distance(self, keyOne, keyTwo):",
"def qwerty_distance():\n from collections import defaultdict\n import math\n R = defaultdict(dict)\n R['-']['-'] = 0\n zones = [\"dfghjk\", \"ertyuislcvbnm\", \"qwazxpo\"]\n keyboard = [\"qwertyuiop\", \"asdfghjkl\", \"zxcvbnm\"]\n for num, content in enumerate(zones):\n for char in content:\n R['-'][char] = num + 1\n R[char]['-'] = 3 - num\n for a in ascii_lowercase:\n rowA = None\n posA = None\n for num, content in enumerate(keyboard):\n if a in content:\n rowA = num\n posA = content.index(a)\n for b in ascii_lowercase:\n for rowB, contentB in enumerate(keyboard):\n if b in contentB:\n R[a][b] = int(math.fabs(rowB - rowA) + math.fabs(posA - contentB.index(b)))\n return R",
"def _get_distance(a, b):\n return np.sqrt(np.sum((a - b) ** 2))",
"def distance(self, first_tape, second_tape):\n pairs = zip(first_tape, second_tape)\n return math.sqrt(abs(sum(map((lambda n: self.subsq(*n)), pairs))))",
"def hammingDistance(s1 = \"\", s2 = \"\"):\n # if len(s1) != len(s2):\n # raise ValueError(\"Undefined for sequences of unequal length\")\n return sum(bool(ord(ch1) - ord(ch2)) for ch1, ch2 in zip(s1, s2))",
"def _pairwise_dist(self,seq1,seq2):\n \n return jf.damerau_levenshtein_distance(str(seq1), str(seq2))",
"def distance(a, b):\n a = a[0]\n b = b[0]\n if lower.search(a):\n if lower.search(b):\n return abs(ord(b) - ord(a)) % 8\n elif upper.search(b):\n return abs(ord(b.lower()) - ord(a)) % 5 + 8\n elif upper.search(a):\n if lower.search(b):\n return abs(ord(a.lower()) - ord(b)) % 5 + 8\n elif upper.search(b):\n return abs(ord(b) - ord(a)) % 8\n if a == b:\n return 0\n return 1",
"def hamming_dist(self):\r\n distance = 0\r\n distance = abs(len(self.s1) - len(self.s2))\r\n distance += sum(i1 != i2 for i1,i2 in zip(self.s2,self.s1))\r\n return distance",
"def dist(string1, string2):\n if string1 == string2:\n return 0\n count1 = Counter(string1)\n count2 = Counter(string2)\n\n keys = set(count1.keys())\n keys.update(count2.keys())\n dist = sum(abs(count1.get(letter, 0) - count2.get(letter, 0)) for letter in keys)\n return dist",
"def get_distance(p1, p2):\n return ((p1[0] - p2[0]) ** 2 + (p1[1] - p2[1]) ** 2) ** 0.5",
"def getDistance(pos1, pos2):\r\n return ((pos1[0] - pos2[0]) ** 2 + (pos1[1] - pos2[1]) ** 2) ** 0.5",
"def distance(a, b):\n return (np.sum((a - b)**2))**0.5",
"def manhattan(rating1, rating2):\r\n distance = 0\r\n commonRatings = False \r\n for key in rating1:\r\n if key in rating2:\r\n distance += abs(rating1[key] - rating2[key])\r\n commonRatings = True\r\n if commonRatings:\r\n return distance\r\n else:\r\n return -1 #Indicates no ratings in common\r",
"def distance(self, word_a, word_b):\n word_a, word_b = word_a.upper(), word_b.upper()\n s_a = self.word_lookup[word_a]\n s_b = self.word_lookup[word_b]\n j = 1\n max_len = min(len(s_a), len(s_b))\n while j <= max_len:\n if s_a[-j] != s_b[-j]:\n break\n j += 1\n return j",
"def distance(self) -> float:\n return self._dist_two_wire() # at this time we only support 2-wire meausre",
"def distance(p_1, p_2):\n return ((p_2[0] - p_1[0]) ** 2 + (p_2[1] - p_1[1]) ** 2 \\\n + (p_2[2] - p_1[2]) ** 2) ** 0.5",
"def distance(a,b): \r\n return math.sqrt((a[0] - b[0])**2 + (a[1] - b[1])**2)",
"def distance(self, wn1, wn2):\n return abs(self.chunk_map[wn1] - self.chunk_map[wn2])",
"def calc_distance(first: Waypoint, second: Waypoint) -> int:\n return int(distance.vincenty(first.coords(), second.coords()).m)",
"def wer(self, s1, s2):\n\n # build mapping of words to integers\n b = set(s1.split() + s2.split())\n word2char = dict(zip(b, range(len(b))))\n\n # map the words to a char array (Levenshtein packages only accepts\n # strings)\n w1 = [chr(word2char[w]) for w in s1.split()]\n w2 = [chr(word2char[w]) for w in s2.split()]\n\n return Lev.distance(''.join(w1), ''.join(w2))",
"def dist(self, one, two):\n return np.sqrt((one[0] - two[0]) ** 2 + (one[1] - two[1]) ** 2)",
"def distance(pt1, pt2):\n return (pt1[0] - pt2[0]) ** 2 + (pt1[1] - pt2[1]) ** 2",
"def distance(p1, p2):\n return math.sqrt((math.pow((p2[0] - p1[0]), 2) + math.pow((p2[1] - p1[1]), 2)))",
"def distance(p1, p2):\r\n return math.hypot(p1[0] - p2[0], p1[1] - p2[1])",
"def edit_distance_between_seqs(seq1, seq2):\n aln1, aln2 = needleman_wunsch(seq1, seq2)\n return edit_distance_from_aln_strings(aln1, aln2)",
"def distance(P1, P2):\n return ((P1[0] - P2[0])**2 + (P1[1] - P2[1])**2) ** 0.5",
"def CalculateDistance(q1, q2):\r\n return np.sqrt((q1[0] - q2[0])**2 + (q1[1] - q2[1])**2)",
"def manhatam_distance(self) -> int:\n raise NotImplementedError",
"def distance(p1, p2):\n\treturn sqrt((p1[1]-p2[1])**2 + (p1[0]-p2[0])**2)",
"def get_manhattan_distance(coord_a, coord_b):\n return abs(coord_a.x - coord_b.x) + abs(coord_a.y - coord_b.y)"
] | [
"0.75387555",
"0.69056517",
"0.6859236",
"0.67510206",
"0.6659005",
"0.66116893",
"0.6603836",
"0.65740633",
"0.6566495",
"0.65459806",
"0.6529574",
"0.6490348",
"0.6489842",
"0.64859676",
"0.6471863",
"0.6434619",
"0.6432744",
"0.6414012",
"0.6411454",
"0.63999486",
"0.6389636",
"0.6386778",
"0.63855314",
"0.63828",
"0.6381105",
"0.63664514",
"0.63640213",
"0.63637084",
"0.6362522",
"0.63544244"
] | 0.71139956 | 1 |
Return a dataframe of distance matrix of x and y. Indexes are letters of x and columns are letters of y. | def distance_dataframe(self, x, y, keyboard_weight=None):
dist_matrix = self.distance_matrix(x, y, keyboard_weight)
dist_df = pd.DataFrame(dist_matrix, index=["", *list(x)],
columns=["", *list(y)])
return dist_df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calc_dist_matrix(self):\n\n self.dist_matrix = spatial.distance.squareform(spatial.distance.pdist(self.data_vector,metric=\"hamming\"))\n\n self.dist_frame = pd.DataFrame(self.dist_matrix,\n index = self.seq_strings,\n columns = self.seq_strings)",
"def calcDistance(self):\n # Initialize the distance matrix\n arr = np.repeat(0, self.num_col)\n result_mat = np.repeat(arr, self.num_col)\n result_mat = np.reshape(result_mat, (self.num_col, self.num_col))\n trinary_mat = self.df_trinary.values\n for left_val in TRINARY_VALUES:\n left_func = lambda v: 1 if v==left_val else 0\n left_mat = np.transpose(np.vectorize(left_func)(trinary_mat))\n for right_val in TRINARY_VALUES:\n if left_val == right_val:\n continue\n right_func = lambda v: 1 if v==right_val else 0\n right_mat = np.vectorize(right_func)(trinary_mat)\n # Count the number of occurrences of this combination of values\n # by doing a matrix multiply\n new_mat = np.matmul(left_mat, right_mat)\n # Multiply by the squared distance between the values\n squared_distance = (left_val - right_val)**2\n new_mat = new_mat*squared_distance\n # Accumulate the result\n result_mat = result_mat + new_mat\n # Convert to dataframe\n result_mat = np.vectorize(lambda v: np.sqrt(v)) (result_mat)\n self.df_distance = pd.DataFrame(result_mat, columns=self.columns,\n index=self.columns)",
"def _distance_matrix(self):\n\n # Log the type of metric being used in Sequencing\n logger.info('Using {} Distance'.format(self.measure))\n\n # Convert the nodal coordinate tuples to a np.array\n coords = np.vstack(map(np.array, self.coords.values()))\n \n if self.measure == 'haversine':\n # Partially applied haversine function that takes a coord and computes the vector distances for all coords\n haversine = lambda coord: get_hav_distance(coords[:, 0], coords[:, 1], *coord) \n # Map the partially applied function over all coordinates, and stack to a matrix\n return np.vstack(map(haversine, coords))\n\n # Partially applied haversine function that takes a coord and computes the vector distances for all coords\n euclidean = lambda coord: get_euclidean_dist(coords, coord)\n # Map the partially applied function over all coordinates, and stack to a matrix\n return np.vstack(map(euclidean, coords))",
"def get_distance_matrix(self):\n names = self.get_named_leaves()\n num_names = len(names)\n dist_mat = np.zeros((num_names, num_names), dtype='float')\n for i, j in itertools.combinations(range(num_names), 2):\n node1, node2 = self.node_names[names[i]], self.node_names[names[j]]\n dist = self.node_distance(node1, node2)\n dist_mat[i,j] = dist\n dist_mat[j,i] = dist\n return names, dist_mat",
"def get_distance_matrix():\n df_afstandn2 = get_dataframe(\"\"\"SELECT *\n FROM proj_afval_netwerk.afv_poi_afstand\n WHERE afstand < 1000\n \"\"\")\n return df_afstandn2",
"def _get_edit_distance_matrix(x: str, y: str) -> list:\n matrix = [[-1 for _ in range(len(y) + 1)] for _ in range(len(x) + 1)]\n\n for j in range(len(matrix[0])):\n matrix[0][j] = j\n\n for i, _ in enumerate(matrix):\n matrix[i][0] = i\n\n return matrix",
"def get_distance_matrix(df, distance_measure, feat_col_ix=1):\n n = len(df)\n dist_matrix = np.zeros((n,n))\n for i in range(n):\n for j in range(j):\n si = df.iloc[i, feat_col_ix:]\n sj = df.iloc[j, feat_col_ix:]\n dist_matrix[i,j] = distance_measure(si, sj)[0]\n return dist_matrix",
"def build_distance_matrix(path_to_embeddings):\n\n embed_df = pd.read_csv(path_to_embeddings)\n print (\"length is: \", len(embed_df))\n columns = list(embed_df)\n\n \n distances = euclidean_distances(embed_df.iloc[:, 1:], embed_df.iloc[:, 1:])\n embed_df = embed_df.set_index([columns[0]])\n # format distance matrix\n distances_df = pd.DataFrame(distances)\n distances_df.columns = list(embed_df.index)\n distances_df.index = list(embed_df.index)\n\n print (\"finished building the distance matrix ...\")\n\n print (\"///////////////////\")\n print (len(distances_df))\n\n return distances_df",
"def calculateDistances(df):\n return",
"def euclidean_distances(X, Y):\r\n\r\n D = np.zeros((X.shape[0],Y.shape[0]))\r\n \r\n for X_idx in range(X.shape[0]):\r\n for Y_idx in range(Y.shape[0]): \r\n \r\n D[X_idx,Y_idx] = np.sqrt(np.sum((X[X_idx,:]-Y[Y_idx,:])**2))\r\n \r\n return D",
"def compute_dist_matrix(X1, X2, distance):\n N, M = X1.shape[0], X2.shape[0]\n dist_matrix = np.zeros((N, M))\n for i in range(N):\n for j in range(M):\n dist_matrix[i][j] = dist(X1[i], X2[j], distance=distance)\n return dist_matrix",
"def pairwise_euclidean_distance(x, y):\n m, n = x.size(0), y.size(0)\n dist_mat = torch.pow(x, 2).sum(1, keepdim=True).expand(m, n) + \\\n torch.pow(y, 2).sum(1, keepdim=True).expand(n, m).t() \\\n - 2 * torch.matmul(x, y.t())\n # for numerical stability\n dist_mat = dist_mat.clamp(min=1e-12).sqrt()\n return dist_mat",
"def calc_dist_matrix(self,verbose=False):\n\n print(\"Calculating distance matrix.\"); sys.stdout.flush()\n\n nrow = self.data_vector.shape[0]\n self.dist_matrix = np.zeros((nrow, nrow),dtype=float)\n for i in range(nrow):\n if verbose:\n if i % 1000 == 0:\n print(\"Row\",i,\"of\",nrow)\n sys.stdout.flush()\n\n for j in range(i + 1, nrow):\n self.dist_matrix[i,j] = self._pairwise_dist(self.data_vector[i],self.data_vector[j])\n self.dist_matrix[j,i] = self.dist_matrix[i,j]\n \n self.dist_frame = pd.DataFrame(self.dist_matrix,\n index = self.seq_strings,\n columns = self.seq_strings)",
"def distance_matrix(sunspots1, sunspots2):\n \n N1 = len(sunspots1)\n N2 = len(sunspots2)\n\n distance_matrix = np.zeros((N1, N2))\n\n for i in list(range(N1)):\n for j in list(range(N2)):\n\n distance_matrix[i, j] = euclidean_dist(sunspots1[i], sunspots2[j])\n\n return distance_matrix",
"def compute_euclidean_distance_matrix(locations):\n distances = {}\n distances_df=get_times(locations)\n print(distances_df)\n print(distances_df.iloc[0,0])\n print(distances_df.iloc[0,1])\n print(distances_df.iloc[0,2])\n for from_counter, from_node in enumerate(locations):\n distances[from_counter] = {}\n for to_counter, to_node in enumerate(locations):\n distances[from_counter][to_counter] = (int(\n distances_df.iloc[from_counter,to_counter]))\n return distances",
"def get_euclidean_matrix(df):\n df.reset_index(drop=True, inplace=True)\n\n # foods = df['food_names']\n # food_examples = []\n # indices = list(range(0, len(foods)))\n # for i in indices:\n # food_examples.append(str(foods[i]) + str(i))\n # food_examples = pd.Series(food_examples)\n food_examples = df['food_names']\n\n df = df.drop(['food_names', 'height', 'weight', 'above_range', 'BMI', 'age', 'gender',\n 'glucose_tolerance_category','90-percentile_of_2h-iAUC', 'average_carbs_ratio',\n 'average_daily_carbs','average_meals_per_day', 'average_sleep_hours',\n 'average_glucose', 'baseline', 'coefficient_of_variation', 'max_2-hours_iAUC',\n 'median_fasting_glucose_level','median_of_2h-iAUC', 'night_baseline'], axis='columns')\n\n df = df.replace([-np.inf], 0).dropna(axis=1)\n\n num_examples = df.shape[0]\n\n distances = pdist(df.values, metric='euclidean')\n print(distance)\n dis_array = squareform(distances)\n print(dis_array)\n dis_df = pd.DataFrame(data = dis_array, index=food_examples, columns=food_examples)\n print(dis_df)\n writer = pd.ExcelWriter('Euclidean_distance_icarbonx.xlsx', engine='xlsxwriter')\n dis_df.to_excel(writer, sheet_name='Sheet1')\n writer.save()",
"def distance_matrix(self, x, y, keyboard_weight=None):\r\n # create distance matrix\r\n size_x = len(x) + 1\r\n size_y = len(y) + 1\r\n dist_matrix = np.zeros((size_x, size_y))\r\n for i in range(size_x):\r\n dist_matrix[i, 0] = i\r\n for j in range(size_y):\r\n dist_matrix[0, j] = j\r\n\r\n ## fill distance matrix\r\n # no keyboard weight\r\n if not keyboard_weight:\r\n for i in range(1, size_x):\r\n for j in range(1, size_y):\r\n # if letters are same\r\n if x[i-1] == y[j-1]:\r\n dist_matrix[i, j] = dist_matrix[i-1, j-1]\r\n # if letters are different\r\n else:\r\n subs = dist_matrix[i-1, j-1] + 1\r\n delete = dist_matrix[i-1, j] + 1\r\n insert = dist_matrix[i, j-1] + 1 \r\n dist_matrix[i, j] = min(subs, delete, insert)\r\n # manhattan keyboard weight\r\n elif keyboard_weight == \"manhattan\":\r\n for i in range(1, size_x):\r\n for j in range(1, size_y):\r\n # if letters are same\r\n if x[i-1] == y[j-1]:\r\n dist_matrix[i, j] = dist_matrix[i-1, j-1]\r\n # if letters are different\r\n else:\r\n dist = self.key_distance(x[i-1], y[j-1], keyboard_weight)\r\n subs_weight = dist * self.manhattan_coef\r\n subs = dist_matrix[i-1, j-1] + subs_weight\r\n delete = dist_matrix[i-1, j] + 1\r\n insert = dist_matrix[i, j-1] + 1 \r\n dist_matrix[i, j] = min(subs, delete, insert)\r\n # euclidean keyboard weight\r\n elif keyboard_weight == \"euclidean\":\r\n for i in range(1, size_x):\r\n for j in range(1, size_y):\r\n # if letters are same\r\n if x[i-1] == y[j-1]:\r\n dist_matrix[i, j] = dist_matrix[i-1, j-1]\r\n # if letters are different\r\n else:\r\n dist = self.key_distance(x[i-1], y[j-1], keyboard_weight)\r\n subs_weight = dist * self.euclidean_coef\r\n subs = dist_matrix[i-1, j-1] + subs_weight\r\n delete = dist_matrix[i-1, j] + 1\r\n insert = dist_matrix[i, j-1] + 1 \r\n dist_matrix[i, j] = min(subs, delete, insert)\r\n \r\n return dist_matrix",
"def distance_matrix(X, Y, metric):\n distance = np.zeros((len(X), len(Y)))\n for i in range(len(X)):\n for j in range(len(Y)):\n m = metric(X[i], Y[j])\n if np.isnan(m):\n pdb.set_trace()\n distance[i, j] = m\n return distance",
"def create_cols_distances(df):\n #create a column for haversine distance\n df['distance'] = haversine_array(df['pickup_longitude'], df['pickup_latitude'],\n df['dropoff_longitude'], df['dropoff_latitude'])\n\n df['manhattan_distance'] = dummy_manhattan_distance(df['pickup_longitude'], df['pickup_latitude'],\n df['dropoff_longitude'], df['dropoff_latitude'])\n\n df['bearing'] = bearing_array(df['pickup_longitude'], df['pickup_latitude'],\n df['dropoff_longitude'], df['dropoff_latitude'])\n\n return df",
"def get_distance_matrix(grouped_distance):\n return grouped_distance.groupby(\n F.col(\n \"category_a\"\n ).alias(\n \"category\"\n )\n ).pivot(\n \"category_b\"\n ).agg(\n F.expr(\n \"coalesce(min(distance), 10000.00)\"\n )\n ).orderBy(\n \"category\"\n )",
"def distance_matrix(d1, d2=None):\n if d2 is None:\n dists = np.zeros(shape=(d1.shape[0], d1.shape[0]))\n for i in range(dists.shape[0]):\n dists[i] = (((d1 - d1[i]) ** 2).sum(axis=1)) ** 0.5\n else:\n dists = np.zeros(shape=(d1.shape[0], d2.shape[0]))\n for i in range(d1.shape[0]):\n dists[i] = (((d2 - d1[i]) ** 2).sum(axis=1)) ** 0.5\n return dists",
"def compute_distance(df):\n pass",
"def get_matches_df(sparse_matrix, name_vector):\n\n name_vector_list = pd.Series(list(map(str, name_vector)))\n\n non_zeros = sparse_matrix.nonzero()\n\n sparserows = non_zeros[0]\n sparsecols = non_zeros[1]\n\n nr_matches = sparsecols.size\n\n left_side = np.empty([nr_matches], dtype=object)\n right_side = np.empty([nr_matches], dtype=object)\n similarity = np.zeros(nr_matches)\n pos_left = np.zeros(nr_matches, dtype=np.int)\n pos_right = np.zeros(nr_matches, dtype=np.int)\n\n for index in range(0, nr_matches):\n left_side[index] = name_vector_list[sparserows[index]]\n right_side[index] = name_vector_list[sparsecols[index]]\n similarity[index] = sparse_matrix.data[index]\n pos_left[index] = sparserows[index]\n pos_right[index] = sparsecols[index]\n\n return pd.DataFrame({'left_side': left_side,\n 'right_side': right_side,\n 'similarity': similarity,\n 'pos_left': pos_left,\n 'pos_right': pos_right})",
"def __build_distance_matrix(self):\n for i in range(0, len(self.__corpus)):\n doc_i = self.__corpus[i]\n for j in range(i + 1, len(self.__corpus)):\n doc_j = self.__corpus[j]\n distance = doc_i.calc_distance(doc_j)\n self.__distance_matrix.append(distance)",
"def cosine_distances(X, Y):\r\n D = np.zeros((X.shape[0],Y.shape[0]))\r\n \r\n for X_idx in range(X.shape[0]):\r\n for Y_idx in range(Y.shape[0]): \r\n \r\n D[X_idx,Y_idx] = 1 - (np.dot(X[X_idx,:],Y[Y_idx,:]) / (np.sqrt(np.dot(X[X_idx,:], X[X_idx,:]))* np.sqrt(np.dot(Y[Y_idx,:], Y[Y_idx,:])))) \r\n return D",
"def _compute_pairwise_distance(\n x: np.ndarray, y: np.ndarray, symmetric: bool, distance_callable: DistanceCallable\n) -> np.ndarray:\n _x = _make_3d_series(x)\n _y = _make_3d_series(y)\n x_size = _x.shape[0]\n y_size = _y.shape[0]\n\n pairwise_matrix = np.zeros((x_size, y_size))\n\n for i in range(x_size):\n curr_x = _x[i]\n for j in range(y_size):\n if symmetric and j < i:\n pairwise_matrix[i, j] = pairwise_matrix[j, i]\n else:\n pairwise_matrix[i, j] = distance_callable(curr_x, _y[j])\n return pairwise_matrix",
"def distance_matrix(dnas: Collection[str], metric=hamming_distance, relative=True, as_ndarray=False):\n n = len(dnas)\n result = [[0] * n for _ in range(n)]\n for pair in itertools.combinations(zip(range(n), dnas), r=2):\n (idx1, dna1), (idx2, dna2) = pair\n distance = metric(dna1, dna2)\n distance = distance / max(len(dna1), len(dna2)) if relative else distance\n result[idx1][idx2] = distance\n result[idx2][idx1] = distance\n if as_ndarray:\n result = np.asarray(result)\n return result",
"def get_distance_matrix(self, points):\n return points[:, :, np.newaxis, :]-points[:, np.newaxis, :, :]",
"def distancematrix(vec1, vec2):\n v1, v2 = np.meshgrid(vec1, vec2)\n return np.abs(v1 - v2)",
"def distance(self, features, targets):\n cost_matrix = np.zeros((len(targets), len(features)))\n for i, target in enumerate(targets):\n cost_matrix[i, :] = self._metric(self.samples[target], features)\n return cost_matrix"
] | [
"0.69271284",
"0.67564255",
"0.6533258",
"0.6516786",
"0.6428106",
"0.63869244",
"0.6351963",
"0.6319464",
"0.63100487",
"0.62420344",
"0.62378067",
"0.62373847",
"0.62171084",
"0.62104243",
"0.62080455",
"0.6201593",
"0.61032706",
"0.6101561",
"0.60353494",
"0.6023481",
"0.5988664",
"0.5987141",
"0.59437454",
"0.5932476",
"0.5913393",
"0.59105027",
"0.5896201",
"0.58813703",
"0.58646",
"0.5776097"
] | 0.8356927 | 0 |
Returns the token and dsn from a key Generate a simple SHA1 hash of the key key is a 64bits integer Token is a 32bits integer, dsn is a 64bits integer | def key2tokenAndDSN(self, key):
import binascii
import struct
import hashlib
self.keystr = struct.pack("!Q", key)
self.h = hashlib.sha1(self.keystr.rjust(8,'\00'))
self.shastr=self.h.digest() # binary
#shastr = struct.pack("!IIIII", *struct.unpack("@IIIII",shastr)) #to net
self.token, self.dsn = self.shastr[0:4], self.shastr[-8:]
#print "raw: %s (len=%i)"%(shastr,len(shastr))
#print "hex: %s"% binascii.hexlify(token), "%s"%binascii.hexlify(dsn)
self.d1, self.d2 = struct.unpack("!II",self.dsn)
self.token, self.dsn = (struct.unpack("!I",self.token)[0], (long(self.d2)<<32)+self.d1)
#print "token: %x"% token
#print "dsn: %x" % dsn
return (self.token, self.dsn) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fnv1(self, key):\n # hash = 0xff\n hash = 0xcbf29ce484222325\n for n in key.encode():\n # print(n)\n hash = hash ^ n\n hash = hash * 0x100000001b3\n\n # print(hash)\n return hash",
"def _hash(self, key):\n\n return long(hashlib.md5(key).hexdigest(), 16)",
"def _hash(self, key):\n\n return long(hashlib.md5(key).hexdigest(), 16)",
"def fingerprint(self, key):\n base64_pub = self.base64_pub_encode(key)\n return SHA256.new(base64_pub.encode('utf-8')).digest()",
"def SHA1(self) -> _n_0_t_3[_n_0_t_9]:",
"def _dsa_key(self,private_key):\n numbers = private_key.private_numbers()\n content = WriteMessage()\n content.write_string('ssh-dss')\n content.write_mpint(numbers.public_numbers.parameter_numbers.p)\n content.write_mpint(numbers.public_numbers.parameter_numbers.q)\n content.write_mpint(numbers.public_numbers.parameter_numbers.g)\n content.write_mpint(numbers.public_numbers.y)\n content.write_mpint(numbers.x)\n return content.data",
"def _key_hash(self, key):\n\n split_key = key.strip(' ').split(' ')[1]\n return int(split_key)",
"def from_key(self, public_id, key):\n otp = self.get_otp(key)\n from_key = modhex_encode(public_id.encode('hex')) + modhex_encode(otp.encode('hex'))\n return from_key",
"def build_serverkeyhash(self):\n server_publickey = self.getfilehttps(self.epo_url + \"srpubkey.bin\")\n self.serverkeyhash = b64encode(mcafee_crypto.SHA1(server_publickey))\n return self.serverkeyhash",
"def getMD5(self, key1, key2, last8):\n n1=[]\n s1=0\n n2=[]\n s2=0\n for c in key1:\n if c.isdigit():\n n1.append(c)\n if c.isspace():\n s1+=1\n \n for c in key2:\n if c.isdigit():\n n2.append(c)\n if c.isspace():\n s2+=1\n \n d1 = int(''.join(n1))\n d2 = int(''.join(n2))\n z1=d1/s1\n z2=d2/s2\n \n print \"Key 1 has %d spaces:\" % s1, z1\n print \"Key 2 has %d spaces:\" % s2, z2\n \n mdThing = struct.pack(\">LL\", z1, z2) + last8\n return md5(mdThing).digest()",
"def calculate_key_signature(public_key: str) -> str:\n rsa_obj = RSA.import_key(public_key)\n rsa_der = rsa_obj.export_key(\"DER\")\n\n hasher = SHA1.new()\n hasher.update(rsa_der)\n fingerprint = base64url_encode(hasher.digest())\n\n return fingerprint.decode(\"utf8\")",
"def fingerprint_key(key):\n try: key = key.public_key()\n except: pass\n\n serialized = key.public_bytes(\n encoding = serialization.Encoding .OpenSSH,\n format = serialization.PublicFormat.OpenSSH)\n\n blob = b64decode(serialized.split(None,2)[1])\n return fingerprint_public_key_blob(blob)",
"def __init__(self, key):\n self.bs = 16\n self.key = hashlib.sha256(key.encode()).digest()",
"def hex_key(uid: Text, mp: Text) -> Text:\n\n key = sha256(mp.encode('utf-8') + admin_pass.encode('utf-8')).hexdigest()\n return sha256(uid.lower().encode('utf-8') + key.encode('utf-8')).hexdigest()[:40]",
"def fingerprint(public_key):\r\n\r\n return hashlib.new('ripemd160', hashlib.sha256(public_key).digest()).digest()[:4]",
"def concat_hash(self, x, symkey):\n msg = '%s%s' % (x, symkey)\n return int(hashlib.sha1(msg).hexdigest(), 16)",
"def _hash_djb2(self, key):\n # OPTIONAL STRETCH: Research and implement DJB2\n hash_grotto = 5381\n for k in key:\n hash_grotto = ((hash_grotto << 5) + hash_grotto) + ord(k)\n return hash_grotto & 0xFFFFFFFF",
"def short_token():\n hash = hashlib.sha1(force_bytes(shortuuid.uuid()))\n hash.update(force_bytes(settings.SECRET_KEY))\n return hash.hexdigest()[::2]",
"def RSA_SIGNATURE_HASH() :\n return \"SHA-256\"",
"def long_token():\n hash = hashlib.sha1(force_bytes(shortuuid.uuid()))\n hash.update(force_bytes(settings.SECRET_KEY))\n return hash.hexdigest()",
"def fnv1(self, key, seed=0):\n # def fnv1(self, key):\n\n # Your code here\n \"\"\"\n Returns: The FNV-1 hash (64-bit) of a given string. \n \"\"\"\n #Constants : Fails the tests\n # FNV_prime = 1099511628211\n # offset_basis = 14695981039346656037\n\n # #FNV-1a Hash Function\n # hash = offset_basis + seed\n # # hash = offset_basis\n # for c in key:\n # hash = hash * FNV_prime\n # hash = hash ^ ord(c)\n # return hash\n\n \"\"\"\n Returns: The FNV-1a (alternate) hash of a given string\n \"\"\"\n # #Constants : Passes the tests\n # FNV_prime = 1099511628211\n # offset_basis = 14695981039346656037\n\n # #FNV-1a alternate Hash Function\n # hash = offset_basis + seed\n # for c in key:\n # hash = hash ^ ord(c)\n # hash = hash * FNV_prime\n # return hash",
"def short_token():\n hash = hashlib.sha1(shortuuid.uuid().encode('utf-8'))\n hash.update(settings.SECRET_KEY.encode('utf-8'))\n return hash.hexdigest()[::2]",
"def djb2(self, key):\n # Your code here\n hash = 5381\n for c in key:\n hash = (hash*33)+ ord(c)\n return hash",
"def gen_key(self, key):\n b_key = self._hash_digest(key)\n return self._hash_val(b_key, lambda x: x)",
"def parse_key(key_id):\n\tcomment = get_key_comment(key_id)[0]\n\tregex = re.compile(\".*?\\\\((.*?)\\\\)\")\n\tcomment_bits = re.findall(regex, comment)[0].split(' ')\n\tif comment_bits[0] == sha256(comment_bits[1]).hexdigest():\n\t\treturn comment_bits[1]",
"def parse_key(key_id):\n comment = get_key_comment(key_id)[0]\n regex = re.compile(\".*?\\\\((.*?)\\\\)\")\n comment_bits = re.findall(regex, comment)[0].split(' ')\n if comment_bits[0] == sha256(comment_bits[1]).hexdigest():\n return comment_bits[1]",
"def djb2(self, key):\n\n hash = 5381\n for n in key.encode():\n # hash = ((hash << 5) + hash) + n\n hash = hash * 33 + n\n\n return hash\n # return hash & 0xFFFFFFFF",
"def make_hash(self):\n timestamp = str(int(round(time.time()*1000)))\n auth = b64encode(config.username) + ':' \\\n + b64encode(config.password) + ':' \\\n + b64encode(timestamp)\n rsa = RSA.load_pub_key(config.public_key)\n encrypted_auth = rsa.public_encrypt(auth, RSA.pkcs1_padding)\n key = b64encode(encrypted_auth)\n return key",
"def GetHashKey(self, key):\r\n data = pickle.dumps(key)\r\n hashObject = hashlib.sha1(data)\r\n hashValue = hashObject.hexdigest()\r\n value = int(hashValue, 16)\r\n return value",
"def long_token():\n hash = hashlib.sha1(shortuuid.uuid().encode('utf-8'))\n hash.update(settings.SECRET_KEY.encode('utf-8'))\n return hash.hexdigest()"
] | [
"0.59551054",
"0.59115833",
"0.59115833",
"0.58881515",
"0.5808499",
"0.5731819",
"0.5719893",
"0.57018846",
"0.569349",
"0.5679318",
"0.5663129",
"0.5659303",
"0.561188",
"0.5586423",
"0.5538995",
"0.55299336",
"0.54740316",
"0.5471576",
"0.5457066",
"0.5430525",
"0.54092103",
"0.540759",
"0.54051906",
"0.53947127",
"0.5394511",
"0.53936934",
"0.5391949",
"0.5386156",
"0.53848135",
"0.5376851"
] | 0.8006403 | 0 |
Identify distinct MPTCP Connections that reached Successful handshake Look for Ack packets with MPTCP option Header For each MPTCP connection report Receiver's token value which acts as the connectionID | def mptcp_connections(self, pkts):
count = 0
#MPTCP_Capable = 0x0
#MPTCP_CapableACK ---> successful handshake
print "======================================================================"
print "Successful Handshake --- Look for Ack packets with MPTCP option Header"
print """Token = connectionID = SHA1(key)[0-32] of Other party's key. (Capture from
either step 2 or 3 in the first handshake)"""
print "Total packets: %s" % len(pkts)
print "======================================================================"
print "Identifying MPTCP Connections...."
for i in range(len(pkts)):
if(MPTCP_CapableACK in pkts[i] and pkts[i][TCPOption_MP].mptcp.subtype == 0):
count +=1 #Count the number of distinct MPTCP connections
#Compute the receiver's token
self.key_rcv = pkts[i][TCPOption_MP].mptcp.rcv_key
self.rcv_token, self.rcv_dsn = self.key2tokenAndDSN(self.key_rcv)
#Compute the sender's token
self.key_snd = pkts[i][TCPOption_MP].mptcp.snd_key
self.snd_token, self.snd_dsn = self.key2tokenAndDSN(self.key_snd)
print ("%i. New MPTCP Connection (Successful Handshake) src: %s; dest: %s; Sender's key: %s; Receiver's key: %s; Receivers Token (connectionID): %s; Sender's Token: %s" % (count, pkts[i][IP].src, pkts[i][IP].dst, pkts[i][TCPOption_MP].mptcp.snd_key, pkts[i][TCPOption_MP].mptcp.rcv_key, self.rcv_token, self.snd_token))
print "Total MPTCP Connections: %i" % count | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process_mptcp_pkt_from_client(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport):\n dss, dack, dss_is_8_bytes = get_dss_and_data_ack(tcp)\n conn_id = acks[saddr, sport, daddr, dport][co.CONN_ID]\n flow_id = acks[saddr, sport, daddr, dport][co.FLOW_ID]\n if conn_acks[conn_id][co.S2C] >= 0:\n max_val = 2**64 if dss_is_8_bytes else 2**32\n bytes_acked = (dack - conn_acks[conn_id][co.S2C]) % max_val\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n if (size_payload > 0 and dss in conn_acks[conn_id][SEQ_C2S] and (dss - conn_acks[conn_id][co.C2S]) % max_val < 2000000000\n and (mptcp_connections[conn_id].attr[co.C2S][co.TIME_LAST_ACK_TCP] - ts_delta).total_seconds() > 0.0):\n # This is a DSS retransmission! (take into account the seq overflow)\n mptcp_connections[conn_id].attr[co.C2S][co.RETRANS_DSS].append((ts_delta, flow_id, dss, conn_acks[conn_id][HSEQ_C2S][dss][2],\n ts_delta - conn_acks[conn_id][HSEQ_C2S][dss][0],\n ts_delta - conn_acks[conn_id][HSEQ_C2S][dss][1],\n ts_delta - conn_acks[conn_id][co.TIMESTAMP][CLIENT]))\n conn_acks[conn_id][HSEQ_C2S][dss][1] = ts_delta\n elif size_payload > 0 and dss is not False:\n conn_acks[conn_id][SEQ_C2S].add(dss)\n conn_acks[conn_id][HSEQ_C2S][dss] = [ts_delta, ts_delta, ts_delta - conn_acks[conn_id][co.TIMESTAMP][CLIENT]]\n\n conn_acks[conn_id][co.S2C] = dack\n acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT] = ts_delta\n conn_acks[conn_id][co.TIMESTAMP][CLIENT] = ts_delta",
"def process_mptcp_pkt_from_server(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport):\n dss, dack, dss_is_8_bytes = get_dss_and_data_ack(tcp)\n conn_id = acks[daddr, dport, saddr, sport][co.CONN_ID]\n flow_id = acks[daddr, dport, saddr, sport][co.FLOW_ID]\n if conn_acks[conn_id][co.C2S] >= 0:\n max_val = 2**64 if dss_is_8_bytes else 2**32\n bytes_acked = (dack - conn_acks[conn_id][co.C2S]) % max_val\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n if (size_payload > 0 and dss in conn_acks[conn_id][SEQ_S2C] and (dss - conn_acks[conn_id][co.S2C]) % max_val < 2000000000\n and (mptcp_connections[conn_id].attr[co.S2C][co.TIME_LAST_ACK_TCP] - ts_delta).total_seconds() > 0.0):\n # This is a DSS retransmission!\n mptcp_connections[conn_id].attr[co.S2C][co.RETRANS_DSS].append((ts_delta, flow_id, dss, conn_acks[conn_id][HSEQ_S2C][dss][2],\n ts_delta - conn_acks[conn_id][HSEQ_S2C][dss][0],\n ts_delta - conn_acks[conn_id][HSEQ_S2C][dss][1],\n ts_delta - conn_acks[conn_id][co.TIMESTAMP][SERVER]))\n conn_acks[conn_id][HSEQ_S2C][dss][1] = ts_delta\n elif size_payload > 0 and dss is not False:\n conn_acks[conn_id][SEQ_S2C].add(dss)\n conn_acks[conn_id][HSEQ_S2C][dss] = [ts_delta, ts_delta, ts_delta - conn_acks[conn_id][co.TIMESTAMP][SERVER]]\n\n conn_acks[conn_id][co.C2S] = dack\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta\n conn_acks[conn_id][co.TIMESTAMP][SERVER] = ts_delta",
"def process_mptcp_first_syn(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport, black_list, fast_conns, ts_syn_timeout, ts_timeout):\n # The sender of the first SYN is the client\n # Check if the connection is black listed or not\n conn_id = False\n conn_candidates = fast_conns.get((saddr, daddr, sport, dport), [])\n min_delta = ts_syn_timeout\n for start, duration, cid, fid in conn_candidates:\n if (co.START in mptcp_connections[cid].flows[fid].attr\n and abs((ts_delta - mptcp_connections[cid].flows[fid].attr[co.START]).total_seconds()) < min_delta):\n conn_id = cid\n flow_id = fid\n min_delta = abs((ts_delta - mptcp_connections[cid].flows[fid].attr[co.START]).total_seconds())\n\n if not conn_id:\n black_list.add((saddr, sport, daddr, dport))\n return\n elif conn_id and (saddr, sport, daddr, dport) in black_list:\n black_list.remove((saddr, sport, daddr, dport))\n\n if ((saddr, sport, daddr, dport) in acks and (ts_delta - acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT]).total_seconds() <= ts_syn_timeout\n and acks[saddr, sport, daddr, dport][co.S2C] == -1) and conn_id in conn_acks:\n # SYN retransmission! But do nothing particular\n acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT] = ts_delta\n conn_acks[conn_id][co.TIMESTAMP][CLIENT] = ts_delta\n else:\n acks[saddr, sport, daddr, dport] = {co.C2S: -1, co.S2C: -1, co.TIMESTAMP: {CLIENT: ts_delta, SERVER: None}, co.CONN_ID: conn_id,\n co.FLOW_ID: flow_id}\n conn_acks[conn_id] = {co.C2S: -1, co.S2C: -1, co.TIMESTAMP: {CLIENT: ts_delta, SERVER: None}, SEQ_C2S: set(), SEQ_S2C: set(), HSEQ_C2S: {},\n HSEQ_S2C: {}}",
"def compute_mptcp_dss_retransmissions(pcap_filepath, mptcp_connections, fast_conns, ts_syn_timeout=6.0, ts_timeout=3600.0):\n print(\"Computing MPTCP DSS retransmissions for\", pcap_filepath)\n acks = {}\n conn_acks = {}\n # Avoid processing packets that do not belong to any analyzed TCP connection\n black_list = set()\n pcap_file = open(pcap_filepath)\n pcap = dpkt.pcap.Reader(pcap_file)\n count = 0\n for ts, buf in pcap:\n ts_delta = get_ts_delta(ts)\n count += 1\n if count % 100000 == 0:\n print(count)\n eth = dpkt.ethernet.Ethernet(buf)\n if type(eth.data) == dpkt.ip.IP or type(eth.data) == dpkt.ip6.IP6:\n ip = eth.data\n if type(ip.data) == dpkt.tcp.TCP:\n tcp = ip.data\n fin_flag = (tcp.flags & dpkt.tcp.TH_FIN) != 0\n syn_flag = (tcp.flags & dpkt.tcp.TH_SYN) != 0\n rst_flag = (tcp.flags & dpkt.tcp.TH_RST) != 0\n ack_flag = (tcp.flags & dpkt.tcp.TH_ACK) != 0\n\n saddr, daddr, sport, dport = get_ips_and_ports(eth, ip, tcp)\n\n if syn_flag and not ack_flag and not fin_flag and not rst_flag:\n process_mptcp_first_syn(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport, black_list, fast_conns,\n ts_syn_timeout, ts_timeout)\n\n elif (saddr, sport, daddr, dport) in black_list:\n continue\n\n elif syn_flag and ack_flag and not fin_flag and not rst_flag:\n process_mptcp_syn_ack(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport, black_list, fast_conns,\n ts_syn_timeout, ts_timeout)\n\n elif not syn_flag and not rst_flag and ack_flag:\n if (saddr, sport, daddr, dport) in acks:\n process_mptcp_pkt_from_client(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport)\n\n elif (daddr, dport, saddr, sport) in acks:\n process_mptcp_pkt_from_server(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport)\n else:\n # Silently ignore those packets\n # print(saddr, sport, daddr, dport, \"haven't seen beginning...\")\n continue\n\n pcap_file.close()",
"def process_mptcp_syn_ack(ts_delta, acks, conn_acks, mptcp_connections, tcp, ip, saddr, daddr, sport, dport, black_list, fast_conns, ts_syn_timeout, ts_timeout):\n # The sender of the SYN/ACK is the server\n if (daddr, dport, saddr, sport) in acks and ((ts_delta - acks[daddr, dport, saddr, sport][co.TIMESTAMP][CLIENT]).total_seconds() < ts_timeout\n and acks[daddr, dport, saddr, sport][co.C2S] == -1):\n # Better to check, if not seen, maybe uncomplete TCP connection\n acks[daddr, dport, saddr, sport][co.C2S] = tcp.ack\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta\n conn_acks[acks[daddr, dport, saddr, sport][co.CONN_ID]][co.TIMESTAMP][SERVER] = ts_delta\n\n elif (daddr, dport, saddr, sport) in acks and ((ts_delta - acks[daddr, dport, saddr, sport][co.TIMESTAMP][CLIENT]).total_seconds() < ts_timeout\n and tcp.ack == acks[daddr, dport, saddr, sport][co.C2S]):\n # SYN/ACK retransmission! But don't do anything special\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta\n conn_acks[acks[daddr, dport, saddr, sport][co.CONN_ID]][co.TIMESTAMP][SERVER] = ts_delta",
"def _parse(self):\n \n # HUA determine the host ip address\n # read 20 packages and set the most frequent one\n ips_dict = {}\n count = 0\n for raw_packet in self.raw_packets:\n if count > 100: break\n ethernet = Ethernet(raw_packet[0:14])\n if(ethernet.type != 'IP'):\n continue\n ip = Ip(raw_packet[14:])\n if(ip.protocol != 'TCP') :\n continue\n if(ip.src not in ips_dict):\n ips_dict[ip.src] = 0\n ips_dict[ip.src] += 1\n if(ip.dst not in ips_dict):\n ips_dict[ip.dst] = 0\n ips_dict[ip.dst] += 1\n # get the most frequent one\n max_appear = 0\n ip = None\n for key, value in ips_dict.items():\n if value > max_appear:\n ip = key\n max_appear = value\n\n global _device_ip\n if not self.enableFilter or not _device_ip:\n _device_ip = ip\n\n global _tcp_buf\n _tcp_buf = {}\n number = 0\n self.begin_ts = self.packet_headers[-1]['ts']\n rcount = 0\n for raw_packet in self.raw_packets:\n pcap_packet = Pcap_packet()\n pcap_packet.pcap_num = rcount#number # add one to be consistent with wireshark\n pcap_packet.top_layer = 1\n pcap_packet.ethernet = Ethernet(raw_packet[0:14])\n \n #skip the packet that is not ip packet\n \n rcount += 1\n\n if (pcap_packet.ethernet.type != 'IP'):\n continue\n\n pcap_packet.top_layer = 2\n pcap_packet.ip = Ip(raw_packet[14:])\n\n\n\n\n # just collect the packets between \n \n if self.enableFilter and not (pcap_packet.ip.src == _device_ip and pcap_packet.ip.dst == SERVER_IP) \\\n and not (pcap_packet.ip.dst == _device_ip and pcap_packet.ip.src == SERVER_IP):\n #print \"Ignore ip not ok\"\n continue\n '''\n if rcount < 10 or rcount > 2600:\n print 'rcount %d, time %d ---: %f' % (rcount, number, self.packet_headers[rcount - 1]['ts'] - self._ts_base)\n '''\n \n self.pcap_packets.append(pcap_packet)\n \n\n #skip the packet that is not tcp message\n if (pcap_packet.ip.protocol != 'TCP'):\n continue\n \n\n\n pcap_packet.top_layer = 3\n pcap_packet.tcp = Tcp(pcap_packet.ip, number)\n\n if pcap_packet.ip.src == _device_ip:\n pcap_packet.tcp.direction = \"out\"\n else:\n pcap_packet.tcp.direction = \"in\"\n\n\n #dispatch the tcp into tcp streams\n self._add_pkt_into_tcp_stream(pcap_packet, number)\n \n #reassemble tcp packet\n self._tcp_reassemble(pcap_packet.pcap_num, pcap_packet.ip.src, pcap_packet.ip.dst, pcap_packet.tcp)\n number += 1\n #endof for\n #flush the tcp_buf, other wise it will lose some http response\n for sockets in _tcp_buf.keys():\n self._tcp_flush(sockets)\n del _tcp_buf[sockets]",
"def payload_data(self, pkts):\n\n\t\t#Get all the payload bytes exchanged over MPTCP connections\n\t\tpayload_bytes = 0\n\t\tprint \"Determining the number of payload bytes excluding headers....\"\n\t\t#DSS = 0x2\n\t\tfor i in range(len(pkts)):\n\t\t\tif(TCPOption_MP in pkts[i] and pkts[i][TCPOption_MP].mptcp.subtype == 2 and Raw in pkts[i]):\n\t\t\t\tpayload_bytes += len(pkts[i][Raw].load)\n\t\t\t\t#print(\"DSN: %s; subflow_seqnum: %s; Data(bytes): %s\" % (pkts[i][TCPOption_MP].mptcp.dsn, pkts[i][TCPOption_MP].mptcp.subflow_seqnum, len(pkts[i][Raw].load)))\n\n\t\tprint \"Total Number of payload bytes in the file (entire MPTCP connections) excluding headers): %s\" % (payload_bytes)\n\t\t#MPTCP WITH SUBFLOW CONNECTIONS\n\t\t#MPTCP_JOINs = 0x1\n\t\tprint \"============================================================\"\n\t\tprint \"SUBFLOW Connections with their respective MPTCP connection (identified by connectionID)\"\n\t\tfor i in range(len(pkts)):\n\n\t\t\t#Initial Join Message\n\t\t\t#rcv_token Identifies the connection to which the subflow belongs: connectionID\n\t\t\tif(MPTCP_JoinSYN in pkts[i] and pkts[i][TCPOption_MP].mptcp.subtype == 1):\n\t\t\t\tprint(\"New subflow: connectionID: %s; src: %s; dest: %s; snd_nonce: %s\" % (pkts[i][TCPOption_MP].mptcp.rcv_token, pkts[i][IP].src, pkts[i][IP].dst, pkts[i][TCPOption_MP].mptcp.snd_nonce))\n\n\t\t#TODO: Now Need to track per-connection and per-subflow state",
"def copy_info_to_mptcp_connections(connections, mptcp_connections, failed_conns, acksize_all, acksize_all_mptcp, flow_name, fast_conns=None):\n connection = connections[flow_name]\n conn_id, flow_id = get_flow_name_connection_optimized(connection, mptcp_connections, fast_conns=fast_conns)\n if isinstance(conn_id, (int, long)):\n mptcp_connections[conn_id].flows[flow_id].subflow_id = flow_name\n mptcp_connections[conn_id].flows[flow_id].attr[co.TCP_COMPLETE] = connection.flow.attr[co.TCP_COMPLETE]\n mptcp_connections[conn_id].flows[flow_id].attr[co.START] = connection.flow.attr[co.START]\n mptcp_connections[conn_id].flows[flow_id].attr[co.DURATION] = connection.flow.attr[co.DURATION]\n if co.BACKUP in connection.attr:\n mptcp_connections[conn_id].flows[flow_id].attr[co.BACKUP] = connection.attr[co.BACKUP]\n if co.SOCKS_PORT in connection.attr:\n mptcp_connections[conn_id].flows[flow_id].attr[co.SOCKS_PORT] = connection.attr[co.SOCKS_PORT]\n mptcp_connections[conn_id].flows[flow_id].attr[co.SOCKS_DADDR] = connection.attr[co.SOCKS_DADDR]\n if co.SOCKS_PORT not in mptcp_connections[conn_id].attr:\n mptcp_connections[conn_id].attr[co.SOCKS_PORT] = connection.attr[co.SOCKS_PORT]\n mptcp_connections[conn_id].attr[co.SOCKS_DADDR] = connection.attr[co.SOCKS_DADDR]\n\n elif not mptcp_connections[conn_id].attr[co.SOCKS_PORT] == connection.attr[co.SOCKS_PORT] or not mptcp_connections[conn_id].attr[co.SOCKS_DADDR] == connection.attr[co.SOCKS_DADDR]:\n print(\"DIFFERENT SOCKS PORT...\", mptcp_connections[conn_id].attr[co.SOCKS_PORT], connection.attr[co.SOCKS_PORT], mptcp_connections[conn_id].attr[co.SOCKS_DADDR], connection.attr[co.SOCKS_DADDR], conn_id, flow_id)\n\n for direction in co.DIRECTIONS:\n for attr in connection.flow.attr[direction]:\n mptcp_connections[conn_id].flows[flow_id].attr[direction][attr] = connection.flow.attr[direction][attr]\n\n if flow_name in acksize_all[direction]:\n if conn_id not in acksize_all_mptcp[direction]:\n acksize_all_mptcp[direction][conn_id] = {}\n\n acksize_all_mptcp[direction][conn_id][flow_id] = acksize_all[direction][flow_name]\n\n else:\n # This is a TCPConnection that failed to be a MPTCP subflow: add it in failed_conns\n failed_conns[connection.conn_id] = connection\n\n return conn_id, flow_id",
"def transmitPollAck(): \n global data\n DW1000.newTransmit()\n data[0] = C.POLL_ACK\n data[17] = anchorID #data[17] is tag Id data[18] is anchor Id\n data[18] = tagID #data[17] is tag Id data[18] is anchor Id\n DW1000.setDelay(REPLY_DELAY_TIME_US, C.MICROSECONDS)\n DW1000.setData(data, LEN_DATA)\n DW1000.startTransmit()",
"def process_pkt_from_client(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag):\n if acks[saddr, sport, daddr, dport][co.S2C] >= 0:\n conn_id = acks[saddr, sport, daddr, dport][co.CONN_ID]\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_ACK_TCP] = ts_delta\n if fin_flag:\n connections[conn_id].flow.attr[co.S2C][co.TIME_FIN_ACK_TCP] = ts_delta\n\n bytes_acked = (tcp.ack - acks[saddr, sport, daddr, dport][co.S2C]) % 4294967296\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n increment_value_dict(nb_acks[co.S2C][conn_id], bytes_acked)\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n # If SOCKS command\n if size_payload == 7 and connections[conn_id].attr.get(co.SOCKS_PORT, None) is None:\n crypted_socks_cmd = tcp.data\n # This is possible because of packet stripping\n if len(crypted_socks_cmd) == 7:\n decrypted_socks_cmd = socks_parser.decode(crypted_socks_cmd)\n if decrypted_socks_cmd[0] == b'\\x01': # Connect\n connections[conn_id].attr[co.SOCKS_DADDR] = socks_parser.get_ip_address(decrypted_socks_cmd)\n connections[conn_id].attr[co.SOCKS_PORT] = socks_parser.get_port_number(decrypted_socks_cmd)\n\n if size_payload > 0 and tcp.seq in acks[saddr, sport, daddr, dport][SEQ_C2S]:\n # This is a retransmission! (take into account the seq overflow)\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.C2S][co.TIMESTAMP_RETRANS].append((ts_delta,\n ts_delta - acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][0],\n ts_delta - acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][1],\n ts_delta - acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT]))\n acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq][1] = ts_delta\n elif size_payload > 0:\n acks[saddr, sport, daddr, dport][SEQ_C2S].add(tcp.seq)\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_PAYLD_TCP] = ts_delta\n acks[saddr, sport, daddr, dport][HSEQ_C2S][tcp.seq] = [ts_delta, ts_delta]\n # Don't think will face this issue\n# if len(acks[saddr, sport, daddr, dport][SEQ][co.C2S]) >= 3000000:\n# for x in range(50000):\n# acks[saddr, sport, daddr, dport][SEQ][co.C2S].popleft()\n\n acks[saddr, sport, daddr, dport][co.S2C] = tcp.ack\n acks[saddr, sport, daddr, dport][co.TIMESTAMP][CLIENT] = ts_delta",
"def compute_tcp_acks_retrans(pcap_filepath, connections, inverse_conns, ts_syn_timeout=6.0, ts_timeout=3600.0):\n print(\"Computing TCP ack sizes for\", pcap_filepath)\n nb_acks = {co.C2S: {}, co.S2C: {}}\n acks = {}\n # Avoid processing packets that do not belong to any analyzed TCP connection\n black_list = set()\n pcap_file = open(pcap_filepath)\n pcap = dpkt.pcap.Reader(pcap_file)\n count = 0\n try:\n for ts, buf in pcap:\n ts_delta = get_ts_delta(ts)\n count += 1\n if count % 100000 == 0:\n print(count)\n # Check if linux cooked capture\n if pcap.datalink() == dpkt.pcap.DLT_LINUX_SLL:\n eth = dpkt.sll.SLL(buf)\n else:\n eth = dpkt.ethernet.Ethernet(buf)\n if type(eth.data) == dpkt.ip.IP or type(eth.data) == dpkt.ip6.IP6:\n ip = eth.data\n if type(ip.data) == dpkt.tcp.TCP:\n tcp = ip.data\n fin_flag = (tcp.flags & dpkt.tcp.TH_FIN) != 0\n syn_flag = (tcp.flags & dpkt.tcp.TH_SYN) != 0\n rst_flag = (tcp.flags & dpkt.tcp.TH_RST) != 0\n ack_flag = (tcp.flags & dpkt.tcp.TH_ACK) != 0\n\n saddr, daddr, sport, dport = get_ips_and_ports(eth, ip, tcp)\n if syn_flag and not ack_flag and not fin_flag and not rst_flag:\n process_first_syn(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, black_list, inverse_conns,\n ts_syn_timeout, ts_timeout)\n\n elif (saddr, sport, daddr, dport) in black_list:\n continue\n\n elif syn_flag and ack_flag and not fin_flag and not rst_flag:\n process_syn_ack(ts_delta, acks, nb_acks, connections, tcp, saddr, ip, daddr, sport, dport, black_list, inverse_conns,\n ts_syn_timeout, ts_timeout)\n\n elif not syn_flag and not rst_flag and ack_flag:\n if (saddr, sport, daddr, dport) in acks:\n process_pkt_from_client(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag)\n\n elif (daddr, dport, saddr, sport) in acks:\n process_pkt_from_server(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag)\n else:\n # Silently ignore those packets\n # print(saddr, sport, daddr, dport, \"haven't seen beginning...\")\n continue\n\n except dpkt.NeedData as e:\n print(e, \": trying to continue...\", file=sys.stderr)\n finally:\n pcap_file.close()\n\n return nb_acks",
"def process_pkt_from_server(ts_delta, acks, nb_acks, connections, tcp, ip, saddr, daddr, sport, dport, fin_flag):\n if acks[daddr, dport, saddr, sport][co.C2S] >= 0:\n conn_id = acks[daddr, dport, saddr, sport][co.CONN_ID]\n connections[conn_id].flow.attr[co.C2S][co.TIME_LAST_ACK_TCP] = ts_delta\n if fin_flag:\n connections[conn_id].flow.attr[co.C2S][co.TIME_FIN_ACK_TCP] = ts_delta\n\n bytes_acked = (tcp.ack - acks[daddr, dport, saddr, sport][co.C2S]) % 4294967296\n if bytes_acked >= 2000000000:\n # Ack of 2GB or more is just not possible here\n return\n\n increment_value_dict(nb_acks[co.C2S][conn_id], bytes_acked)\n size_payload = ip.len - ip.hl * 4 - tcp.off * 4\n\n if size_payload > 0 and tcp.seq in acks[daddr, dport, saddr, sport][SEQ_S2C]:\n # This is a retransmission!\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.S2C][co.TIMESTAMP_RETRANS].append((ts_delta,\n ts_delta - acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq][0],\n ts_delta - acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq][1],\n ts_delta - acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER]))\n acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq][1] = ts_delta\n elif size_payload > 0:\n acks[daddr, dport, saddr, sport][SEQ_S2C].add(tcp.seq)\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = ts_delta\n connections[conn_id].flow.attr[co.S2C][co.TIME_LAST_PAYLD_TCP] = ts_delta\n acks[daddr, dport, saddr, sport][HSEQ_S2C][tcp.seq] = [ts_delta, ts_delta]\n # Don't think will face this issue\n# if len(acks[daddr, dport, saddr, sport][SEQ][co.S2C]) >= 3000000:\n# for x in range(50000):\n# acks[daddr, dport, saddr, sport][SEQ][co.S2C].popleft()\n\n acks[daddr, dport, saddr, sport][co.C2S] = tcp.ack\n acks[daddr, dport, saddr, sport][co.TIMESTAMP][SERVER] = ts_delta",
"def RETRANSMIT(self):\n\n ##############################################\n # retransmit all the unacknowledged packets #\n # (all the packets currently in self.buffer) #\n ##############################################\n \n if(self.timeout_hanjing):\n #If we are coming from the timeout state, retransmit all the buffer\n for k,v in self.buffer.items():\n if(self.SACK == 0):\n header_GBN = GBN(type = 'data', len = len(v), hlen = 6, num = k, win = self.win)\n else:\n header_GBN = GBN(type = 'data', options = 1, len = len(v), hlen = 6, num = k, win = self.win)\n send(IP(src = self.sender, dst = self.receiver) / header_GBN / v)\n log.debug(\"Sending packet number: %s\", k)\n \n if ((self.Q_3_2 == 1) and (self.dup_ack_hanjing == True) and (self.timeout_hanjing == False)):\n #just retransmit the packet that has been ack'ed 3 times consequtively\n header_GBN = GBN(type = 'data', len = len(self.buffer[self.unack]), hlen = 6, num = self.unack, win = self.win)\n send(IP(src = self.sender, dst = self.receiver) / header_GBN / self.buffer[self.unack])\n log.debug(\"Sending packet number: %s\", self.unack)\n \n #Question 3.3\n if(self.SACK == 1 and (self.timeout_hanjing == False) and (self.hlen > 6)):\n if(self.hlen == 9):\n optionalHeader_list = list(range(self.ledge1, self.ledge1 + self.len1)) \n if(self.hlen == 12):\n optionalHeader_list = list(range(self.ledge1, self.ledge1 + self.len1)) + list(range(self.ledge2, self.ledge2 + self.len2)) \n if(self.hlen == 15):\n optionalHeader_list = list(range(self.ledge1, self.ledge1 + self.len1)) + list(range(self.ledge2, self.ledge2 + self.len2)) + list(range(self.ledge3, self.ledge3 + self.len3)) \n \n for i in optionalHeader_list:\n optionalHeader_list[optionalHeader_list.index(i)] = i % 2**self.n_bits\n \n #We need to find the difference between the sender buffer, and the optionalHeader_list\n Sender_buffer_keys = list(self.buffer.keys()) \n log.debug(\"The sender buffer: %s\", Sender_buffer_keys)\n #Trimmed_sender_buffer includes the buffer list only up to the last packet number in the optional header list)\n trimmed_sender_buffer = Sender_buffer_keys[:Sender_buffer_keys.index(optionalHeader_list[-1])+1]\n #Retrans_list is the list of keys to be retransmitted\n log.debug(\"Trimmed Sender Buffer: %s\", trimmed_sender_buffer)\n log.debug(\"Optional Header List: %s\", optionalHeader_list)\n Retrans_list = [item for item in trimmed_sender_buffer if item not in optionalHeader_list]\n log.debug(\"SACK: packets should be retransmitted: %s\", Retrans_list)\n for i in Retrans_list:\n header_GBN = GBN(type = 'data', options = 1 , len = len(self.buffer[i]), hlen = 6, num = i, win = self.win)\n send(IP(src = self.sender, dst = self.receiver) / header_GBN / self.buffer[i])\n log.debug(\"SACK Retransmission: Sending packet number: %s\", i)\n # back to SEND state\n self.dup_ack_hanjing = False\n self.timeout_hanjing = False\n raise self.SEND()",
"def print_connection_being_established(pkt):\n print_headers(pkt, overwrite_min=0)\n print(green(\"!!!! New TCP/OpenFlow Connection being established!!\\n\"))",
"def transmitPollAck(): \r\n global data\r\n DW1000.newTransmit()\r\n data[0] = C.POLL_ACK\r\n DW1000.setDelay(REPLY_DELAY_TIME_US, C.MICROSECONDS)\r\n DW1000.setData(data, LEN_DATA)\r\n DW1000.startTransmit()",
"def parse_packets(pcap):\n # For each packet in the pcap process the contents\n flow_Info = []\n times = 0\n for timestamp, buf in pcap:\n times += 1\n tmp_flow_Info = {}\n\n # Unpack the Ethernet frame (mac src/dst, ethertype)\n eth = dpkt.ethernet.Ethernet(buf)\n # Unpack the data whthin the Ethernet frame (the IP packet)\n ip = eth.data\n\n # if protocol(ip.p) is not UDP(17) ,skip this packet\n if ip.p != 17:\n continue\n\n udp = ip.data\n # Temp_data = parse_data(eth.data.udp.data)\n # Filter CoAP by port\n if(udp.sport != 5683 or udp.dport != 5683):\n continue\n\n str_udp_data = parse_data(eth.data.udp.data)\n # skip packets of Non_confirmable\n if str_udp_data[0] == '5': \n continue\n\n cycle = 0\n index = 0\n Udp_data = []\n \n len_str_udp_data = len(str_udp_data)\n while cycle < (len_str_udp_data//3+1):\n # Udp_data.append(int('0x'+Str_Udp_data[index:index + 2], 16))\n Udp_data.append(int('0x' + str_udp_data[index:index + 2], 16))\n cycle += 1\n index += 3\n tmp_flow_Info['udp_data'] = (Udp_data)\n\n # confirmable or ack\n tmp_flow_Info['Coap_type'] = str_udp_data[0]\n #print(str_udp_data) \n \n # skip space and get \"Message ID\" \n HexMide = str_udp_data[6:8] + str_udp_data[9:11]\n tmp_flow_Info['Mid'] = int('0x'+HexMide, 16)\n\n tmp_flow_Info['Timestamp'] = str(datetime.datetime.fromtimestamp(timestamp))\n # print('Ethernet Frame: ', mac_addr(eth.src), mac_addr(eth.dst), eth.type)\n tmp_flow_Info['src'] = inet_to_str(ip.src)\n tmp_flow_Info['dst'] = inet_to_str(ip.dst)\n\n tmp_flow_Info['sport'] = udp.sport\n tmp_flow_Info['dport'] = udp.dport\n flow_Info.append(tmp_flow_Info)\n\n return flow_Info",
"def Connect(self):\r\n #sleep(1)\r\n #self.src_ref = randint(1, 20)\r\n self.src_ref = 10\r\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.s.settimeout(self.timeout)\r\n self.s.connect((self.ip, self.port))\r\n self.s.send(TPKTPacket(COTPConnectionPacket(self.dst_ref,\r\n self.src_ref,\r\n self.dst_tsap,\r\n self.src_tsap,\r\n 0x0a)).pack())\r\n reply = self.s.recv(1024)\r\n _ = COTPConnectionPacket().unpack(TPKTPacket().unpack(reply).data)\r\n\r\n self.NegotiatePDU()",
"def handshake(self):\n print(\"No: \"+str(len(self.threads)))\n indexes_to_del = []\n if len(self.threads)>2:\n raise IOError\n for i in range(0,len(self.threads)):\n if not self.threads[i].is_alive():\n indexes_to_del.append(i)\n \n for i in indexes_to_del:#do this otherwise if deleted above, out of index error occurs\n del self.threads[i]\n \n while True:\n data = self.s.recv(1024)\n if data ==\"O\":\n print(\"Hanshake Received\")\n return",
"def print_connection_terminated(pkt):\n print_headers(pkt, overwrite_min=0)\n print(red(\"!!!! Attention: TCP/OpenFlow Connection Terminated!!\\n\"))",
"def describe_connections_on_interconnect(interconnectId=None):\n pass",
"def get_dss_and_data_ack(tcp):\n dss, dack, dss_is_8_bytes = False, False, False\n opt_list = dpkt.tcp.parse_opts(tcp.opts)\n for option_num, option_content in opt_list:\n # Only interested in MPTCP with subtype 2\n if option_num == 30 and len(option_content):\n if ord(option_content[0]) == 32:\n flags = ord(option_content[1])\n dss_is_8_bytes = (flags & 0x08) != 0\n dss_is_present = (flags & 0x04) != 0\n dack_is_8_bytes = (flags & 0x02) != 0\n dack_is_present = (flags & 0x01) != 0\n if dack_is_present and not dss_is_present:\n range_max = 8 if dack_is_8_bytes else 4\n dack = 0\n for i in range(range_max):\n dack = dack * 256 + ord(option_content[2 + i])\n\n elif dss_is_present and dack_is_present:\n range_max_dack = 8 if dack_is_8_bytes else 4\n dack = 0\n for i in range(range_max_dack):\n dack = dack * 256 + ord(option_content[2 + i])\n\n start_dss = 2 + range_max_dack\n range_max_dss = 8 if dss_is_8_bytes else 4\n dss = 0\n for i in range(range_max_dss):\n dss = dss * 256 + ord(option_content[start_dss + i])\n\n elif dss_is_present and not dack_is_present:\n global dss_not_ack_warning\n if not dss_not_ack_warning:\n print(\"Case where dss_is_present and dack is not present (not compliant with Linux implementation): continue\", file=sys.stderr)\n dss_not_ack_warning = True\n\n start_dss = 2\n range_max_dss = 8 if dss_is_8_bytes else 4\n dss = 0\n for i in range(range_max_dss):\n dss = dss * 256 + ord(option_content[start_dss + i])\n\n return dss, dack, dss_is_8_bytes",
"def handleSent(): \r\n global sentAck\r\n sentAck = True",
"def hmVerifyMsgCRCOK(destination, protocol, source, expectedFunction, expectedLength, datal) :\r\n badresponse = 0\r\n if protocol == constants.HMV3_ID:\r\n checksum = datal[len(datal)-2:]\r\n rxmsg = datal[:len(datal)-2]\r\n crc = crc16() # Initialises the CRC\r\n expectedchecksum = crc.run(rxmsg)\r\n if expectedchecksum == checksum:\r\n print(\"CRC is correct\")\r\n else:\r\n print(\"CRC is INCORRECT\")\r\n s = \"Incorrect CRC: %s Expected: %s \\n\" % (datal, expectedchecksum)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n # Check the response\r\n dest_addr = datal[0]\r\n frame_len_l = datal[1]\r\n frame_len_h = datal[2]\r\n frame_len = (frame_len_h << 8) | frame_len_l\r\n source_addr = datal[3]\r\n func_code = datal[4]\r\n\r\n\r\n\r\n if (dest_addr != 129 and dest_addr != 160):\r\n print(\"dest_addr is ILLEGAL\")\r\n s = \"%s : Controller %s : Illegal Dest Addr: %s\\n\" % (localtime, loop, dest_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (dest_addr != destination):\r\n print(\"dest_addr is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect Dest Addr: %s\\n\" % (localtime, loop, dest_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (source_addr < 1 or source_addr > 32):\r\n print(\"source_addr is ILLEGAL\")\r\n s = \"%s : Controller %s : Illegal Src Addr: %s\\n\" % (localtime, loop, source_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (source_addr != source):\r\n print(\"source addr is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect Src Addr: %s\\n\" % (localtime, loop, source_addr)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code != constants.FUNC_WRITE and func_code != constants.FUNC_READ):\r\n print(\"Func Code is UNKNWON\")\r\n s = \"%s : Controller %s : Unknown Func Code: %s\\n\" % (localtime, loop, func_code)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code != expectedFunction):\r\n print(\"Func Code is UNEXPECTED\")\r\n s = \"%s : Controller %s : Unexpected Func Code: %s\\n\" % (localtime, loop, func_code)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (func_code == constants.FUNC_WRITE and frame_len != 7):\r\n # Reply to Write is always 7 long\r\n print(\"response length is INCORRECT\")\r\n s = \"%s : Controller %s : Incorrect length: %s\\n\" % (localtime, loop, frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n if (len(datal) != frame_len):\r\n print(\"response length MISMATCHES header\")\r\n s = \"%s : Controller %s : Mismatch length: %s %s\\n\" % (localtime, loop, len(datal), frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\r\n \"\"\"if (func_code == constants.FUNC_READ and expectedLength !=len(datal) ):\r\n # Read response length is wrong\r\n print(\"response length not EXPECTED value\")\r\n print(len(datal))\r\n print(datal)\r\n s = \"%s : Controller %s : Incorrect length: %s\\n\" % (localtime, loop, frame_len)\r\n sys.stderr.write(s)\r\n badresponse += 1\r\n\"\"\"\r\n if (badresponse == 0):\r\n return True\r\n else:\r\n return False\r\n\r\n else:\r\n assert 0, \"Un-supported protocol found %s\" % protocol",
"def extract_tstat_data_tcp_complete(filename, connections, conn_id):\n log_file = open(filename)\n data = log_file.readlines()\n for line in data:\n # Case 1: line start with #; skip it\n if not line.startswith(\"#\"):\n # Case 2: extract info from the line\n info = line.split()\n conn_id += 1\n connection = TCPConnection(conn_id)\n connection.flow.attr[co.TCP_COMPLETE] = True\n connection.flow.attr[co.SADDR] = co.long_ipv6_address(info[0])\n connection.flow.attr[co.DADDR] = co.long_ipv6_address(info[14])\n connection.flow.attr[co.SPORT] = info[1]\n connection.flow.attr[co.DPORT] = info[15]\n connection.flow.detect_ipv4()\n connection.flow.indicates_wifi_or_cell()\n # Except RTT, all time (in ms in tstat) shoud be converted into seconds\n connection.flow.attr[co.START] = timedelta(seconds=float(info[28])/1000)\n connection.flow.attr[co.DURATION] = float(info[30]) / 1000.0\n connection.flow.attr[co.C2S][co.PACKS] = int(info[2])\n connection.flow.attr[co.S2C][co.PACKS] = int(info[16])\n # Note that this count is about unique data bytes (sent in the payload)\n connection.flow.attr[co.C2S][co.BYTES] = int(info[6])\n connection.flow.attr[co.S2C][co.BYTES] = int(info[20])\n # This is about actual data bytes (sent in the payload, including retransmissions)\n connection.flow.attr[co.C2S][co.BYTES_DATA] = int(info[8])\n connection.flow.attr[co.S2C][co.BYTES_DATA] = int(info[22])\n\n connection.flow.attr[co.C2S][co.PACKS_RETRANS] = int(info[9])\n connection.flow.attr[co.S2C][co.PACKS_RETRANS] = int(info[23])\n connection.flow.attr[co.C2S][co.BYTES_RETRANS] = int(info[10])\n connection.flow.attr[co.S2C][co.BYTES_RETRANS] = int(info[24])\n\n connection.flow.attr[co.C2S][co.PACKS_OOO] = int(info[11])\n connection.flow.attr[co.S2C][co.PACKS_OOO] = int(info[25])\n\n connection.flow.attr[co.C2S][co.NB_SYN] = int(info[12])\n connection.flow.attr[co.S2C][co.NB_SYN] = int(info[26])\n connection.flow.attr[co.C2S][co.NB_FIN] = int(info[13])\n connection.flow.attr[co.S2C][co.NB_FIN] = int(info[27])\n connection.flow.attr[co.C2S][co.NB_RST] = int(info[3])\n connection.flow.attr[co.S2C][co.NB_RST] = int(info[17])\n connection.flow.attr[co.C2S][co.NB_ACK] = int(info[4])\n connection.flow.attr[co.S2C][co.NB_ACK] = int(info[18])\n\n # Except RTT, all time (in ms in tstat) shoud be converted into seconds\n connection.flow.attr[co.C2S][co.TIME_FIRST_PAYLD] = float(info[31]) / 1000.0\n connection.flow.attr[co.S2C][co.TIME_FIRST_PAYLD] = float(info[32]) / 1000.0\n connection.flow.attr[co.C2S][co.TIME_LAST_PAYLD] = float(info[33]) / 1000.0\n connection.flow.attr[co.S2C][co.TIME_LAST_PAYLD] = float(info[34]) / 1000.0\n connection.flow.attr[co.C2S][co.TIME_FIRST_ACK] = float(info[35]) / 1000.0\n connection.flow.attr[co.S2C][co.TIME_FIRST_ACK] = float(info[36]) / 1000.0\n\n connection.flow.attr[co.C2S][co.RTT_SAMPLES] = int(info[48])\n connection.flow.attr[co.S2C][co.RTT_SAMPLES] = int(info[55])\n connection.flow.attr[co.C2S][co.RTT_MIN] = float(info[45])\n connection.flow.attr[co.S2C][co.RTT_MIN] = float(info[52])\n connection.flow.attr[co.C2S][co.RTT_MAX] = float(info[46])\n connection.flow.attr[co.S2C][co.RTT_MAX] = float(info[53])\n connection.flow.attr[co.C2S][co.RTT_AVG] = float(info[44])\n connection.flow.attr[co.S2C][co.RTT_AVG] = float(info[51])\n connection.flow.attr[co.C2S][co.RTT_STDEV] = float(info[47])\n connection.flow.attr[co.S2C][co.RTT_STDEV] = float(info[54])\n connection.flow.attr[co.C2S][co.TTL_MIN] = float(info[49])\n connection.flow.attr[co.S2C][co.TTL_MIN] = float(info[56])\n 
connection.flow.attr[co.C2S][co.TTL_MAX] = float(info[50])\n connection.flow.attr[co.S2C][co.TTL_MAX] = float(info[57])\n\n connection.flow.attr[co.C2S][co.SS_MIN] = int(info[71])\n connection.flow.attr[co.S2C][co.SS_MIN] = int(info[94])\n connection.flow.attr[co.C2S][co.SS_MAX] = int(info[70])\n connection.flow.attr[co.S2C][co.SS_MAX] = int(info[93])\n\n connection.flow.attr[co.C2S][co.CWIN_MIN] = int(info[76])\n connection.flow.attr[co.S2C][co.CWIN_MIN] = int(info[99])\n connection.flow.attr[co.C2S][co.CWIN_MAX] = int(info[75])\n connection.flow.attr[co.S2C][co.CWIN_MAX] = int(info[98])\n\n connection.flow.attr[co.C2S][co.NB_RTX_RTO] = int(info[78])\n connection.flow.attr[co.S2C][co.NB_RTX_RTO] = int(info[101])\n connection.flow.attr[co.C2S][co.NB_RTX_FR] = int(info[79])\n connection.flow.attr[co.S2C][co.NB_RTX_FR] = int(info[102])\n connection.flow.attr[co.C2S][co.NB_REORDERING] = int(info[80])\n connection.flow.attr[co.S2C][co.NB_REORDERING] = int(info[103])\n connection.flow.attr[co.C2S][co.NB_NET_DUP] = int(info[81])\n connection.flow.attr[co.S2C][co.NB_NET_DUP] = int(info[104])\n connection.flow.attr[co.C2S][co.NB_UNKNOWN] = int(info[82])\n connection.flow.attr[co.S2C][co.NB_UNKNOWN] = int(info[105])\n connection.flow.attr[co.C2S][co.NB_FLOW_CONTROL] = int(info[83])\n connection.flow.attr[co.S2C][co.NB_FLOW_CONTROL] = int(info[106])\n connection.flow.attr[co.C2S][co.NB_UNNECE_RTX_RTO] = int(info[84])\n connection.flow.attr[co.S2C][co.NB_UNNECE_RTX_RTO] = int(info[107])\n connection.flow.attr[co.C2S][co.NB_UNNECE_RTX_FR] = int(info[85])\n connection.flow.attr[co.S2C][co.NB_UNNECE_RTX_FR] = int(info[108])\n\n connection.attr[co.C2S][co.BYTES] = {}\n connection.attr[co.S2C][co.BYTES] = {}\n\n connection.flow.attr[co.C2S][co.TIMESTAMP_RETRANS] = []\n connection.flow.attr[co.S2C][co.TIMESTAMP_RETRANS] = []\n\n connection.flow.attr[co.C2S][co.TIME_FIN_ACK_TCP] = timedelta(0)\n connection.flow.attr[co.S2C][co.TIME_FIN_ACK_TCP] = timedelta(0)\n\n connection.flow.attr[co.C2S][co.TIME_LAST_ACK_TCP] = timedelta(0)\n connection.flow.attr[co.S2C][co.TIME_LAST_ACK_TCP] = timedelta(0)\n\n connection.flow.attr[co.C2S][co.TIME_LAST_PAYLD_TCP] = timedelta(0)\n connection.flow.attr[co.S2C][co.TIME_LAST_PAYLD_TCP] = timedelta(0)\n\n connection.flow.attr[co.C2S][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = timedelta(0)\n connection.flow.attr[co.S2C][co.TIME_LAST_PAYLD_WITH_RETRANS_TCP] = timedelta(0)\n\n connections[conn_id] = connection\n\n log_file.close()\n return connections, conn_id",
"def delcomptcprxpackets(self) :\n\t\ttry :\n\t\t\treturn self._delcomptcprxpackets\n\t\texcept Exception as e:\n\t\t\traise e",
"def handleReceived():\r\n global receivedAck\r\n receivedAck = True",
"def handleReceived(): \n global receivedAck\n receivedAck = True",
"def snmpqosqos_sch_session_conns(self) :\n\t\ttry :\n\t\t\treturn self._snmpqosqos_sch_session_conns\n\t\texcept Exception as e:\n\t\t\traise e",
"def on_ctcp(self, raw_msg, source, msg, **kwargs):",
"def describe_connections(connectionId=None):\n pass"
] | [
"0.5511609",
"0.5456236",
"0.5397558",
"0.53674424",
"0.5341668",
"0.5300616",
"0.52610534",
"0.5249031",
"0.5168756",
"0.5108904",
"0.5044106",
"0.5025528",
"0.50196034",
"0.49723715",
"0.4895106",
"0.48823294",
"0.4870531",
"0.48611027",
"0.4818591",
"0.4818538",
"0.48022884",
"0.47958198",
"0.47927332",
"0.47881642",
"0.47863764",
"0.4780633",
"0.47642824",
"0.47637206",
"0.47362474",
"0.47344252"
] | 0.7726427 | 0 |
return the current schema_org schema version | def get_schema_org_version():
return _get_schemaorg_version() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_schemaorg_version():\n try:\n version = get_latest_schemaorg_version()\n except ValueError:\n version = SCHEMAORG_DEFAULT_VERSION\n return version",
"def schema_version(self):\n # return self._parsed[\"schemaVersion\"]\n # does not exist in manifest reference\n pass",
"def schema_version(self):\n return self._parsed[\"schemaVersion\"]",
"def schema_version(self) -> str:\n return self._pipeline_definition.get(\"version\")",
"def get_datasetSchemaVersion(self):\n\t\treturn self.dsDoc['about']['datasetSchemaVersion']",
"def get_problemSchemaVersion(self):\n\t\treturn self.prDoc['about']['problemSchemaVersion']",
"def get_latest_schemaorg_version():\n tag_name = requests.get(SCHEMAORG_VERSION_URL).json()[\"tag_name\"] # \"v13.0-release\"\n mat = re.match(r\"v([\\d.]+)-release\", tag_name)\n if not mat:\n raise ValueError(f\"Unrecognized release tag name {tag_name}\")\n latest = mat.group(1)\n return latest",
"def schema_version(conn):\n with Tx(conn) as c:\n try:\n c.execute('SELECT version FROM meta LIMIT 1', ['version'])\n except psycopg2.ProgrammingError:\n return 0\n if c.rowcount == 0:\n return 0\n return c.fetchone()['version']",
"def schema_transformation_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"schema_transformation_version\")",
"def query_version(self):\n return self.connection.cursor().execute('SELECT version()').fetchone()[0]",
"def db_version():\n return IMPL.db_version()",
"def get_version(self):\n return 0",
"def version(self):\n return self.get_current_version()",
"def version(self):\r\n print migration.db_version()",
"def version(self):\n if \"version\" in self._prop_dict:\n return self._prop_dict[\"version\"]\n else:\n return None",
"def version(self):\n return self._get(\"version\")",
"def get_version(self):\n pass",
"def __get_db_version_int(self):\r\n query = QtSql.QSqlQuery('PRAGMA user_version')\r\n query.first()\r\n return query.value(0).toInt()[0]",
"def get_version(self):\n return self.version",
"def getversion(self):\n return self.__version",
"def get_schema(): # noqa: WPS440\n return config.DEFAULT_SCHEMA",
"def database_version(self) -> str:\n return pulumi.get(self, \"database_version\")",
"def version(self):\n if not self._version:\n self._version = self._get_version()\n\n return self._version",
"def model_version(self) -> str:\n return pulumi.get(self, \"model_version\")",
"def get_version():\n global __model\n return __model.__version__",
"def version(self):\n self.cursor.execute(\"SELECT VERSION()\")\n # Fetch a single row using fetchone() method.\n data = self.cursor.fetchone()\n print(\"Database version : %s \" % data)",
"def version(self):\n if not hasattr(self, \"_version_string\"):\n return None\n return semantic_version.Version(self._version_string)",
"def get_version(self):\n return self._version",
"def get_version(self):\n return self._version",
"def _get_schema(want_version):\n for maj, min in _GET_SCHEMA_MICROVERSIONS:\n if want_version.matches((maj, min)):\n return getattr(schema, 'GET_SCHEMA_%d_%d' % (maj, min))\n\n return schema.GET_SCHEMA_1_10"
] | [
"0.84921485",
"0.82677555",
"0.8089187",
"0.7736187",
"0.74551374",
"0.7435045",
"0.73047084",
"0.6960835",
"0.68983686",
"0.68766624",
"0.68467546",
"0.68244046",
"0.6751349",
"0.67256296",
"0.6715355",
"0.6658414",
"0.6645252",
"0.6636151",
"0.6614081",
"0.66119516",
"0.6587621",
"0.6576962",
"0.65766615",
"0.6544186",
"0.6543358",
"0.65220594",
"0.6519874",
"0.6515619",
"0.6515619",
"0.65141004"
] | 0.89709204 | 0 |
get all classes and label them if they are referenced if include_ref is False, only "defined" classes are included. | def get_classes(self, include_ref=True):
defs = self._get_class_defs()
ans = {}
ans.update(defs)
if include_ref:
refs = self._get_class_refs()
ans.update(refs)
return list(ans.values()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_class_refs(self):\n return list(self._get_class_refs().values())",
"def process_class_list(self, module, classes):",
"def _load_classes(self):\n classdocs = self._docset.get_classes()\n for classdoc in classdocs:\n files = [self._docmap[filedoc] for filedoc in classdoc.get_files()]\n classobj = Class(classdoc, files)\n self._docmap[classdoc] = classobj\n self._classes.add(classobj)",
"def return_classes(self):\n\n\t\t \n\t\t \n\t\treturn self.classes",
"def classes(self):\n raise NotImplementedError(\"Please implement this yourself.\")",
"def addClassRef(clazz):\n\n global h_classes\n header = \"class %s;\" % clazz\n if not header in h_classes:\n h_classes.append(header)",
"def _class_list(parent, section, objects, refs):\n\n sec = etree.SubElement(parent, section, count=str(len(objects)))\n\n for cls, objs in _class_count(objects):\n obj = etree.SubElement(sec, \"Object\", type=cls, count=str(len(objs)))\n if refs:\n _class_list(obj, \"Referrers\", gc.get_referrers(*objs), False)",
"def classes(self):\r\n return self._classes",
"def classes(self):\n if self.classname:\n return [self.classname]\n return []",
"def child_classes(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n children = self.se.full_class_only_graph.successors(self.uri)\n result = restructure_output(self,\n children,\n inspect.stack()[0][3],\n self.output_type)\n return result",
"def get_classes(self):\n return self._classes",
"def _load_classes(self):\n\t\t# load class names (name -> label)\n\t\tcategories = self.coco.loadCats(self.coco.getCatIds())\n\t\tcategories.sort(key=lambda x: x['id'])\n\n\t\tself.classes \t\t\t\t= {}\n\t\tself.coco_labels \t\t\t= {}\n\t\tself.coco_labels_inverse \t= {}\n\t\tfor c in categories:\n\t\t\tself.coco_labels[len(self.classes)] = c['id']\n\t\t\tself.coco_labels_inverse[c['id']] = len(self.classes)\n\t\t\tself.classes[c['name']] = len(self.classes)\n\t\tself.labels = {}\n\t\tfor key, value in self.classes.items():\n\t\t\tself.labels[value] = key\n\n\t\tprint(self.coco_labels)\n\t\tprint(self.coco_labels_inverse)\n\t\tprint(self.classes)\n\t\tprint(self.labels)",
"def get_classes(self):\n return",
"def dump_class_ref_counts(referrer_depth=2, cutoff=500, rcutoff=1,\r\n ignore=('tuple', 'list', 'function', 'dict',\r\n 'builtin_function_or_method',\r\n 'wrapper_descriptor')):\r\n import gc\r\n __dump_class_ref_counts(gc, referrer_depth, cutoff, rcutoff, ignore)\r\n gc.collect()\r\n plog(\"NOTICE\", \"GC: Done.\")",
"def class_labels(self):\n return self._class_labels",
"def classes(self):\n if not hasattr(self, '_unique_classes'):\n # build when we don't have\n self._unique_classes = self.data['label'].unique()\n self._unique_classes.sort()\n\n ret = self._unique_classes\n return ret",
"def getClasses(self):\n self._process()\n return self._sets",
"def descendant_classes(self):\n response = check_defined(self, inspect.stack()[0][3])\n if not response:\n return response\n descendants = nx.descendants(self.se.full_class_only_graph,\n self.uri)\n result = restructure_output(self,\n descendants,\n inspect.stack()[0][3],\n self.output_type)\n return result",
"def load_classes(self):\n\t\t\t# Load class names (name -> label).\n\t\t\tcategories = self.coco.loadCats(self.coco.getCatIds())\n\t\t\tcategories.sort(key=lambda x: x['id'])\n\n\t\t\tself.classes = {}\n\t\t\tself.coco_labels = {}\n\t\t\tself.coco_labels_inverse = {}\n\t\t\tfor c in categories:\n\t\t\t\tself.coco_labels[len(self.classes)] = c['id']\n\t\t\t\tself.coco_labels_inverse[c['id']] = len(self.classes)\n\t\t\t\tself.classes[c['name']] = len(self.classes)\n\n\t\t\t# Also load the reverse (label -> name).\n\t\t\tself.labels = {}\n\t\t\tfor key, value in self.classes.items():\n\t\t\t\tself.labels[value] = key",
"def classes(self):\n if \"classes\" in self._prop_dict:\n return ClassesCollectionPage(self._prop_dict[\"classes\"])\n else:\n return None",
"def getByReferenceClassifiers(inpClassifiers, startExecCount=0):\n\toutVals = [_ByReferenceClassifier(inpClassifier, execCount=startExecCount) for inpClassifier in inpClassifiers]\n\treturn outVals",
"def get_classes_conditional(doxy_xml_files, cond):\n found = {}\n for xmlfile in doxy_xml_files:\n xml = lxml.etree.parse(xmlfile)\n classes = xml.xpath('.//compounddef[@kind=\"class\" or @kind=\"struct\"]')\n for cl in classes:\n if cond(cl):\n classname = cl.find('./compoundname')\n baseclasses = cl.xpath('./basecompoundref')\n membervars = cl.xpath('.//memberdef[@kind=\"variable\"]/name')\n\n # An exception: Members get attached to Graph classes\n # through this macro, and is not understood by\n # Doxygen, so we have to parse it outselves.\n graphvars = cl.xpath('.//memberdef[@kind=\"function\"]/name'\n +'[text()=\"INSTALL_GRAPH_PROPERTIES\"]')\n graphmems = []\n\n if len(graphvars)>0:\n r = re.compile('\\(\\(\\w+,\\s*[\\w: ]+,\\s*(\\w+)\\)\\)')\n for g in graphvars:\n for a in g.xpath('../argsstring'):\n graphmems += r.findall(a.text)\n # The INSTALL_GRAPH_PROPERTIES macro also adds a\n # bool called \"dummy\"\n graphmems.append('dummy')\n\n location = cl.find('./location')\n found[classname.text] = (\n {'name': classname.text,\n 'bases': [base.text for base in baseclasses],\n 'members': [mem.text for mem in membervars] + graphmems,\n 'filepath': location.attrib['file'],\n 'line': int(location.attrib['line']),\n 'abstract': cl.xpath('@abstract=\"yes\"'),\n })\n return found",
"def classes(self):\n return self._.d",
"def find_references(self):\n cls = self.__class__\n nodes = []\n for sobj in self._std.FindDependances(self.get_sobj()):\n nodes.append(cls(self._std, self._bld, sobj.GetID()))\n return nodes",
"def _fill_class_dicts():\n global _taxonomy_classes\n global _data_classes\n if not _taxonomy_classes:\n _taxonomy_classes = get_taxonomies()\n if not _data_classes:\n stack = []\n next_module = data\n while next_module is not None:\n stack += _inspect_module(next_module)\n if stack:\n next_module = stack.pop()\n else:\n next_module = None",
"def relevant_classifications(self):\n return self.relevant_classes",
"def get_label_classes(scope, op, node_names=False):\n options = scope.get_options(op, dict(nocl=False))\n if options[\"nocl\"]:\n if len(op.classes_.shape) > 1 and op.classes_.shape[1] > 1:\n raise RuntimeError(\n \"Options 'nocl=True' is not implemented for multi-label \"\n \"classification (class: {}).\".format(op.__class__.__name__)\n )\n classes = np.arange(0, len(op.classes_))\n elif node_names:\n try:\n options = scope.get_options(op, dict(zipmap=False))\n zipcol = options[\"zipmap\"] == \"columns\"\n except NameError:\n zipcol = False\n if zipcol:\n clnames = op.classes_.ravel()\n if np.issubdtype(clnames.dtype, np.integer) or clnames.dtype == np.bool_:\n classes = np.array([\"i%d\" % c for c in clnames])\n else:\n classes = np.array([\"s%s\" % c for c in clnames])\n else:\n classes = op.classes_\n elif hasattr(op, \"classes_\"):\n classes = op.classes_\n elif hasattr(op, \"intercept_\"):\n classes = len(op.intercept_)\n elif hasattr(op, \"y_\"):\n # _ConstantPredictor\n classes = np.array(list(sorted(set(op.y_))))\n else:\n raise RuntimeError(\n \"No known ways to retrieve the number of classes for class %r.\"\n \"\" % type(op)\n )\n return classes",
"def import_all_known_classes(debug=False):\r\n\r\n output = {}\r\n for cls in KnownClass.objects:\r\n if debug:\r\n print \"Importing %s.%s\"%(cls.module_name, cls.class_name)\r\n x = get_class(cls.module_name, cls.class_name)\r\n output[(cls.module_name, cls.class_name)] = x()\r\n return output",
"def _get_classifers(self):\n return self.__classifers",
"def _get_classifers(self):\n return self.__classifers"
] | [
"0.6035254",
"0.58814853",
"0.5880051",
"0.5856091",
"0.57512456",
"0.5681698",
"0.5672593",
"0.5591788",
"0.5589396",
"0.55738693",
"0.55246097",
"0.55189526",
"0.5512281",
"0.5482113",
"0.54583037",
"0.5448857",
"0.54194885",
"0.5409159",
"0.53911316",
"0.5388025",
"0.53866357",
"0.5350316",
"0.5342173",
"0.5330371",
"0.5297328",
"0.52816486",
"0.5261616",
"0.5258449",
"0.52572185",
"0.52572185"
] | 0.7381208 | 0 |
return validation errors as a list of dictionaries | def get_validation_errors(self):
return [err.to_dict() for err in self._schema.validator.validation_errors] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def error_wrapper(x):\n errors = list()\n for error_key, error_list in list(x.items()):\n for error in error_list:\n if error_key == 'non_field_errors':\n errors.append(error)\n else:\n errors.append(\"%s: %s\" % (error_key, error))\n return errors",
"def filter_validation_errors(errors):\n error_messages = []\n for field, msgs in errors.items():\n if isinstance(msgs, dict):\n for f, m in msgs.items():\n error_messages.append(dict(\n field=f,\n message=m,\n code=error_codes['validation_error'],\n ))\n else:\n error_messages.append(dict(\n field=field,\n message=msgs,\n code=error_codes['validation_error'],\n ))\n return error_messages",
"def validation_errors(self):\n return self._validation_errors",
"def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages",
"def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages",
"def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages",
"def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages",
"def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f\"{field} : {error}\")\n return errorMessages",
"def errors(self) -> List[Error]:",
"def validation_errors_to_error_messages(validation_errors):\n error_messages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n error_messages.append(f\"{field}: {error}\")\n return error_messages",
"def validation_errors_to_error_messages(validation_errors):\n errorMessages = []\n for field in validation_errors:\n for error in validation_errors[field]:\n errorMessages.append(f'{field} : {error}')\n return errorMessages",
"def json(self):\n d = [err.json for err in self.errors]\n return d",
"def getErrorsList(self):\n return self.__errors",
"def errors(self):\n _errors = {}\n # pylint: disable=no-member\n for name, field in self._fields.items():\n if field.errors:\n _errors[name] = field.errors.pop()\n\n return _errors",
"def _pydantic_errors_to_validation_results(\n errors: list[dict | Exception] | ValidationError,\n file_path: Path,\n scope: Scope,\n) -> list[ValidationResult]:\n out = []\n for e in (\n errors.errors() if isinstance(errors, ValidationError) else cast(list, errors)\n ):\n if isinstance(e, Exception):\n message = getattr(e, \"message\", str(e))\n id = \"exception\"\n scope = Scope.FILE\n else:\n id = \".\".join(\n filter(\n bool,\n (\n \"dandischema\",\n e.get(\"type\", \"UNKNOWN\"),\n \"+\".join(e.get(\"loc\", [])),\n ),\n )\n )\n message = e.get(\"message\", e.get(\"msg\", None))\n out.append(\n ValidationResult(\n origin=ValidationOrigin(\n name=\"dandischema\",\n version=dandischema.__version__,\n ),\n severity=Severity.ERROR,\n id=id,\n scope=scope,\n path=file_path,\n message=message,\n # TODO? dataset_path=dataset_path,\n # TODO? dandiset_path=dandiset_path,\n )\n )\n return out",
"def getErrors(self) -> java.util.Collection:\n ...",
"def get_errors(self):\n return [result for result in self.values() if result.outcome == Result.ERROR]",
"def get_validation_errors(\n self,\n schema_version: Optional[str] = None,\n devel_debug: bool = False,\n ) -> list[ValidationResult]:\n ...",
"def GetAll(self):\n return self._errors.copy()",
"def validations(self):\n return self.container['validations']",
"def getErrors(self):\n return self.errors",
"def get_form_errors(form):\n all_errors = []\n for field in form.errors:\n all_errors += form.errors[field]\n return all_errors",
"def security_errors(self):\n errors = ErrorDict()\n for f in [\"honeypot\", \"timestamp\", \"security_hash\"]:\n if f in self.errors:\n errors[f] = self.errors[f]\n return errors",
"def errors(self):\n return self._errors",
"def errors(self):\n return self.__errors",
"def Errors(self):\r\n\t\treturn self._get_attribute('errors')",
"def errors(self):\r\n if not hasattr(self, '_errors_cache'):\r\n self._errors_cache = self.form.get_field_errors(self)\r\n return self._errors_cache",
"def errors (self):\n return self._errors",
"def errors (self):\n return self._errors",
"def failure(self, validation_failure):\n \n self.request.response.status_int = 400\n return validation_failure.error.asdict()"
] | [
"0.7443273",
"0.7419965",
"0.7331665",
"0.7269884",
"0.7269884",
"0.7269884",
"0.7269884",
"0.7269884",
"0.72689366",
"0.72655326",
"0.7202294",
"0.7177334",
"0.7172576",
"0.7163961",
"0.7127716",
"0.6941691",
"0.6911603",
"0.6893204",
"0.6834352",
"0.6751597",
"0.6711623",
"0.6692114",
"0.65932",
"0.6580191",
"0.65579456",
"0.6547798",
"0.6542113",
"0.6536023",
"0.6536023",
"0.6513981"
] | 0.84701467 | 0 |
Faster Wavelength selector. If passed lists it will return lists; if passed np arrays it will return arrays. Fastest is using np.ndarrays. fast_wav_selector is ~1000-2000x quicker than wav_selector | def fast_wav_selector(wav, flux, wav_min, wav_max):
if isinstance(wav, list): # if passed lists
wav_sel = [value for value in wav if(wav_min < value < wav_max)]
flux_sel = [value[1] for value in zip(wav,flux) if(wav_min < value[0] < wav_max)]
elif isinstance(wav, np.ndarray):
# Super Fast masking with numpy
mask = (wav > wav_min) & (wav < wav_max)
wav_sel = wav[mask]
flux_sel = flux[mask]
else:
raise TypeError("Unsupported input wav type")
return [wav_sel, flux_sel] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wav_selector(wav, flux, wav_min, wav_max, verbose=False):\n if isinstance(wav, list): # if passed lists\n wav_sel = [wav_val for wav_val in wav if (wav_min < wav_val < wav_max)]\n flux_sel = [flux_val for wav_val, flux_val in zip(wav,flux) if (wav_min < wav_val < wav_max)]\n elif isinstance(wav, np.ndarray):\n # Super Fast masking with numpy\n mask = (wav > wav_min) & (wav < wav_max)\n if verbose:\n print(\"mask=\", mask)\n print(\"len(mask)\", len(mask))\n print(\"wav\", wav)\n print(\"flux\", flux)\n wav_sel = wav[mask]\n flux_sel = flux[mask]\n else:\n raise TypeError(\"Unsupported input wav type\")\n return [wav_sel, flux_sel]",
"def GetSpectraFromIndexList(all_wl,all_spectra,idx_list):\n NBSPEC=len(all_spectra)\n \n \n all_wl_sel=[]\n all_spectra_sel=[]\n \n for idx in np.arange(0,NBSPEC):\n if idx in idx_list:\n all_wl_sel.append(all_wl[idx])\n all_spectra_sel.append(all_spectra[idx])\n return all_wl_sel,all_spectra_sel",
"def _choose_wavelength_slice(self, offset):\n if 'WAVE' not in self.axes_wcs.wcs.ctype:\n raise cu.CubeError(2, \"Spectral dimension not present\")\n if self.data.ndim == 4:\n raise cu.CubeError(4, \"Can only work with 3D cubes\")\n\n axis = -2 if self.axes_wcs.wcs.ctype[0] in ['TIME', 'UTC'] else -1\n arr = None\n length = self.data.shape[axis]\n if isinstance(offset, int) and offset >= 0 and offset < length:\n arr = self.data.take(offset, axis=axis)\n\n if isinstance(offset, u.Quantity):\n delta = self.axes_wcs.wcs.cdelt[-1 - axis] * u.m\n wloffset = offset.to(u.m) / delta\n wloffset = int(wloffset)\n if wloffset >= 0 and wloffset < self.data.shape[axis]:\n arr = self.data.take(wloffset, axis=axis)\n\n return arr",
"def wav_reader(directory):\n wav_list = find_wavs(directory)\n res_list = []\n\n for wav in wav_list:\n temp_list = [wav]\n\n if re.match(r'.*target1.*\\.wav$', wav):\n temp_list.append(True)\n else:\n temp_list.append(False)\n\n res_list.append(tuple(temp_list))\n\n return res_list",
"def torch_calc_spectrograms(waves, window_lengths, spectral_diffs=(0, 1),\r\n window_name='hann', use_mel_scale=True,\r\n proj_method='matmul', num_spec_bins=256,\r\n random_crop=True):\r\n # waves = [tf.squeeze(w, axis=-1) for w in waves]\r\n waves = [torch.squeeze(w, dim=-1) for w in waves]\r\n\r\n if window_name == 'hann':\r\n # windows = [tf.reshape(tf.signal.hann_window(wl, periodic=False), [1, 1, -1])\r\n # for wl in window_lengths]\r\n windows = [torch.reshape(torch.from_numpy(W.hann(wl)), [1, 1, -1])\r\n for wl in window_lengths]\r\n elif window_name is None:\r\n windows = [None] * len(window_lengths)\r\n else:\r\n raise ValueError('Unknown window function (%s).' % window_name)\r\n\r\n spec_len_wave = []\r\n for d in spectral_diffs:\r\n for length, window in zip(window_lengths, windows):\r\n\r\n wave_crops = waves\r\n for _ in range(d):\r\n wave_crops = [w[:, 1:] - w[:, :-1] for w in wave_crops]\r\n\r\n if random_crop:\r\n # wave_crops = aligned_random_crop(wave_crops, length)\r\n wave_crops = torch_aligned_random_crop(wave_crops, length)\r\n\r\n # frames = [tf.signal.frame(wc, length, length // 2) for wc in wave_crops]\r\n frames = [torch.tensor(librosa.util.frame(wc.numpy(),length,length//2)) for wc in wave_crops]\r\n # TODO: Whether this method is feasible (in the gradient part) remains to be verified\r\n if window is not None:\r\n frames = [f * window for f in frames]\r\n\r\n if proj_method == 'fft':\r\n # ffts = [tf.signal.rfft(f)[:, :, 1:] for f in frames]\r\n ffts = [torch.rfft(f,signal_ndim=1)[:, :, 1:] for f in frames]\r\n elif proj_method == 'matmul':\r\n # mat = get_spectral_matrix(length, num_spec_bins=num_spec_bins,\r\n # use_mel_scale=use_mel_scale)\r\n # ffts = [matmul_real_with_complex(f, mat) for f in frames]\r\n mat = torch_get_spectral_matrix(length, num_spec_bins=num_spec_bins,\r\n use_mel_scale=use_mel_scale)\r\n ffts = [torch_matmul_real_with_complex(f, mat) for f in frames]\r\n\r\n #sq_mag = lambda x: tf.square(tf.math.real(x)) + tf.square(tf.math.imag(x))\r\n sq_mag = lambda x: (torch.view_as_real(x)[:,0])**2 + (torch.view_as_real(x)[:,1])**2\r\n # torch.view_as_real() opreation need the last release edition of Pytorch 1.6.0\r\n specs_sq = [sq_mag(f) for f in ffts]\r\n\r\n if use_mel_scale and proj_method == 'fft':\r\n sample_rate = 24000\r\n upper_edge_hertz = sample_rate / 2.\r\n lower_edge_hertz = sample_rate / length\r\n # lin_to_mel = tf.signal.linear_to_mel_weight_matrix(\r\n # num_mel_bins=num_spec_bins,\r\n # num_spectrogram_bins=length // 2 + 1,\r\n # sample_rate=sample_rate,\r\n # lower_edge_hertz=lower_edge_hertz,\r\n # upper_edge_hertz=upper_edge_hertz,\r\n # dtype=tf.dtypes.float32)[1:]\r\n # specs_sq = [tf.matmul(s, lin_to_mel) for s in specs_sq]\r\n lin_to_mel = torch_build_mel_basis(\r\n num_mel_bins=num_spec_bins,\r\n num_spectrogram_bins=length,\r\n sample_rate=sample_rate,\r\n lower_edge_hertz=lower_edge_hertz,\r\n upper_edge_hertz=upper_edge_hertz,\r\n dtype=torch.float32)\r\n # TODO: I use librosa to build the mel filters here to instead, and i'm not sure whether this method works or not\r\n specs_sq = [torch.matmul(s, lin_to_mel) for s in specs_sq]\r\n\r\n # specs = [tf.sqrt(s+EPSILON) for s in specs_sq]\r\n specs = [torch.sqrt(s+EPSILON) for s in specs_sq]\r\n\r\n spec_len_wave.append(specs)\r\n\r\n spec_wave_len = zip(*spec_len_wave)\r\n return spec_wave_len",
"def process_audio_multiprocess(file_paths_arr,\n filt_type, filt_cutoff_freq, filt_order,\n trim_margin_left, trim_margin_right, trim_top_db, trim_window_length, trim_hop_length, trim_ref, trim_preemphasis_strength,\n SAMPLE_RATE=48000, MIN_SAMPLE_RATE=15999, BIT_DEPTH=2,\n ignore_dirs=[\"Noise samples\",\"_Noisy_\",\"_Very Noisy_\"], skip_existing=False,\n in_ext_=None, out_ext=\".wav\", use_tqdm=True, dump_sample_rates=True\n ):\n import soundfile as sf\n import scipy\n from scipy import signal\n \n if dump_sample_rates:\n sample_rates = {} # array of dicts. e.g: [{path 0: sample_rate 0}, {path 1: sample_rate 1}, {path 2: sample_rate 2}, ...]\n \n skip = 0\n prev_sr = 0\n iterator = tqdm(file_paths_arr, smoothing=0.0) if use_tqdm else file_paths_arr\n for file_path in iterator: # recursive directory search\n in_ext = in_ext_ if (in_ext_ is not None) else os.path.splitext(os.path.split(file_path)[-1])[-1] # get ext from file_path or use override.\n out_path = file_path.replace(in_ext,out_ext)\n if skip_existing and os.path.exists(out_path):\n continue\n if any([filter_dir in file_path for filter_dir in ignore_dirs]):\n continue\n \n # VCTK cleanup\n #if file_path.endswith(f\"_mic1{in_ext}\"):\n # os.rename(file_path, file_path.replace(f\"_mic1{in_ext}\",in_ext))\n #if file_path.endswith(f\"_mic2{in_ext}\"):\n # continue\n try:\n native_sound, native_SR = sf.read(file_path, always_2d=True)\n except RuntimeError as ex:\n print(f'\"{os.path.split(file_path)[-1]}\" failed to load and has been deleted.\\nDELETED PATH: \"{file_path}\"')\n os.unlink(file_path)\n #raise RuntimeError(ex)\n native_sound = native_sound[:,0]# take first channel (either mono or left audio channel)\n native_sound = np.asfortranarray(native_sound).astype('float64') # and ensure the audio is contiguous\n \n if native_SR < MIN_SAMPLE_RATE: # skip any files with native_SR below the minimum\n continue\n if native_SR != SAMPLE_RATE: # ensure all audio is same Sample Rate\n try:\n sound = librosa.core.resample(native_sound, native_SR, SAMPLE_RATE)\n except ValueError as ex:\n print(ex, file_path, native_SR, len(native_sound), sep=\"\\n\")\n raise ValueError(ex)\n else:\n sound = native_sound\n \n if dump_sample_rates:\n sample_rates[os.path.abspath(out_path)] = native_SR\n \n # 24 bit -> 16 bit, 32 bit -> 16 bit\n if max(np.amax(native_sound), -np.amin(native_sound)) > (2**23): # if samples exceed values possible at 24 bit\n sound = (sound / 2**(31-15))#.astype('int16') # change bit depth from 32 bit to 16 bit\n elif max(np.amax(native_sound), -np.amin(native_sound)) > (2**15): # if samples exceed values possible at 16 bit\n sound = (sound / 2**(23-15))#.astype('int16') # change bit depth from 24 bit to 16 bit\n \n # apply audio filters\n for type_, freq_, order_ in zip(filt_type, filt_cutoff_freq, filt_order): # eg[ ['lp'], [40], [10] ] # i.e [type, freq, strength]\n sos = signal.butter(order_, freq_, type_, fs=SAMPLE_RATE, output='sos') # calcuate filter somethings\n sound = signal.sosfilt(sos, sound) # apply filter\n \n # apply audio trimming\n for i, (margin_left_, margin_right_, top_db_, window_length_, hop_length_, ref_, preemphasis_strength_) in enumerate(zip(trim_margin_left, trim_margin_right, trim_top_db, trim_window_length, trim_hop_length, trim_ref, trim_preemphasis_strength)):\n if preemphasis_strength_:\n sound_filt = librosa.effects.preemphasis(sound, coef=preemphasis_strength_)\n _, index = librosa.effects.trim(sound_filt, top_db=top_db_, frame_length=window_length_, hop_length=hop_length_, ref=ref_) # gonna 
be a little messed up for different sampling rates\n else:\n _, index = librosa.effects.trim(sound, top_db=top_db_, frame_length=window_length_, hop_length=hop_length_, ref=ref_) # gonna be a little messed up for different sampling rates\n try:\n sound = sound[int(max(index[0]-margin_left_, 0)):int(index[1]+margin_right_)]\n except TypeError:\n print(f'Slice Left:\\n{max(index[0]-margin_left_, 0)}\\nSlice Right:\\n{index[1]+margin_right_}')\n assert len(sound), f\"Audio trimmed to 0 length by pass {i+1}\\nconfig = {[margin_left_, margin_right_, top_db_, window_length_, hop_length_, ref_]}\\nFile_Path = '{file_path}'\"\n \n # write updated audio to file\n if os.path.exists(out_path):\n os.unlink(out_path) # using unlink incase the out_path object is a symlink\n sf.write(out_path, sound, SAMPLE_RATE)\n \n if dump_sample_rates:\n return sample_rates",
"def make_wavetables(kernel: GPy.kern.Kern, n: int = 17, waveshaping: bool = False) -> List[np.ndarray]:\n wavetables = []\n\n if not waveshaping:\n cholesky = make_cov_cholesky(kernel)\n else:\n cholesky = make_cov_cholesky_waveshaping(kernel)\n for _ in range(n):\n wavetable = fast_normal_from_cholesky(cholesky)[0]\n wavetables.append(wavetable[:-1])\n\n return wavetables",
"def select(arrays, index):\n if arrays is None or any(i is None for i in arrays):\n return arrays\n return tuple(i.ravel()[index] for i in arrays)",
"def extract_features(audio_filename, args):\n #print(\"Extract_features\")\n spec_type = args['spec_type']\n\n if spec_type == 'cqt':\n bin_multiple = args['bin_multiple']\n max_midi = args['max_midi']\n min_midi = args['min_midi']\n note_range = max_midi - min_midi + 1\n sr = args['sr']\n hop_length = args['hop_length']\n window_size = args['window_size']\n\n bins_per_octave = 12 * bin_multiple # should be a multiple of 12\n n_bins = note_range * bin_multiple\n\n # down-sample,mono-channel\n y, _ = librosa.load(audio_filename, sr)\n # y: an np.ndarray[ shape=(n,) ] giving the audio time series. librosa.load automatically downsamples to the\n # required sample rate sr\n # doku on librosa.cqt:\n # https://librosa.github.io/librosa/generated/librosa.core.cqt.html?highlight=cqt#librosa.core.cqts\n S = librosa.cqt(y, fmin=librosa.midi_to_hz(min_midi), sr=sr, hop_length=hop_length,\n bins_per_octave=bins_per_octave, n_bins=n_bins)\n S = S.T\n S = np.abs(S)\n min_db = np.min(S)\n print(np.min(S), np.max(S), np.mean(S))\n S = np.pad(S, ((window_size // 2, window_size // 2), (0, 0)), 'constant', constant_values=min_db)\n\n windows = []\n\n # IMPORTANT NOTE:\n # Since we pad the the spectrogram frame,\n # the onset frames are actually `offset` frames.\n # To obtain a window of the center frame at each true index, we take a slice from i to i+window_size\n # starting at frame 0 of the padded spectrogram\n for i in range(S.shape[0] - window_size + 1):\n w = S[i:i + window_size, :]\n windows.append(w)\n\n # print inputs\n x = np.array(windows)\n return x\n\n else:\n print(\"WARNING: feature type \" + spec_type + \" not implemented.\")\n return 0",
"def dtw_list_store(source, target, source_list, target_list):\n\n dtw_source = []\n dtw_target = []\n\n fs, source = scipy.io.wavfile.read(source)\n fs, target = scipy.io.wavfile.read(target)\n\n\n #source = psf.mfcc(source, 16000)\n #target = psf.mfcc(target, 16000)\n\n source, energy = psf.fbank(source, 16000)\n target, energy = psf.fbank(target, 16000)\n\n distance, path = fastdtw(source, target, dist=euclidean)\n\n for vertex in path:\n dtw_source.append(source[vertex[0],:])\n dtw_target.append(target[vertex[1],:])\n\n dtw_source = np.array(dtw_source)\n dtw_target = np.array(dtw_target)\n\n\n source_list.append(dtw_source)\n target_list.append(dtw_target)",
"def morse_to_audio(words, playsound=None, name_file=\"output\\\\code_to_audio_output.wav\"):\n dot = wave.open(\"kropka.wav\", 'rb')\n dash = wave.open(\"kreska.wav\", 'rb')\n\n rate_dot = dot.getframerate()\n\n rate_dash = dash.getframerate()\n\n data_dot = dot.readframes(-1)\n data_dash = dash.readframes(-1)\n data_dot = np.fromstring(data_dot, 'Int16')\n data_dash = np.fromstring(data_dash, 'Int16')\n\n l2=len(data_dot)\n l1=len(data_dash)\n\n output=[]\n\n for element in words:\n # print(element)\n for i in range(0, len(element)):\n # print(element[i])\n if element[i] == '1':\n # playsound(\"kropka.wav\")\n output.extend(data_dot)\n\n if element[i] == '0':\n # playsound(\"kreska.wav\")\n output.extend(data_dash)\n if element[i] == ' ':\n output.extend(np.zeros(int(len(data_dash)))*3)\n if i != len(element) - 1:\n # time.sleep(dl_kropka)\n output.extend(np.zeros(int(len(data_dot))))\n else:\n continue\n # time.sleep(dl_kreska)\n output.extend(np.zeros(int(len(data_dash))))\n\n # print(output)\n\n wynik=np.asarray(output)\n\n wynik=np.array(wynik).astype('int16')\n\n wav.write(name_file, rate_dash, wynik)\n\n #plik sie nie odtwarza w windowsie ale w audacity jest już wyraźnym szumem XD\n\n dot.close()\n dash.close()",
"def get_data(self, wave):\n data = np.array([lfilter(self.b, self.a, wave[i]) for i in range(self.n_channels)])\n self.min_threshold = np.min(data)\n self.max_threshold = np.max(data)\n return data",
"def _pick_elements(self,regexp_ind,array_list):\r\n new_array_list = [] #New list with elements matching regexp_ind\r\n array_indices = [] #Indices that matches the arrays in new_array_list and array_list\r\n\r\n array_index = 0\r\n for array in array_list:\r\n _new = []\r\n for ai in array:\r\n if ai in regexp_ind:\r\n _new.append(ai)\r\n if len(_new):\r\n new_array_list.append(np.array(_new))\r\n array_indices.append(array_index)\r\n array_index += 1\r\n return new_array_list, array_indices",
"def load_all_audios(\n df: pd.DataFrame,\n *,\n target_sample_rate: int = None,\n mono: bool = constants.STEREO_TO_MONO_DEFAULT.value\n) -> Tuple[List[int], List[np.ndarray]]:\n file_list = list(df[\"audio_file_path\"])\n\n # audios is a list of (rate: int, data: np.ndarray)\n audios = load_multiple_wav(file_list)\n rate = [i[0] for i in audios]\n data = [i[1] for i in audios]\n\n # Convert to mono if needed\n if mono:\n data = p_map(stereo_to_mono, data, desc=\"Converting to mono...\")\n\n # Resample if needed\n if target_sample_rate is not None:\n data = p_map(resample, data, rate, [\n target_sample_rate for _ in data], desc=\"Resampling...\")\n rate = [target_sample_rate for _ in data]\n\n return rate, data",
"def arrays(self, select_output):\n pass",
"def analyzeWAV(inputFile):\n data, fs, nbits = audiolab.wavread(inputFile)\n samplingRate = fs\n return [data, samplingRate]",
"def my_get_paths_to_wavs(self, path):\n if self.label_type == 'original':\n # Just get all files\n return get_paths_to_wavs(path)\n elif self.label_type == 'four':\n # Filter out emotions Excitement and Frustration, leaving only\n # anger, happiness, neutral, sadness.\n new_paths_to_wavs = []\n paths_to_wavs, path_to_noise = get_paths_to_wavs(path)\n for file in paths_to_wavs:\n file_name = os.path.split(file)[1]\n emotion_label = self.get_emotion_label(file_name)\n if emotion_label in ('ang', 'hap', 'neu', 'sad'):\n new_paths_to_wavs.append(file)\n return new_paths_to_wavs, path_to_noise\n else:\n raise ValueError('Unknown label type! Should be either \"original\" for all samples, or \"four\" for anger, '\n 'happiness, neutral, sadness')",
"def pick_samples_1D(arr, indices, dtype = np.float32):\n\n n_samples = len(indices)\n\n arr_samples = np.zeros((n_samples), dtype = dtype)\n\n for i, index in enumerate(indices):\n arr_samples[i] = arr[index]\n\n return arr_samples",
"def wand_features(data, signals=EMG_SIGNALS, frame_len=EMG_FRAME_LEN,\n frame_shift=EMG_SHIFT_LEN, k=10):\n\n # samples is n_signals x n_timesteps\n samples = np.array(data[signals].T)\n phones = compute_subphones(data[\"phone\"])\n\n n_signals, n_timesteps = samples.shape[0], samples.shape[1]\n\n # Create the 17-point weighted moving average filter shown in Figure 4.2.\n ramp_filter = np.linspace(0,0.1,num=9)\n ma_filter = np.concatenate((ramp_filter[:-1], ramp_filter[::-1]))\n assert len(ma_filter) == 17\n \n n_frames = int(n_timesteps / frame_shift)\n n_feats = 5\n features = np.zeros((n_signals, n_feats, n_frames))\n frame_phones = []\n\n for i in range(n_signals):\n # Mean normalize\n x = samples[i] - np.mean(samples[i])\n\n # Apply moving average filter to compute low frequency signal w\n w = np.convolve(x, ma_filter, mode=\"same\")\n\n # Compute high frequency signal p\n p = x - w\n\n # Compute rectified signal r\n r = abs(p)\n\n # Ignore any frames that are incomplete (i.e. if n_timesteps is 2500 but \n # n_frames is 416 and frame_shift is 6, count up to 416*6 = 2496 rather\n # than 2500 timesteps, so we don't end up with a unit in the features that\n # is made up of an incomplete set of samples)\n for frame_id, t in enumerate(range(0, n_frames*frame_shift, frame_shift)):\n w_frame = w[t:t+frame_len]\n p_frame = p[t:t+frame_len]\n r_frame = r[t:t+frame_len]\n M_w = np.mean(w_frame) # Frame-based mean of w\n P_w = np.mean(w_frame * w_frame) # Frame-based power of w\n P_r = np.mean(r_frame * r_frame) # Frame-based power of r\n M_r = np.mean(r_frame) # Frame-based mean of r\n\n # Zero-crossing rate of p\n z_p = len(np.where(np.diff(np.signbit(p_frame)))[0]) / len(p_frame)\n\n features[i, :, frame_id] = np.array([M_w, P_w, P_r, z_p, M_r])\n mode_phone = mode(phones[t:t+frame_len])\n frame_phones.append(mode_phone)\n\n features = np.reshape(features, [-1, n_frames])\n\n features, labels = stack_context(features, k=k, labels=frame_phones)\n\n return features, labels",
"def get_trimmed_features(words, num_recordings, base_path=\"\", energy_threshold=0.001):\n\n features_by_word = []\n for i in range(len(words)):\n indexes = []\n feature_array = []\n for j in range(1, num_recordings[i] + 1):\n # Determine the path\n path = base_path + words[i] + str(j) + \".wav\"\n (rate, data) = get_sig(path)\n # features is all the audio features for a given file\n features = get_st_features(data, rate)[0]\n # features[1] is total frame energies\n # energy threshold of 0.001 is arbitrary\n indexes.append(relevant_indexes(features[1], energy_threshold))\n # Add features for this specific audio file to the feature array for this word\n feature_array.append(features)\n # Finds the minimum index of all start indexes\n min_index = sorted(indexes, key=lambda x: x[0])[0][0]\n # Finds the max index of all end indexes\n max_index = sorted(indexes, key=lambda x: x[1])[::-1][0][1]\n # Debug print statements commented out\n # print(\"min, max index for word\", words[i])\n # print(min_index, max_index)\n # Only take the frames between min index and max index for each sample word\n # Note: Potential for a bug; if maxIndex is outside the length of its frame array\n # To fix, need to pad the shorter recordings with extra data\n features_by_word.append([x[0:34, min_index:max_index].transpose() for x in feature_array])\n # print(numpy.shape(features_by_word[i]))\n # features_by_word is an array of len(words) cells\n # Each cell has num_recordings[i] elements corresponding to the number of recordings of each word words[i]\n # Each recording has the same number of frames for a given word, as determined by minIndex and maxIndex\n # for a given word.\n # Finally, each frame contains the 34 features from that frame's raw data samples\n return features_by_word",
"def get_1d_features(waveforms):\n durations = []\n PTratio= []\n repolarizationslope= []\n recoveryslope = []\n for i in range(len(waveforms)): \n waveform=waveforms[i,:] \n durations.append(get_waveform_duration(waveform))\n PTratio.append(get_waveform_PTratio(waveform))\n repolarizationslope.append(get_waveform_repolarizationslope(waveform))\n recoveryslope.append(get_waveform_recoveryslope(waveform))\n return np.array(durations), np.array(PTratio), np.array(repolarizationslope), np.array(recoveryslope)",
"def karplus_strong(wavetable,nSamples):\n samples = []\n current_sample = 0\n previous_value = 0\n while len(samples) < nSamples:\n wavetable[current_sample] = 0.5 * (wavetable[current_sample] + previous_value)\n samples.append(wavetable[current_sample])\n previous_value = samples[-1]\n current_sample += 1\n current_sample = current_sample % wavetable.size\n return np.array(samples)",
"def read_wav_data(timestamps, wavfile, snapint=[-0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3], fft_size=1024):\n sig, samplerate = librosa.load(wavfile, sr=None, mono=True)\n data = list()\n\n # normalize sound wave\n # sig = sig / np.sqrt(np.mean(sig**2, axis=0));\n # sig = sig / np.max(np.max(np.abs(sig), axis=0));\n sig = sig / np.max(np.abs(sig))\n\n # calc a length array\n tmpts = np.array(timestamps)\n timestamp_interval = tmpts[1:] - tmpts[:-1]\n timestamp_interval = np.append(timestamp_interval, timestamp_interval[-1])\n\n for sz in snapint:\n data_r = np.array([get_wav_data_at(max(0, min(len(sig) - fft_size, coord + timestamp_interval[i] * sz)),\n sig, samplerate, fft_size=fft_size, freq_high=samplerate//4) for i, coord in enumerate(timestamps)])\n data.append(data_r)\n\n raw_data = np.array(data)\n norm_data = np.tile(np.expand_dims(\n np.mean(raw_data, axis=1), 1), (1, raw_data.shape[1], 1, 1))\n std_data = np.tile(np.expand_dims(\n np.std(raw_data, axis=1), 1), (1, raw_data.shape[1], 1, 1))\n return (raw_data - norm_data) / std_data",
"def big_sweep(all_kernels: List[GPy.kern.Kern], path: str, ls_subdivisions: int = 16, n_wavetables: int = 7) -> None:\n out_long = WavFile(os.path.join(path, 'c.wav'))\n\n delta_t = 1.\n ls_start = 0.01\n ls_end = np.pi\n\n score = []\n time = 0.\n l_vals = np.geomspace(ls_start, ls_end, ls_subdivisions).tolist()\n\n n_combinations = 1000\n for _ in range(n_combinations):\n k1_str = random.choice(all_kernels)\n while True:\n k2_str = random.choice(all_kernels)\n if k2_str != k1_str:\n break\n l1 = random.choice(l_vals)\n l2 = random.choice(l_vals)\n l1_idx = l_vals.index(l1)\n l2_idx = l_vals.index(l2)\n\n k1 = kernel_for_string(k1_str, lengthscale=l1)\n k2 = kernel_for_string(k2_str, lengthscale=l2)\n operator = random.choice(['plus', 'times'])\n if operator == 'plus':\n kernel = k1 + k2\n else:\n kernel = k1 * k2\n\n waveshaping = random.choice([True, False])\n\n synth = GPSynth(kernel, out_rt=None, out_wav=out_long, n_wavetables=n_wavetables, waveshaping=waveshaping)\n print(f'waveshaping={waveshaping}', k1_str, l1, operator, k2_str, l2)\n for n_idx in range(1): # only one note to c.wav otherwise the file becomes too big for the web.\n score.append({\n 'kernel_1': k1_str,\n 'operator': 'plus',\n 'kernel_2': k2_str,\n 'lengthscale_1': l1,\n 'lengthscale_1_idx': l1_idx,\n 'lengthscale_2': l2,\n 'lengthscale_2_idx': l2_idx,\n 'waveshaping': waveshaping,\n 'time': time,\n 'note': n_idx\n })\n synth.note(60, delta_t)\n time += delta_t\n\n waveshaping_str = 'waveshaping_' if waveshaping else ''\n prefix = waveshaping_str + k1_str + f'_l{l1_idx:03d}(plus)' + k2_str + f'_l{l2_idx:03d}_n'\n synth.save_wavetables(os.path.join(path, 'samples'), prefix)\n\n for waveshaping in [False, True]:\n for kernel_str in all_kernels:\n ls_start = 0.01\n ls_end = np.pi\n l_vals = np.geomspace(ls_start, ls_end, ls_subdivisions)\n for l_idx, lengthscale in enumerate(l_vals):\n k = kernel_for_string(kernel_str, lengthscale=lengthscale)\n synth = GPSynth(k, out_rt=None, out_wav=out_long, n_wavetables=n_wavetables, waveshaping=waveshaping)\n print(f'waveshaping={waveshaping}', kernel_str, lengthscale, f'waveshaping = {waveshaping}')\n for n_idx in range(1): # only one note to c.wav otherwise the file becomes too big for the web.\n score.append({\n 'kernel_1': kernel_str,\n 'operator': '',\n 'kernel_2': '',\n 'lengthscale_1': lengthscale,\n 'lengthscale_1_idx': l_idx,\n 'lengthscale_2': -1,\n 'lengthscale_2_idx': -1,\n 'waveshaping': waveshaping,\n 'time': time,\n 'note': n_idx\n })\n synth.note(60, delta_t)\n time += delta_t\n\n waveshaping_str = 'waveshaping_' if waveshaping else ''\n prefix = waveshaping_str + kernel_str + f'_l{l_idx:03d}_n'\n synth.save_wavetables(os.path.join(path, 'samples'), prefix)\n\n with open(os.path.join(path, 'score.json'), 'w') as f:\n json.dump(score, f, indent=4)",
"def get_spectral_response(wavelengths_arr, stack):\n\n resolution = 1\n for i, re_index in enumerate(stack.index):\n step_size = stack.thickness.sum() / 2 ** 17\n z0 = np.linspace(0, stack.thickness[i], round(stack.thickness[i] / step_size))\n resolution += len(z0)\n\n electric_tot_te = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n electric_tot_tm = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n reflectivity_te = np.zeros(len(wavelengths_arr), dtype=complex)\n reflectivity_tm = np.zeros(len(wavelengths_arr), dtype=complex)\n transmission_te = np.zeros(len(wavelengths_arr), dtype=complex)\n transmission_tm = np.zeros(len(wavelengths_arr), dtype=complex)\n index_tot = np.zeros([resolution, len(wavelengths_arr)], dtype=complex)\n theta_tot = np.zeros([len(stack.index) + 1, wavelengths_arr.size], dtype=complex)\n\n a0 = 1 # Initial amplitude of electric field going toward the coating\n b0 = 0 # Initial amplitude of electric field going back the coating (if 0, no counter propagating light)\n theta = 0 # angle of the beam with respect to the coating\n\n for i, lam in enumerate(wavelengths_arr):\n # print a progressbar in the console\n print_progressbar(i, len(wavelengths_arr), suffix = '%')\n electric_tot_te[:, i], electric_tot_tm[:, i], reflectivity_te[i], reflectivity_tm[i], transmission_te[i], \\\n transmission_tm[i], index_tot, L, theta_tot = transfer_matrix_method(stack, a0, b0, lam, theta)\n return reflectivity_te, transmission_te, 1 - (reflectivity_te + transmission_te)",
"def torch_sample(array, indexes, desired_shape):\n torch_arr = torch.tensor(array, dtype=torch.float32)\n indexed = torch_arr[[indexes[0], indexes[1]]]\n return indexed.reshape(desired_shape)",
"def get_table_arrays(self):\n# ftable = np.asarray(self.filter_table)\n ftable = self.filter_table\n wavelength = []\n transmission = []\n for item in ftable:\n wavelength.append(item[0])\n transmission.append(item[1])\n wavelength = np.asarray(wavelength)\n transmission = np.asarray(transmission)\n return (wavelength, transmission)",
"def array2(self):\n print \"array2\"\n msgbox(whoami())\n #research\n inputLabelID = self.__needleLabelSelector.currentNode().GetID()\n labelnode=slicer.mrmlScene.GetNodeByID(inputLabelID)\n i = labelnode.GetImageData()\n shape = list(i.GetDimensions())\n shape.reverse()\n a = vtk.util.numpy_support.vtk_to_numpy(i.GetPointData().GetScalars()).reshape(shape)\n labels=[]\n val=[[0,0,0] for i in range(a.max()+1)]\n for i in xrange(2,a.max()+1):\n w =numpy.transpose(numpy.where(a==i))\n # labels.append(w.mean(axis=0))\n val[i]=[0,0,0]\n val[i][0]=w[int(round(w.shape[0]/2))][2]\n val[i][1]=w[int(round(w.shape[0]/2))][1]\n val[i][2]=w[int(round(w.shape[0]/2))][0]\n if val[i] not in self.previousValues:\n labels.append(val[i])\n self.previousValues.append(val[i])\n return labels",
"def preprocess_single_chords_list(self, window_size=5, flattened_window=True, hop_length=4410, to_skip=5, norm_to_C=False, spectrogram_generator=log_mel_spectrogram, skip_coef=1) -> tuple:\n prep_data = []\n prep_targets = []\n k = 0\n # Iterate over all audio files\n for audio, chords, keys in zip(self.DATA, self.CHORDS, self.KEYS):\n print(k)\n k = k+1\n # Get log mel spectrogram\n spectrogram = IsophonicsDataset.preprocess_audio(waveform=audio.WAVEFORM, sample_rate=audio.SAMPLE_RATE, spectrogram_generator=spectrogram_generator, nfft=self.NFFT, hop_length=hop_length, norm_to_C=norm_to_C, key=keys.get_first_key())\n spectrogram = np.array(spectrogram)\n spec_length, num_samples = spectrogram.shape\n\n # Collect data for each spectrogram sample\n j = 0 # labels index\n for i in [index for index in range(num_samples) if index%to_skip==0]:\n # Get data window with zero margin\n n_pre_zeros, window_indices, n_post_zeros = IsophonicsDataset.get_flatten_indices(i, num_samples, skip_coef, window_size)\n if flattened_window:\n prep_data.append(\n np.concatenate((\n np.zeros((n_pre_zeros, spec_length)),\n np.array(spectrogram[:, window_indices]).swapaxes(0,1),\n np.zeros((n_post_zeros, spec_length))\n ), axis = 0).flatten()\n )\n else:\n prep_data.append(\n np.concatenate((\n np.zeros((n_pre_zeros, spec_length)),\n np.array(spectrogram[:, window_indices]).swapaxes(0,1),\n np.zeros((n_post_zeros, spec_length))\n ), axis = 0)\n )\n\n\n # Get label\n second = float(i)/(float(self.SAMPLE_RATE) / float(hop_length))\n while j < len(chords.START) and second > chords.START[j] :\n j = j + 1\n if j == len(chords.START):\n prep_targets.append(Dataset.get_integered_chord(\"N\", norm_to_C, keys.get_first_key()))\n else:\n prep_targets.append(Dataset.get_integered_chord(chords.CHORD[j], norm_to_C, keys.get_first_key()))\n\n print(\"[INFO] The Isophonics Dataset was successfully preprocessed.\")\n return np.array(prep_data), np.array(prep_targets)",
"def get_indices(waves):\n prob_ = np.abs(waves)**2\n # batch\n prob = [np.sum(prob_[i:i+4,:], axis=0) for i in range(0, len(waves[:,0]), 4)]\n prob = np.asarray(prob)\n prob_tot = np.sum(prob, axis=0)\n \n # cutoff\n length = np.size(prob[:,0])\n len10 = int(length/10)\n flags = np.zeros((prob.shape[1]), dtype=int)\n # hinges\n # 50% within 10% of corners\n\n # surface\n # 50% within 10% of surfaces\n # not already labelled hinges\n prob_left = np.sum(prob[0:len10,:], axis=0)\n frac_left = prob_left/prob_tot\n\n prob_right = np.sum(prob[length-len10:length,:], axis=0)\n frac_right = np.divide(prob_right, prob_tot)\n\n for i in range(len(flags)):\n if frac_left[i]>0.5 or frac_right[i]>0.5:\n flags[i] = 1\n \n indices = [i for i, x in enumerate(flags) if x == 1]\n indices0 = [i for i, x in enumerate(flags) if x == 0]\n \n return indices, indices0"
] | [
"0.7074208",
"0.54926926",
"0.543694",
"0.5338126",
"0.5282017",
"0.5270855",
"0.51453614",
"0.5138771",
"0.5112849",
"0.5108346",
"0.5105792",
"0.5059946",
"0.5033493",
"0.5002626",
"0.49823514",
"0.49728918",
"0.4955809",
"0.4946704",
"0.49403378",
"0.49394882",
"0.48933354",
"0.48768497",
"0.48729745",
"0.48646903",
"0.4860009",
"0.4858704",
"0.48454726",
"0.48243514",
"0.48228773",
"0.48178145"
] | 0.70528 | 1 |
Downloads a FASTA file for the proteome by organism ID | def get_fasta_by_id(proteome_id, output_file):
taxid_pattern = re.compile('^\d{1,7}$')
# if not taxid_pattern.match(proteome_id): # fetch file from Uniprot
# raise ValueError(str(proteome_id) + ' is not a valid proteome identifier')
url = UNIPROT_BASE_URL + proteome_id
attempts = 0
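    # retry the download up to three times before giving up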
while attempts < 3:
try:
response = requests.get(url)
if response.status_code > 399 or response.status_code < 200:
                raise requests.HTTPError(str(response.status_code) + ': ' + str(response.content))
content = response.content
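            # a body this short cannot be a FASTA record, so treat it as not found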
if len(content) < 10:
raise FastaNotFoundError()
with open(output_file, 'w') as f:
f.write(content)
break
except requests.HTTPError as e:
attempts += 1
if attempts >= 3:
                raise FastaNotFoundError('Failed to download fasta: ' + str(response.status_code) + ' ' + str(response.content))
return output_file | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def seq_download(name, organism=\"Homo sapiens\", gaba=False):\n\n subunits = {\n \"Alpha-1\": \"Gabra1\",\n \"Alpha-2\": \"Gabra2\",\n \"Alpha-3\": \"Gabra3\",\n \"Alpha-4\": \"Gabra4\",\n \"Alpha-5\": \"Gabra5\",\n \"Alpha-6\": \"Gabra6\",\n \"Beta-1\": \"Gabrb1\",\n \"Beta-2\": \"Gabrb2\",\n \"Beta-3\": \"Gabrb3\",\n \"Gamma-1\": \"Gabrg1\",\n \"Gamma-2\": \"Gabrg2\",\n \"Gamma-3\": \"Gabrg3\",\n \"Delta\": \"Gabrd\",\n \"Pi\": \"Gabrp\",\n \"Rho-1\": \"Gabrr1\",\n \"Rho-2\": \"Gabrr2\",\n \"Rho-3\": \"Gabrr3\",\n \"Epsilon\": \"Gabre\",\n \"Theta\": \"Gabrq\"\n }\n if gaba:\n results = search(subunits[name])\n else:\n results = search(name)\n results = results[results[\"Organism\"].str.contains(organism, na=False)]\n if len(results):\n if gaba:\n target = results[results[\"Gene names\"].str.contains(subunits[name].upper())][\"Entry\"].max()\n else:\n target = results[results[\"Gene names\"].str.contains(name)][\"Entry\"].max()\n response = urlopen(f\"https://www.uniprot.org/uniprot/{target}.fasta\").read().decode(\"utf-8\")\n with open(\"Temp_seq.fasta\", \"w\") as file:\n file.write(response)\n seq = SeqIO.read(\"Temp_seq.fasta\", \"fasta\")\n os.remove(\"Temp_seq.fasta\")\n\n return seq\n\n else:\n return -1",
"def download_proteome(proteome_id, data_dir, domain=\"Eukaryota\"):\n base = (\"ftp://ftp.uniprot.org/pub/databases/uniprot/current_release/\"\n \"knowledgebase/reference_proteomes\")\n\n url = [base, domain, proteome_id + \".fasta.gz\"]\n outfile = os.path.join(data_dir, proteome_id + \".fasta\")\n\n with closing(request.urlopen(url)) as remote_handle:\n with open(remote_handle, \"rb\") as remote_file:\n mem_file = io.BytesIO(remote_file.read())\n\n with open(outfile, \"w\") as out, gzip.open(mem_file) as gz:\n outfile.write(gz.read())\n\n return outfile",
"def fetch_as_fasta(chrom,start,end,gindex,fname):\n \n # Print the sequence in fasta format.\n header = '>%s:%s-%s' % (chrom, start, end)\n fname.write('%s\\n%s\\n' % (header, gindex[chrom][start:end]))",
"def download(dataset_csv_path='ncbi_ids.csv', save_path='../data/RefSeq'):\n\n Entrez.email = \"[email protected]\"\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n with open(dataset_csv_path, 'r') as f:\n data = csv.reader(f)\n for row in data:\n microbe_id = row[0].split('.')[0]\n if os.path.exists(os.path.join(save_path, microbe_id + '.fasta')):\n continue\n\n handle = Entrez.efetch(db=\"nucleotide\", id=microbe_id,\n rettype=\"fasta\", retmode=\"text\")\n record = SeqIO.read(handle, \"fasta\")\n handle.close()\n SeqIO.write(record, os.path.join(save_path, microbe_id + \".fasta\"),\n \"fasta\")",
"def getFasta(fileGI,fileout = \"gis.fasta\", outfmt = \"fasta\"):\n myGIs = open(fileGI).read().split()\n gilist = [\",\".join(myGIs[i:i+500]) for i in range(0,len(myGIs),500)]\n from Bio import Entrez\n import time\n fout = open(fileout,\"w\")\n Entrez.email = \"[email protected]\"\n for ele in gilist:\n handle = Entrez.efetch(db = \"protein\", id = ele, rettype = outfmt, retmode = \"text\")\n fout.write(handle.read())\n time.sleep(3)\n fout.close()",
"def get_genome_download_link(self, name, mask=\"soft\", **kwargs):\n genome = self.genomes[safe(name)]\n division, is_vertebrate = self.get_division(name)\n\n # base directory of the genome\n ftp = \"http://ftp.ensemblgenomes.org\"\n if is_vertebrate:\n ftp = \"http://ftp.ensembl.org\"\n version = self.get_version(name, kwargs.get(\"version\"))\n div_path = \"\" if is_vertebrate else f\"/{division}\"\n lwr_name = genome[\"name\"]\n ftp_directory = f\"{ftp}/pub/release-{version}{div_path}/fasta/{lwr_name}/dna\"\n\n # this assembly has its own directory\n if name == \"GRCh37\":\n ftp_directory = genome[\"genome\"].format(version)\n\n # specific fasta file\n cap_name = lwr_name.capitalize()\n asm_name = re.sub(r\"\\.p\\d+$\", \"\", safe(genome[\"assembly_name\"]))\n mask_lvl = {\"soft\": \"_sm\", \"hard\": \"_rm\", \"none\": \"\"}[mask]\n asm_lvl = \"toplevel\" if kwargs.get(\"toplevel\") else \"primary_assembly\"\n version_tag = \"\" if version > 30 else f\".{version}\"\n\n ftp_file = f\"{cap_name}.{asm_name}{version_tag}.dna{mask_lvl}.{asm_lvl}.fa.gz\"\n\n # combine\n link = f\"{ftp_directory}/{ftp_file}\"\n if check_url(link, 2):\n return link\n\n # primary assemblies do not always exist\n if asm_lvl == \"primary_assembly\":\n link = link.replace(\"primary_assembly\", \"toplevel\")\n if check_url(link, 2):\n return link\n\n raise GenomeDownloadError(\n f\"Could not download genome {name} from {self.name}.\\n\"\n \"URL is broken. Select another genome or provider.\\n\"\n f\"Broken URL: {link}\"\n )",
"def download_rna_seq(rna_seq_uuid_list, dirpath):\n data_dict = {}\n data_dict[\"ids\"] = rna_seq_uuid_list\n\n headers = {'Content-Type': 'application/json'}\n data = json.dumps(data_dict)\n\n try:\n response = requests.post('https://api.gdc.cancer.gov/data', headers=headers, data=data)\n filename = os.path.join(dirpath,response.headers[\"Content-Disposition\"].split(\"filename=\")[1])\n\n with open(filename, \"wb\") as file:\n file.write(response.content)\n file.close()\n return filename\n except:\n return None",
"def fetch_sequence(sequence_id, database='uniprot'):\n if sequence_id.startswith('UPI'):\n database = 'uniparc'\n url_template = 'http://www.uniprot.org/uniparc/{}.fasta'\n elif sequence_id.startswith('UniRef'):\n database = 'uniref'\n url_template = 'http://www.uniprot.org/uniref/{}.fasta'\n else:\n database = 'uniprot'\n url_template = 'http://www.uniprot.org/uniprot/{}.fasta'\n\n url = url_template.format(sequence_id)\n logger.debug('Downloading sequence {} from {}...'.format(sequence_id, url))\n\n r = requests.get(url)\n if r.status_code != 200:\n raise Exception(\"Failed to fetch sequence with return code: {}\".format(r.status_code))\n\n seq = Bio.SeqIO.read(io.StringIO(r.text), 'fasta')\n if database == 'uniprot':\n seq.annotations['db'], seq.id, seq.name = re.split('[\\| ]', seq.id)\n return seq",
"def get_assemblies_link_from_accession_number(term):\n ###########print('+++++++',term)\n # provide your own mail here # I wrote the email at the begining of the codes\n handle = Entrez.esearch(db=\"assembly\", term=term, retmax=\"200\")\n record = Entrez.read(handle)\n ids = record[\"IdList\"]\n links = []\n for aid in ids:\n summary = get_id_give_assembly_summary(aid) # get summary\n url = summary[\"DocumentSummarySet\"][\"DocumentSummary\"][0][\"FtpPath_RefSeq\"]\n if url == \"\":\n continue\n label = os.path.basename(url)\n # get the fasta link - change this to get other formats\n link = url + \"/\" + label + \"_genomic.fna.gz\"\n link = link.replace(\"ftp://\", \"https://\")\n links.append(link)\n \n #############print('=======', links)\n return links",
"def get_protein_fasta(uniprot_id):\r\n url = \"http://www.uniprot.org/uniprot/{}.fasta\".format(uniprot_id)\r\n string = re.split(\"\\n\",ur.urlopen(url).read().decode(),1)[1]\r\n return re.sub(\"\\n\",\"\",string)",
"def fetch_genome(reference_name):\n from utils import script_dir\n genome_list = yaml.load(open(script_dir + \"/utils/genomes.yaml\",\"r\"))\n makedir(\"genomes\")\n if reference_name not in genome_list:\n msg(\"Reference Genome not available\", \"error\")\n ftp_loc = genome_list[reference_name]\n filename = os.path.split(ftp_loc)[1]\n makedir(\"{script_dir}/genomes/{reference_name}\".format(**locals()))\n reference_loc = \"{script_dir}/genomes/{reference_name}/{filename}\".format(**locals())\n if not file_exists( reference_loc + \".sa\"):\n print(\"Downloading {filename}\".format(**locals()))\n os.system(\"curl {ftp_loc} > {script_dir}/genomes/{reference_name}/{filename}\".format(**locals()))\n # Unzip and rezip with bgzip\n if filename.endswith(\".gz\"):\n os.system(\"gunzip {reference_loc} && bgzip {reference_loc2}\".format(reference_loc=reference_loc, reference_loc2=reference_loc.replace(\".gz\",\"\")))\n print(\"Indexing {script_dir}/genomes/{reference_name}/{filename}\".format(**locals()))\n os.system(\"bwa index {script_dir}/genomes/{reference_name}/{filename}\".format(**locals()))\n else:\n msg(\"Reference Already downloaded and indexed.\", \"error\")",
"def get_assemblies(term, download=True, path='assemblies'):\n\n from Bio import Entrez\n #provide your own mail here\n Entrez.email = \"[email protected]\"\n handle = Entrez.esearch(db=\"assembly\", term=term, retmax='200')\n record = Entrez.read(handle)\n ids = record['IdList']\n print (f'found {len(ids)} ids')\n links = []\n for id in ids:\n #get summary\n summary = get_assembly_summary(id)\n #get ftp link\n url = summary['DocumentSummarySet']['DocumentSummary'][0]['FtpPath_RefSeq']\n if url == '':\n continue\n label = os.path.basename(url)\n #get the fasta link - change this to get other formats\n link = os.path.join(url,label+'_genomic.fna.gz')\n print (link)\n links.append(link)\n if download == True:\n #download link\n urllib.request.urlretrieve(link, f'{label}.fna.gz')\n return links",
"def test_ncbi_sequence_info_download(self):\n\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_ncbi_sequence_info_download\"\n params[\"input_target\"] = \"sequence\"\n params[\"taxonomy\"] = \"ncbi\"\n params[\"taxonomy_files\"] = data_dir + \"build-custom/taxdump.tar.gz\"\n\n # Simulate download from local files (nucl_gb and species_genome_size)\n params[\"ncbi_url\"] = \"file://\" + os.path.abspath(data_dir) + \"/build-custom/remote/\"\n params[\"ncbi_sequence_info\"] = [\"nucl_gb\"]\n\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")",
"def download_file(id, output=DATA_DIR, quiet=False):\n url = f\"https://drive.google.com/uc?id={id}\"\n gdown.download(url, output=output, quiet=quiet)",
"def download_proteins(proteins, data_dir, fileroot=\"uniprot\"):\n uniprot = bioservices.UniProt()\n outfile = os.path.join(data_dir, fileroot + \".fasta\")\n with open(outfile, \"w\") as fasta_out:\n lines = uniprot.retrieve(proteins, frmt=\"fasta\")\n lines = \"\".join(lines)\n fasta_out.write(lines)\n\n return outfile",
"def download_it(fw, acquisition, file_name, input_path):\n\n safe = make_file_name_safe(file_name, replace_str='_')\n\n full_path = input_path + safe\n\n if acquisition.timestamp:\n if acquisition.timezone:\n created = acquisition.original_timestamp.isoformat()\n else:\n created = acquisition.timestamp.isoformat()\n else:\n created = 'unknown'\n\n rpt = 1\n while full_path in context.gear_dict['niftis']: # then repeated name\n full_path = input_path + str(rpt) + '_' + safe\n rpt += 1\n\n if os.path.isfile(full_path):\n log.info('File exists ' + file_name + ' -> ' +\\\n full_path + ' created ' + created)\n else:\n log.info('Downloading ' + file_name + ' -> ' +\\\n full_path + ' created ' + created)\n acquisition.download_file(file_name, full_path)\n\n full_file = fw.get_acquisition_file_info(acquisition.id, file_name)\n field_strength = full_file.info.get('MagneticFieldStrength')\n\n context.gear_dict['niftis'].append(full_path)\n context.gear_dict['file_names'].append(file_name)\n context.gear_dict['createds'].append(created)\n context.gear_dict['field_strength'].append(field_strength)",
"def _download_single(url, to, id):\n if os.path.exists(to):\n error_flags[id] = 1\n return\n\n try:\n request = rq.Request(url=url, headers=forge_agent_header)\n info = rq.urlopen(request).read()\n\n except urllib.error.URLError as e:\n print(url, 'urllib error')\n error_flags[id] = 2\n return\n\n except Exception as e:\n print(url, e)\n error_flags[id] = 2\n return\n\n with open(to, \"wb\") as file:\n print(url, 'writing')\n file.write(info)\n\n error_flags[id] = 1",
"def _download_chieffi04():\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FApJ%2F608%2F405'\n import urllib\n print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n tar.extractall(path=DATADIR)\n tar.close()",
"def _download_chieffi04():\n url = 'http://cdsarc.u-strasbg.fr/viz-bin/nph-Cat/tar.gz?J%2FApJ%2F608%2F405'\n import urllib\n print('Downloading Chieffi 04 yield tables from Vizier (should happen only at the first time)...')\n if os.path.exists(MASTERFILE):\n os.remove(MASTERFILE)\n urllib.urlretrieve(url,MASTERFILE)\n\n import tarfile\n tar = tarfile.open(MASTERFILE)\n tar.extractall(path=DATADIR)\n tar.close()",
"def download_epubs(epub_file, outdir=None, sep='|'):\n \n if not outdir:\n outdir = epub_file.split('.')[0]\n if not os.path.exists(outdir):\n os.mkdir(outdir) \n \n print(\"Downloading files to\", outdir)\n with open(epub_file, 'r') as file:\n for line in file.readlines():\n row = line.split(sep)\n gid = row[0]\n try:\n int(gid) \n url = gut_utf8.format(gid)\n r = requests.get(url)\n filename = '_'.join(row[1:3]).strip()\n filename = re.sub(r'\\W+', '_', filename)\n filename = re.sub(r'_+', '_', filename)\n print(gid, filename) \n with open(\"{}/{}-pg{}.txt\".format(outdir, filename, gid), 'w') as outfile:\n outfile.write(r.text)\n except ValueError as e:\n print('#', gid, \"not a GID\")",
"def download_SRA(SRA):\n\n print(\"Downloading SRA archive\")\n output = subprocess.run(['prefetch', '-f', 'yes', SRA], stderr=subprocess.STDOUT)\n\n print(\"Extracting FASTQ data\")\n output = subprocess.run(['fastq-dump', '--gzip', NCBI_DIR+SRA+'.sra'], stderr=subprocess.STDOUT)",
"def download_assignments(opener, fasta_fname, interval=3):\n params = {\"file\" : open(fasta_fname, \"rb\") }\n #submit and refresh until processed\n result = opener.open(rdp_base+servlet, params)\n while is_processing(result):\n sleep(interval)\n result = opener.open(rdp_base + check_page)\n\n #download the detailed text result\n result = opener.open(rdp_base + get_download_url(result))\n return result",
"def getGenomeSequence(genomeId):\n \n r = urllib.urlopen(PatricURL+genomeId+'/'+genomeId+'.fna').read()\n soup = BeautifulSoup(r)\n #print type(soup)\n\n genomeSequence = soup.prettify().split('| '+genomeId+']')[1]\n return genomeSequence.replace('\\n', '')",
"def download_index(gaia_index):\n # Create regex to extract URL and file name\n reFile = re.compile(r'<a href=\"(.*(GaiaSource.*gz))\"\\>')\n # Open Gaia HTML index file\n response = urllib.request.urlopen(gaia_index)\n # Read content\n files = []\n page = response.readlines()\n # Extract URLs from the page\n for line in page:\n line = line.decode('utf-8')\n # Extract URLs\n f = reFile.findall(line)\n if (f):\n f = f[0]\n if (f[0].startswith('http')):\n # Absolute path\n files.append((f[0], f[1]))\n else:\n # Relative path\n files.append((urljoin(gaia_index, f[0]), f[1]))\n if len(files) == 0:\n print(f\"Couldn't extract file names from the index page.\\nCheck URL: {gaia_index}\")\n exit(1)\n return files",
"def download_genotype_data():\n print(\"downloading genotype data\")\n download_from_url(PSAM_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.psam\", desc=\"downloading psam\")\n download_from_url(PVAR_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pvar.zst\",\n desc=\"downloading pvar\")\n download_from_url(PGEN_PATH, dst=f\"{GENOTYPE_DATA_PATH}/{MERGED_GENOTYPE_FILE}.pgen.zst\",\n desc=\"downloading pgen\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pvar\")\n decompress_genotype_file(f\"{MERGED_GENOTYPE_FILE}.pgen\")",
"def generate_fasta_single(seq_file, rfam_acc, out_dir):\n\n sequence = ''\n fp_out = None\n seq_bits = None\n\n # logging sequences not exported\n # rename this to family log\n log_file = os.path.join(out_dir, rfam_acc + \".log\")\n logging.basicConfig(\n filename=log_file, filemode='w', level=logging.INFO)\n\n # connect to db\n cnx = RfamDB.connect()\n\n # get a new buffered cursor\n cursor = cnx.cursor(raw=True)\n\n # fetch sequence accessions for specific family - significant only!!\n query = (\"SELECT fr.rfam_acc, fr.rfamseq_acc, fr.seq_start, fr.seq_end, rf.description\\n\"\n \"FROM full_region fr, rfamseq rf\\n\"\n \"WHERE fr.rfamseq_acc=rf.rfamseq_acc\\n\"\n \"AND fr.is_significant=1\\n\"\n \"AND fr.rfam_acc=\\'%s\\'\") % (rfam_acc)\n\n # execute the query\n cursor.execute(query)\n\n # open a new fasta output file\n fp_out = gzip.open(\n os.path.join(out_dir, str(rfam_acc) + \".fa.gz\"), 'w')\n\n for region in cursor:\n\n cmd = \"esl-sfetch -c %s/%s %s %s\" % (str(region[START]), str(region[END]),\n seq_file, str(region[SEQ_ACC]))\n\n proc = subprocess.Popen(\n cmd, shell=True, stdout=subprocess.PIPE)\n\n seq = proc.communicate()[0]\n\n # get sequence\n sequence = ''\n seq_bits = seq.split('\\n')[1:]\n sequence = sequence.join(seq_bits)\n\n # print sequence\n\n if sequence != '' and seq_validator(sequence) is True:\n # write header\n fp_out.write(\">%s/%s-%s %s\\n\" % (str(region[SEQ_ACC]),\n str(region[START]),\n str(region[END]),\n str(region[DESC])))\n\n # write sequence\n fp_out.write(sequence + '\\n')\n\n else:\n # logging sequences that have not been exported\n logging.info(str(region[SEQ_ACC]))\n\n # close last file\n fp_out.close()\n\n # disconnect from DB\n cursor.close()\n RfamDB.disconnect(cnx)",
"def dascasi_download():\n p = argparse.ArgumentParser(description=\"download DASC all-sky camera data\")\n p.add_argument(\"site\", choices=[\"EAA\", \"FYU\", \"KAK\", \"PKR\", \"TOO\", \"VEE\"])\n p.add_argument(\n \"startend\", help=\"start/end times UTC e.g. 2012-11-03T06:23 2012-11-03T07\", nargs=2\n )\n p.add_argument(\"odir\", help=\"directory to write downloaded FITS to\")\n p.add_argument(\"-w\", \"--wavelen\", help=\"request specific wavelength(s)\", nargs=\"+\")\n p.add_argument(\"-host\", default=\"ftp://optics.gi.alaska.edu\")\n p = p.parse_args()\n\n # host = \"ftp://mirrors.arsc.edu/AMISR/PKR/DASC/RAW/\"\n download(p.startend, p.site, p.odir, p.host, p.wavelen)",
"def Save_Fastas2(UniprotIDs):\r\n file=open(\"../Data/Negative_cases/negative_cases.fasta\",\"w\")\r\n for ID in UniprotIDs:\r\n data=urllib.request.urlopen(\"http://www.uniprot.org/uniprot/%s.fasta\" %ID)\r\n f=data.readlines()\r\n for lines in f:\r\n file.write(str(lines))\r\n #help(data)\r\n file.close()",
"def download_participants_document(cupASSistName):\n opener = urllib2.build_opener(urllib2.HTTPCookieProcessor(cookielib.CookieJar()), urllib2.HTTPRedirectHandler())\n opener.open(\"http://www.cupassist.com/pamelding/redirect.php?tknavn=\" + cupASSistName)\n return opener.open(\"http://www.cupassist.com/pamelding/vis_paamelding.php\").read()",
"def uniprot(gene, organism, output_file):\n\n print(\"\\tUniprot ...\")\n\n # Request\n domain = \"https://www.uniprot.org/uniprot\"\n query = f\"?query=gene_exact%3A{gene}+organism%3A{organism}\"\n extend = \"columns=id,protein_names&format=tab\"\n r = requests.get(f\"{domain}/{query}&{extend}\")\n result = r.text.splitlines()\n\n # Extract Uniprot IDs and Offical Protein Names\n uniprot_id = []\n uniprot_name = []\n if result != []:\n del(result[0]) # Remove the header\n for line in result: # Extracting IDs and names\n colonne = line.split('\\t')\n id = colonne[0]\n name = colonne[1]\n uniprot_id.append(id)\n if colonne[1] not in uniprot_name:\n uniprot_name.append(name)\n\n # Write the Uniprot IDs\n output_file.write(\"<td><div class='scroll'>\")\n for id in uniprot_id:\n output_file.write(f'<a href=\"{domain}/{id}\">{id}</a><br>')\n output_file.write(\"</div></td>\")\n\n # Write the Uniprot Offical Names\n output_file.write(\"<td><div class='scroll'>\")\n output_file.write(f\"{'<br>'.join(uniprot_name)}</div></td>\")\n return uniprot_id\n else:\n output_file.write(\"<td><i>No data found</i></td>\"*2)\n return uniprot_id"
] | [
"0.72803736",
"0.64713925",
"0.622999",
"0.6218201",
"0.6060453",
"0.59448034",
"0.57914644",
"0.5711981",
"0.5588932",
"0.5571663",
"0.556189",
"0.5560884",
"0.55341786",
"0.553164",
"0.55225044",
"0.5511088",
"0.5500932",
"0.54671043",
"0.54671043",
"0.54506993",
"0.53673744",
"0.53551334",
"0.53490084",
"0.5342134",
"0.5337148",
"0.5291302",
"0.52912056",
"0.52849984",
"0.52830195",
"0.5264628"
] | 0.71304876 | 1 |
Gapfill a model using probabilistic weights | def probabilistic_gapfill(model, universal_model, reaction_probabilities, clean_exchange_rxns=True, default_penalties=None, dm_rxns=False, ex_rxns=False, **solver_parameters):
universal_model = universal_model.copy()
model = clean_exchange_reactions(model) if clean_exchange_rxns else model.copy()
if default_penalties is None:
default_penalties = {'Universal': 1, 'Exchange': 100, 'Demand': 1, 'Reverse': 75}
penalties = default_penalties
reactions_to_remove = []
for r in universal_model.reactions:
if model.reactions.has_id(r.id):
reactions_to_remove.append(r)
penalties[r.id] = 0 # In the model
elif r.id in reaction_probabilities:
penalties[r.id] = max(0, 1 - reaction_probabilities[r.id]) * (penalties[r.id] if r.id in penalties else 1)
universal_model.remove_reactions(reactions_to_remove)
return cobra.flux_analysis.gapfill(model, universal_model, penalties=penalties, demand_reactions=dm_rxns, exchange_reactions=ex_rxns, **solver_parameters) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def init_weights(model):\n ...",
"def gap2d(_w_in):\n return nn.AdaptiveAvgPool2d((1, 1))",
"def weightGenerate(self):\n\t\tfor i in range(0, self.numberOfInput):\n\t\t\tself.weight.append(random.random()-0.5)",
"def update_weights(self):\n\n self.weights -= self.loss_grads\n self.loss_grads = np.zeros(self.weights.shape)",
"def rebalance_weightings(context):\r\n total_ratio = 0\r\n log.info(\"*******Rebalancing weightings********\")\r\n print(context.up_ratios)\r\n \r\n for asset, ratio in context.up_ratios.items():\r\n total_ratio += ratio\r\n \r\n for asset, ratio in context.up_ratios.items():\r\n context.max_weights[asset] = ratio/total_ratio\r\n \r\n log.info(context.max_weights)",
"def balance_training_weight(w, y):\n sample_weight = w.copy()\n neg_mask = (y == 0)\n pos_mask = (y == 1)\n \n bkg_sum_weight = np.sum(sample_weight[neg_mask])\n sig_sum_weight = np.sum(sample_weight[pos_mask])\n\n sample_weight[pos_mask] = sample_weight[pos_mask] / sig_sum_weight\n sample_weight[neg_mask] = sample_weight[neg_mask] / bkg_sum_weight\n return sample_weight",
"def fillGap(self, X, y, T, knn):\n knnobj = neighbors.KNeighborsRegressor(knn)\n return knnobj.fit(X, y).predict(T)",
"def bias_prior(self):",
"def get_nml_probs(x, model, data=None, normalize=True, num_classes=2, query_point_weight=20, dist_weight_thresh=None, \n num_grad_steps=10, lr=0.01, batch_size=32, grad_penalty=None, verbose=False, \n show_plots=False, plotting_2d=False, return_params=False):\n results = []\n data = data or DEFAULT_DATA\n orig_inputs, orig_targets = data\n \n if show_plots and plotting_2d:\n plt.figure()\n plt.title(f\"Original rewards\")\n plot_rewards(model, contours=True)\n plot_dataset(data)\n \n marker_for_class = {\n 0: 'x',\n 1: '*'\n }\n \n model.cuda()\n num_batches = ceil(len(orig_inputs) / batch_size)\n\n # NOTE train on gpu, move back to cpu for eval\n \n for proposed_class in range(num_classes):\n new_model = copy.deepcopy(model)\n new_model.cuda()\n \n # Sample all of the adaptation batches in advance\n optimizer = optim.SGD(new_model.parameters(), lr=lr)\n \n for _ in range(num_grad_steps):\n idxs = np.random.permutation(range(len(orig_inputs)))[:batch_size-1]\n X, y = orig_inputs[idxs], orig_targets[idxs]\n X = torch.Tensor(np.vstack((X, x))).cuda()\n y = torch.Tensor(np.hstack((y, proposed_class))).long().cuda()\n \n logits = new_model(X)\n loss = F.cross_entropy(logits, y, reduction='none')\n \n if dist_weight_thresh:\n weights = np.exp(-np.linalg.norm(x - X.cpu().numpy(), axis=-1) * 2.3 / dist_weight_thresh)\n else:\n weights = np.ones(len(y))\n \n weights[-1] *= query_point_weight * 1. / num_batches\n weights = torch.Tensor(weights).cuda()\n loss = torch.sum(loss * weights) / torch.sum(weights)\n \n loss.backward()\n optimizer.step()\n \n new_model.cpu()\n \n with torch.no_grad():\n x_tensor = torch.Tensor(x[None])\n probs = torch.softmax(new_model(x_tensor), -1)\n results.append(probs[0][proposed_class].item())\n \n if show_plots:\n new_model.to(torch.device(\"cpu\"))\n\n if plotting_2d: \n plt.figure()\n plot_rewards(new_model, contours=True, env = False, title=f\"Finetuning on label {proposed_class}\")\n plot_dataset(data)\n plt.scatter(x[0], x[1], marker=marker_for_class[proposed_class], color='w', s=100)\n \n plt.figure()\n plt.title(f\"Losses for label {proposed_class}\")\n plt.plot(losses)\n \n plt.figure()\n plt.title(f\"x loss for label {proposed_class}\")\n plt.plot(x_losses)\n \n plt.figure()\n plt.title(f\"x probs for label {proposed_class}\")\n plt.plot(x_vals)\n \n model.cpu()\n \n if normalize:\n results = np.array(results) / sum(results)\n else:\n results = np.array(results)\n return results if not return_params else (results, new_model)",
"def copy_para(from_model, to_model):\n for i, j in zip(from_model.trainable_weights, to_model.trainable_weights):\n j.assign(i)",
"def update_weights(self):\n\t\tpass",
"def modify_weights_after_load(model):\n # Prune heads if needed\n if model.config.pruned_heads:\n model.prune_heads(model.config.pruned_heads)\n\n # Tie weights if needed\n model.tie_weights()",
"def init_weights(self) -> None:\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)",
"def step(self, model):\n weights = []\n if (self.prune_scope == 'global'):\n # First collect all weights\n for step, (name, param) in enumerate(self.prune_parameters):\n scale = 1\n # Pointer to the original tensor\n tensor = param.data#.cpu().numpy()\n # Gradient-based selection\n if (self.prune_selection == 'gradient_max'):\n grad = param.grad#.cpu().numpy()\n tensor = grad[torch.nonzero(tensor, as_tuple=True)]#np.nonzero(tensor)]\n elif (self.prune_selection == 'gradient_min'):\n grad = 1 / (torch.abs(param.grad) + 1e-7) #.cpu().numpy()\n tensor = grad[torch.nonzero(tensor, as_tuple=True)]#np.nonzero(tensor)]\n #tensor = torch.abs(torch.max(tensor) - tensor) #np.max(tensor) - tensor\n # Retrieve non-pruned weights\n if (self.prune_scale == 'dimension'):\n scale = tensor.size\n if (self.prune_scale == 'normalize'):\n scale = torch.max(torch.abs(tensor))#np.max(np.abs(tensor))\n if (self.prune_scale == 'xavier'):\n scale = 1.0 / np.sqrt(2.0 / tensor.shape[0] + tensor.shape[1])\n alive = tensor[torch.nonzero(tensor, as_tuple=True)]#np.nonzero(tensor)]\n alive /= scale \n # Add to global weights\n weights.append(alive)\n # Flatten the whole weights\n weights = torch.cat(weights)#np.concatenate(weights)\n value = abs(weights)\n # Compute global percentile\n percentile_value = torch_percentile(value, self.percent) #np.percentile(value, self.percent)\n # Now apply the global or compute local factor\n for step, (name, param) in enumerate(self.prune_parameters):\n scale = 1\n # Pointer to the original tensor\n tensor = param.data #.cpu().numpy()\n # Gradient-based selection\n if (self.prune_selection == 'gradient_max'):\n tensor = param.grad#.cpu().numpy()\n elif (self.prune_selection == 'gradient_min'):\n tensor = 1.0 / (torch.abs(param.grad) + 1e-7)#np.abs(param.grad.cpu().numpy())\n #tensor = torch.abs(torch.max(tensor) - tensor) #np.max(tensor) - tensor\n # Compute scaling\n if (self.prune_scale == 'dimension'):\n scale = tensor.size\n if (self.prune_scale == 'normalize'):\n scale = torch.max(torch.abs(tensor)) #np.max(np.abs(tensor))\n if (self.prune_scale == 'xavier'):\n scale = 1.0 / np.sqrt(2.0 / tensor.shape[0] + tensor.shape[1])\n local_weights = tensor\n local_weights /= scale\n # We do not prune bias term\n if (self.prune_scope == 'local'):\n weights = tensor[torch.nonzero(param.data, as_tuple=True)] #tensor[np.nonzero(tensor)]\n # Retrieve non-pruned weights\n value = abs(weights)\n # Compute global percentile\n percentile_value = torch_percentile(value, self.percent)\n # Use the selection function to compute mask\n new_mask = PruningMasking.ranking(self, name, local_weights, percentile_value, self.mask[step])\n # Store the computed mask\n self.mask[step] = new_mask\n step = 0\n return model",
"def gradient_weight(X, Y, model):\n W = model['weight']\n b = model['bias']\n weight_decay = model['weight_decay']\n\n # YOUR CODE HERE\n # Write the gradient with respect to the weights.\n return np.add(np.subtract(np.dot(np.transpose(predict(X, model)), X), np.dot(np.transpose(Y), X)), 2 * LAMBDA * np.transpose(model['weight'])) #np.zeros((X.shape[1], Y.shape[1]))",
"def proba_redefined_predict(model,X,weigh):\n\n y_proba=model.predict_proba(X)\n tuned=renorm(y_proba,weigh)\n y_max_arg=tuned.argmax(axis=1)\n predict=to_class(y_max_arg,model.classes_)\n\n return predict",
"def proba_redefined_predict(model,X,weigh,classes=string.ascii_lowercase):\n\n y_proba=model.predict_proba(X)\n tuned=renorm(y_proba,weigh)\n y_max_arg=tuned.argmax(axis=1)\n predict=to_class(y_max_arg,classes)\n \n return predict",
"def train_gradient_boost(self, params, num_boost_round = 50):\n print \"training GB......\"\n dtrain = xgb.DMatrix(self.X, self.y)\n model = xgb.train(params, dtrain, num_boost_round = num_boost_round)\n self.models += [model]",
"def transfer_weights(self):\n W, target_W = self.model.get_weights(), self.target_model.get_weights()\n for i in range(len(W)):\n target_W[i] = self.tau * W[i] + (1 - self.tau)* target_W[i]\n self.target_model.set_weights(target_W)",
"def update_policy(self, minibatch_size):\n \n steps = self.rewards.shape[0]\n batch_size = self.rewards.shape[0] * self.rewards.shape[1]\n #steps = 500\n #batch_size = 500\n #print(steps)\n #print(batch_size)\n \n # Compute advantages\n '''\n with torch.no_grad():\n if self.gae:\n advantages = torch.zeros_like(self.rewards).to(self.training_device)\n lastgaelam = 0\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n nextvalues = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t + 1]\n nextvalues = self.state_values[t + 1]\n delta = self.rewards[t] + self.gamma * nextvalues * nextnonterminal - self.state_values[t]\n advantages[t] = lastgaelam = delta + self.gamma * self.gae_lambda * nextnonterminal * lastgaelam\n returns = advantages + self.state_values\n else:\n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n ''' \n returns = torch.zeros_like(self.rewards).to(self.training_device)\n for t in reversed(range(steps)):\n if t == steps - 1:\n nextnonterminal = 1.0 - self.dones[t]\n next_return = self.state_values[t]\n else:\n nextnonterminal = 1.0 - self.dones[t+1]\n next_return = returns[t+1]\n returns[t] = self.rewards[t] + self.gamma * nextnonterminal * next_return\n advantages = returns - self.state_values\n \n\n # flatten the batch\n #b_obs = self.states.reshape((-1,) + self.state_space)\n #print(self.states.shape)\n b_obs = self.states.reshape((-1,4)).detach()\n b_logprobs = self.action_probs.reshape(-1,1).detach()\n b_actions = self.actions.reshape((-1,)).detach()\n b_advantages = advantages.reshape(-1,1)\n b_returns = returns.reshape(-1,1)\n b_values = self.state_values.reshape(-1,1)\n \n # Optimize policy and value network for K epochs, run optimization in minibatches\n \n inds = np.arange(batch_size)\n for i_epoch_pi in range(self.epochs):\n np.random.shuffle(inds)\n for start in range(0, batch_size, minibatch_size):\n end = start + minibatch_size\n minibatch_ind = inds[start:end]\n mb_advantages = b_advantages[minibatch_ind]\n if self.norm_adv:\n mb_advantages = (mb_advantages - mb_advantages.mean()) / (mb_advantages.std() + 1e-8)\n \n #_, newlogproba, entropy = self.get_action(b_obs[minibatch_ind], b_actions[minibatch_ind])\n newlogproba, entropy = self.evaluate(b_obs[minibatch_ind], b_actions[minibatch_ind])\n #ratio = (newlogproba - b_logprobs[minibatch_ind]).exp()\n ratio = torch.exp((newlogproba - b_logprobs[minibatch_ind].detach()))\n \n # Stats\n approx_kl = (b_logprobs[minibatch_ind] - newlogproba).mean()\n\n # Policy loss\n pg_loss1 = -mb_advantages * ratio\n pg_loss2 = -mb_advantages * torch.clamp(ratio, 1 - self.clip_epsilon, 1 + self.clip_epsilon)\n pg_loss = torch.max(pg_loss1, pg_loss2).mean()\n entropy_loss = entropy.mean()\n\n # Value loss\n _, new_values = self.policy.forward(b_obs[minibatch_ind])\n if self.clip_vloss:\n \n v_loss_unclipped = self.MseLoss(new_values,b_returns[minibatch_ind])\n #v_loss_unclipped = ((new_values - b_returns[minibatch_ind]) ** 2)\n v_clipped = b_values[minibatch_ind] + torch.clamp(new_values - b_values[minibatch_ind],\n -self.clip_epsilon, self.clip_epsilon)\n #v_loss_clipped = (v_clipped - b_returns[minibatch_ind]) ** 2\n 
v_loss_clipped = self.MseLoss(v_clipped,b_returns[minibatch_ind])\n v_loss_max = torch.max(v_loss_unclipped, v_loss_clipped)\n #v_loss = 0.5 * v_loss_max.mean()\n v_loss = 0.5 * v_loss_max\n else:\n #v_loss = 0.5 * ((new_values - b_returns[minibatch_ind]) ** 2).mean()\n v_loss = self.MseLoss(new_values,b_returns[minibatch_ind])\n\n loss = pg_loss + v_loss * self.vf_coeff - self.ent_coeff * entropy_loss\n\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.policy.parameters(), self.max_grad_norm)\n self.optimizer.step()\n # Copy new weights into old policy:\n self.old_policy.load_state_dict(self.policy.state_dict())",
"def init_weights(self):\r\n self.embedding.weight.data.uniform_(-0.1, 0.1)\r\n self.fc.bias.data.fill_(0)\r\n self.fc.weight.data.uniform_(-0.1, 0.1)",
"def __init__(self, in_features, out_features):\n \n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n\n self.params = {'weight': 0.0001 * np.random.randn(out_features, in_features), 'bias': np.zeros((out_features, 1))}\n self.grads = {'weight': np.zeros((out_features, in_features)), 'bias': np.zeros((out_features, 1))}\n\n\n\n ########################\n # END OF YOUR CODE #\n #######################",
"def sparsify_model(model, x_test, y_test, k_sparsity, pruning='weight'):\r\n # Copying a temporary sparse model from our original\r\n sparse_model = model #tf.keras.models.clone_model(model)\r\n # sparse_model.set_weights(model.get_weights())\r\n\r\n # Getting a list of the names of each component (w + b) of each layer\r\n names = [weight.name for layer in sparse_model.layers for weight in layer.weights]\r\n # print(names)\r\n # Getting the list of the weights for each component (w + b) of each layer\r\n weights = sparse_model.get_weights()\r\n # print(weights)\r\n\r\n\r\n # Initializing list that will contain the new sparse weights\r\n newWeightList = []\r\n\r\n # Iterate over all but the final 2 layers (the softmax)\r\n for i in range(0, len(weights), 2):\r\n\r\n # print(weights[i])\r\n # print(weights[i+1])\r\n\r\n if pruning == 'weight':\r\n kernel_weights, bias_weights = weight_prune_dense_layer(weights[i],\r\n weights[i + 1],\r\n k_sparsity)\r\n elif pruning == 'unit':\r\n kernel_weights, bias_weights = unit_prune_dense_layer(weights[i],\r\n weights[i + 1],\r\n k_sparsity)\r\n else:\r\n print('does not match available pruning methods ( weight | unit )')\r\n\r\n # Append the new weight list with our sparsified kernel weights\r\n newWeightList.append(kernel_weights)\r\n\r\n # Append the new weight list with our sparsified bias weights\r\n newWeightList.append(bias_weights)\r\n\r\n # Adding the unchanged weights of the final 2 layers\r\n # for i in range(len(weights) - 2, len(weights)):\r\n # for i in range(len(weights) - 2, len(weights)):\r\n # unmodified_weight = np.copy(weights[i])\r\n # newWeightList.append(unmodified_weight)\r\n\r\n # Setting the weights of our model to the new ones\r\n sparse_model.set_weights(newWeightList)\r\n\r\n # Re-compiling the Keras model (necessary for using `evaluate()`)\r\n adam = Adam(lr=0.0004, beta_1=0.9, beta_2=0.999, epsilon=1e-08, decay=0.0)\r\n sparse_model.compile(\r\n loss='mean_squared_error',\r\n optimizer='adam',\r\n metrics=['accuracy'])\r\n\r\n # print((sparse_model.summary()))\r\n #\r\n # sparse_model.fit(np.expand_dims(x_test, axis=2), y_test,\r\n # batch_size=32, epochs=20, verbose=2, validation_split=0.2)\r\n #\r\n # print((sparse_model.summary()))\r\n\r\n # Printing the the associated loss & Accuracy for the k% sparsity\r\n # score = sparse_model.evaluate(np.expand_dims(x_test, axis=2), y_test, verbose=0)\r\n # print('k% weight sparsity: ', k_sparsity,\r\n # '\\tTest loss: {:07.5f}'.format(score[0]),\r\n # '\\tTest accuracy: {:05.2f} %%'.format(score[1] * 100.))\r\n\r\n\r\n return sparse_model, weights",
"def reset_weights(self):\n self.policy_backbone.reset_weights()\n self.value_backbone.reset_weights()\n self.action_head.reset_weights()\n self.critic_head.reset_weights()",
"def weight_wrtg(self, wrtg):\n # Clear caches because weights are going to change.\n # TODO: it might be possible to not clear the caches\n # if the weight doesn't change, and re-use previous decoding.\n wrtg.ClearCaches()\n for p in wrtg.P:\n rule = p.rhs.rule\n assert isinstance(rule.features, list)\n rule.weight = self.weight_rule(rule)",
"def _precompute_probabilities(self):\n\n d_graph = self.d_graph\n first_travel_done = set()\n\n nodes_generator = self.graph.nodes() if self.quiet \\\n else tqdm(self.graph.nodes(), desc='Computing transition probabilities')\n\n for source in nodes_generator:\n\n # Init probabilities dict for first travel\n if self.PROBABILITIES_KEY not in d_graph[source]:\n d_graph[source][self.PROBABILITIES_KEY] = dict()\n\n for current_node in self.graph.neighbors(source):\n\n # Init probabilities dict\n if self.PROBABILITIES_KEY not in d_graph[current_node]:\n d_graph[current_node][self.PROBABILITIES_KEY] = dict()\n\n unnormalized_weights = list()\n first_travel_weights = list()\n d_neighbors = list()\n\n # Calculate unnormalized weights\n for destination in self.graph.neighbors(current_node):\n\n p = self.sampling_strategy[current_node].get(self.P_KEY,\n self.p) if current_node in self.sampling_strategy else self.p\n q = self.sampling_strategy[current_node].get(self.Q_KEY,\n self.q) if current_node in self.sampling_strategy else self.q\n\n if destination == source: # Backwards probability\n ss_weight = self.graph[current_node][destination].get(self.weight_key, 1) * 1 / p\n elif destination in self.graph[source]: # If the neighbor is connected to the source\n ss_weight = self.graph[current_node][destination].get(self.weight_key, 1)\n else:\n ss_weight = self.graph[current_node][destination].get(self.weight_key, 1) * 1 / q\n\n # Assign the unnormalized sampling strategy weight, normalize during random walk\n unnormalized_weights.append(ss_weight)\n if current_node not in first_travel_done:\n first_travel_weights.append(self.graph[current_node][destination].get(self.weight_key, 1))\n d_neighbors.append(destination)\n\n # Normalize\n unnormalized_weights = np.array(unnormalized_weights)\n d_graph[current_node][self.PROBABILITIES_KEY][\n source] = unnormalized_weights / unnormalized_weights.sum()\n\n if current_node not in first_travel_done:\n unnormalized_weights = np.array(first_travel_weights)\n d_graph[current_node][self.FIRST_TRAVEL_KEY] = unnormalized_weights / unnormalized_weights.sum()\n first_travel_done.add(current_node)\n\n # Save neighbors\n d_graph[current_node][self.NEIGHBORS_KEY] = d_neighbors",
"def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)",
"def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)",
"def init_weights(self):\n self.embedding.weight.data.uniform_(-0.1, 0.1)\n self.fc.bias.data.fill_(0)\n self.fc.weight.data.uniform_(-0.1, 0.1)",
"def propose_patch(self, weight_bounds, learn_rate=1.0):\n in_dims, mid_dims, _, _ = weight_bounds.shape\n\n best_index = (None, None)\n best_constraints = -1\n best_delta = 0.0\n indices = itertools.product(range(in_dims), range(mid_dims))\n for in_dim, mid_dim in tqdm(indices, total=(in_dims * mid_dims),\n desc=\"Computing Patch\"):\n bounds = weight_bounds[in_dim, mid_dim, :, :]\n # We focus on the bounds that are non-NaN\n non_nan_bounds = bounds[~np.isnan(bounds[:, 0])]\n if len(non_nan_bounds) < best_constraints:\n continue\n lower, upper, n_met = self.interval_MAX_SMT(non_nan_bounds)\n\n if n_met <= best_constraints:\n continue\n best_constraints = n_met\n best_index = (in_dim, mid_dim)\n\n if lower <= 0.0 <= upper:\n best_delta = 0.0\n else:\n # True if the interval suggests to increase the weight.\n is_increase = lower > 0.0\n # If the interval suggests to increase the weight, suggest a\n # delta slightly above lower. Otherwise, suggest one slightly\n # below upper. Either way, we're trying to stay as close to 0\n # as possible.\n ratio = 0.1 if is_increase else 0.9\n best_delta = lower + (ratio * (upper - lower))\n if not np.isfinite(best_delta):\n eps = 0.1\n if is_increase: # => upper == np.Infinity\n assert np.isfinite(lower + eps)\n best_delta = lower + eps\n elif upper < 0.0: # => lower == -np.Infinity\n assert np.isfinite(upper - eps)\n best_delta = upper - eps\n else:\n assert False\n assert np.isfinite(best_delta)\n print(\"Would be satisfying\", best_constraints, \"constraints.\")\n print(\"Updating weight\", best_index)\n best_delta *= learn_rate\n return best_index, best_delta, best_constraints"
] | [
"0.59936357",
"0.5899231",
"0.5710139",
"0.5680814",
"0.5665641",
"0.565304",
"0.56094337",
"0.56018",
"0.55877113",
"0.55586326",
"0.5552374",
"0.5551015",
"0.5542684",
"0.5506722",
"0.5504694",
"0.5479312",
"0.54784465",
"0.5458892",
"0.54003364",
"0.5388704",
"0.5387438",
"0.53765374",
"0.5354086",
"0.534679",
"0.53443646",
"0.5343532",
"0.53322417",
"0.53322417",
"0.53322417",
"0.5319208"
] | 0.5979327 | 1 |
Exports the given reaction probabilities into a JSON formatted file, saved at filename | def export_json(rxn_probs, filename):
with open(filename, 'w') as f:
f.write(json.dumps(rxn_probs))
return filename | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def write_json(self, filename):\n with open(filename, 'a+') as f:\n f.write(json.dumps(self.weights))\n f.write(\"\\n\")",
"def save(statistic_entries):\n with open('learn.json', 'w') as file:\n json.dump(statistic_entries, file, indent=2)",
"def dump(pred_out_path, xyz_pred_list, verts_pred_list):\n # make sure its only lists\n xyz_pred_list = [x.tolist() for x in xyz_pred_list]\n verts_pred_list = [x.tolist() for x in verts_pred_list]\n #import pdb; pdb.set_trace()\n # save to a json\n with open(pred_out_path, 'w') as fo:\n json.dump(\n [\n xyz_pred_list,\n verts_pred_list\n ], fo)\n print('Dumped %d joints and %d verts predictions to %s' % (len(xyz_pred_list), len(verts_pred_list), pred_out_path))",
"def class2json(classifier, filename = \"classifier\"):\n model_json = classifier.to_json()\n with open(filename + \".json\", \"w\") as json_file:\n json_file.write(model_json)\n # Serialize weights to HDF5\n classifier.save_weights(filename + \".h5\")\n print(\"Successfully saved the classifier to file \" + filename + \".\")",
"def output(self, filename):\n with open(filename, 'w') as f:\n op = {}\n layer_res = []\n alphas_res = []\n for layer in self._layers:\n weights = []\n alphas = []\n for neuron in layer._neurons:\n weights.append(neuron._weights)\n alphas.append(neuron._alpha)\n layer_res.append(weights)\n alphas_res.append(alphas)\n op['layers'] = layer_res\n op['alphas'] = alphas_res\n json.dump(op, f, indent='\\t')",
"def save_results(predictions, filename):\n with open(filename, 'w') as f:\n f.write(\"id,ACTION\\n\")\n for i, pred in enumerate(predictions):\n f.write(\"%d,%f\\n\" % (i + 1, pred))",
"def writeJSON(filename):\n if not filename.endswith('.json'):\n filename += '.json'\n with open(filename, 'w') as f:\n for x in range(numRows):\n scores = quizScores()\n types = getTypes(scores)\n row = { 'id': x,\n 'challenger': types[0], 'collaborator': types[1],\n 'communicator': types[2], 'contributor': types[3],\n 'q1': scores[0], 'q2': scores[1], 'q3': scores[2],\n 'q4': scores[3], 'q5': scores[4], 'q6': scores[5],\n 'q7': scores[6], 'q8': scores[7], 'q9': scores[8],\n 'q10': scores[9], 'q11': scores[10], 'q12': scores[11],\n 'q13': scores[12], 'q14': scores[13], 'q15': scores[14],\n 'q16': scores[15], 'q17': scores[16], 'q18': scores[17]\n }\n json.dump(row, f, sort_keys=True)",
"def save(self, filename):\n import json\n\n json = json.dumps(self.joint_limits)\n with open(filename, 'w') as f:\n f.write(json)",
"def write_to_json(dicts, filename: str):\n\n with open(filename, 'w', encoding='utf-8') as f:\n mmcv.dump(dicts, f, file_format='json')",
"def writeFile(fileName, profile, singleScores, bestMotifs, dnaScores, bestMotif):\n with open(fileName, 'w+') as f:\n f.write(strftime(\"Created on: %Y-%m-%d %H:%M:%S\\n\", localtime()))\n f.write('Best Motifs: ')\n f.write('\\n')\n json.dump(bestMotif, f)\n f.write('\\n')\n f.write('Motifs Profile: ')\n f.write('\\n')\n json.dump(profile, f)\n f.write('\\n')\n f.write('Single Scores: ')\n f.write('\\n')\n for i in range(0, len(singleScores)):\n json.dump(bestMotifs[i], f)\n f.write(': ')\n json.dump(singleScores[i], f)\n f.write('\\n')\n f.write('Motifs that have a better score than the worst scoring one: ')\n f.write('\\n')\n for scores in dnaScores:\n json.dump(scores, f)\n f.write('\\n')",
"def __write_csv(self, prediction_probs, n, filename):\n d = {'Id': pd.Series([i for i in xrange(1, n + 1)]),\n 'Action': pd.Series(prediction_probs)}\n df = pd.DataFrame(d)\n df = df[['Id', 'Action']]\n df.to_csv(filename, sep=',', encoding='utf-8',\n index=False)",
"def save_highscores(self, contents):\n\t\ttry:\n\t\t\twith open(self.filename, 'w') as f_obj:\n\t\t\t\tf_obj.write(json.dumps(contents)) #save as json\n\t\texcept FileNotFoundError:\n\t\t\tprint('File for highscores not found! Call 016 741 6243 for assistance.')",
"def save(self, filename):\n data = {\"sizes\": self.sizes,\n \"weights\": [w.tolist() for w in self.weights],\n \"biases\": [b.tolist() for b in self.biases]}\n f = open(filename, \"w\")\n json.dump(data, f)\n f.close()",
"def write_predictions(prediction_dic, result_path):\n with open(result_path, 'wb') as outfile:\n outfile.write(bytes('Patient_ID,HPV/p16_status\\n', 'UTF-8'))\n for patient_id, pred in prediction_dic.items():\n outfile.write(bytes(str(patient_id) + ',' + str(pred) + '\\n', 'UTF-8'))",
"def dump_distributions(self):\n file_path = self.get_local_path(self.filename_distributions)\n\n with open(file_path, \"w\") as f:\n json_obj = {\n \"feature_uniques\": self.feature_uniques,\n \"feature_summaries\": self.feature_summaries,\n }\n json.dump(json_obj, f)\n return file_path",
"def save_priors(name, prior_dict):\n with open(name + \"_priors.json\", \"w\") as fp:\n json.dump(prior_dict, fp)",
"def write_to_json(self, export_fp: str):\n # TODO:\n pass",
"def save(self, characters, filepath):\n\n\t\twith open(filepath, 'w') as out:\n\t\t\tjson.dump(characters, out, sort_keys=True, indent=4)",
"def save(self, filename):\n data = {\"sizes\": self.sizes,\n \"weights\": [w.tolist() for w in self.weights],\n \"biases\": [b.tolist() for b in self.biases],\n \"cost\": str(self.cost.__name__)}\n f = open(filename, \"w\")\n json.dump(data, f)\n f.close()",
"def write_output_file(filename, actions, log):\n f = open(filename, 'w')\n\n for i in range(len(actions)):\n f.write(str(actions[i]))\n if i < len(actions) - 1:\n f.write(',')\n f.write('\\n')\n\n for k in log.keys():\n f.write(str(k) + ' = ' + str(log.get(k)))\n f.write('\\n')\n\n f.close()",
"def to_json_file(self, path):\n with open(path, 'w') as f:\n f.write(self.to_json())",
"def write_prediction_results(formatted_outputs, file_path):\n\n with codecs.open(file_path, 'w', 'utf-8') as f:\n for formatted_instance in formatted_outputs:\n json_str = json.dumps(formatted_instance, ensure_ascii=False)\n f.write(json_str)\n f.write('\\n')\n zipfile_path = file_path + '.zip'\n f = zipfile.ZipFile(zipfile_path, 'w', zipfile.ZIP_DEFLATED)\n f.write(file_path)\n\n return zipfile_path",
"def write_to_json(missense_dict, frame_shift_dict, missense_name_dict, frame_shift_name_dict, person):\n json_file[person] = {\n \"missense_variant\": missense_dict,\n \"missense_HGNC_name\": missense_name_dict,\n \"frame_shift_variant\": frame_shift_dict,\n \"frame_shift_HGNC_name\": frame_shift_name_dict}",
"def write_submission(ratings, file_name):\n # Build output string to write into the file\n output = \"Id,Prediction\\n\"\n for (row, col, rat) in ratings:\n # every line is of the format 'rX_cY,R' where X and Y correspond to row(user) and column(movie) indices and R is the rating\n # we have do increase row and col by one because numpy arrays use 0-base indexing while movie/user indices start at 1\n output += \"r%d_c%d,%f\\n\" % (row + 1, col + 1, rat)\n \n # Write file \n with open(os.path.join('../predictions_csv', file_name), 'w') as output_file:\n output_file.write(output)\n \n return output",
"def save_modal_output_to_json(file_name: str, data_to_save: dict) -> str:\n cprint(f\"### Function Name:-> {inspect.stack()[0][3]} ###\", 'yellow', 'on_grey', attrs=['bold'])\n try:\n data_handler_app = apps.get_app_config('data_handler')\n data_handler_path = data_handler_app.path\n modal_output_dir = Path(data_handler_path) / \"PM_Model\" / 'model_output_files'\n json_output_file = modal_output_dir / file_name\n json_file = open(json_output_file.as_posix(), 'w')\n json.dump(data_to_save, json_file, indent=3, sort_keys=True)\n json_file.close()\n\n return json_output_file.as_posix()\n except Exception as ex:\n json_output_file.unlink()\n cprint(traceback.format_exc(), 'red')\n log_exception(traceback.format_exc())\n return \"\"",
"def export_json(contents, filename):\n with open(filename, 'w') as f:\n json.dump(contents, f)",
"def saveFile(self, filename=\"UQModelTest.json\"):\n sd = self.saveDict()\n with open(filename, \"w\") as f:\n json.dump(sd, f, indent=2)",
"def to_file(self, filename):\n\n output_dict = {'random_forest': self.random_forest,\n 'apply_preprocessing': self.apply_preprocessing,\n 'apply_postprocessing': self.apply_postprocessing}\n pickle.dump(output_dict, open(filename, \"wb\"))",
"def toFile(self, file_path) -> None:\n\t\tjson_repr = self.toJSON()\n\t\t\n\t\twith open(file_path, \"w\") as f:\n\t\t\tf.write(json_repr)",
"def save_to_file(cls, list_objs):\n namefile = cls.__name__ + \".json\"\n rep_list = []\n if list_objs is not None and list_objs != []:\n for item in list_objs:\n repre = cls.to_dictionary(item)\n # rep_list.append(cls.to_json_string(repre))\n rep_list.append(repre)\n\n with open(namefile, \"w\", encoding=\"UTF-8\") as f:\n # json.dump(rep_list, f)\n f.write(cls.to_json_string(rep_list))"
] | [
"0.6705845",
"0.641551",
"0.6144338",
"0.6136017",
"0.6124605",
"0.612017",
"0.61101353",
"0.60968125",
"0.60849845",
"0.5977744",
"0.59029186",
"0.587066",
"0.5857155",
"0.5841988",
"0.5835876",
"0.5835178",
"0.5819759",
"0.5813063",
"0.5810192",
"0.57985073",
"0.5744357",
"0.57422674",
"0.57321066",
"0.57320017",
"0.5696555",
"0.56672543",
"0.5659373",
"0.56575745",
"0.5654339",
"0.5648293"
] | 0.8034322 | 0 |
return the probability of a given reaction | def get_probability(self, reaction):
return self.__getitem__(reaction) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def probability(self, item):\n count = self.counter.get(item, 0)\n if self.smoothing_dict:\n smooth_count = self.smoothing_dict.get(count, count)\n assert smooth_count > 0\n return smooth_count / self.smooth_total\n else:\n return count / self.total",
"def probability(structure,seq, react=None):\n return energy_to_proba(get_ens_energy(seq,react),get_stru_energy(structure,seq,react))",
"def calculate_probability(self):\n return 0",
"def Probability(rating1, rating2):\n return 1.0 * 1.0 / (1 + 1.0 * math.pow(10, 1.0 * (rating1 - rating2) / 400))",
"def p(self) -> Probability:\n ...",
"def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n probs = np.ones(len(states))/2\n return probs",
"def reconstructed_probability(self, x: torch.Tensor) -> torch.Tensor:\n with torch.no_grad():\n pred = self.predict(x)\n recon_dist = Normal(pred['recon_mu'], pred['recon_sigma'])\n x = x.unsqueeze(0)\n p = recon_dist.log_prob(x).exp().mean(dim=0).mean(dim=-1) # vector of shape [batch_size]\n return p",
"def get_response_probability(self, ind):\n return self.rp_t[ind]",
"def get_response_probability(self, ind):\n pass",
"def act(self, observation):\n self.t += 1\n\n probabilities = self.probabilities(observation)\n\n probabilities *= self.action_mask(observation[0])\n\n if probabilities.sum() == 0.0:\n probabilities += 1.0\n\n return torch.multinomial(probabilities, num_samples=1)[0]",
"def get_probs(self, states, actions):\n # YOUR CODE HERE\n \n # So we need to determine for every input state-action pair, what the resulting policy distribution is\n # This means that the input will be a single state and a single action per index. \n # We then need to determine if, according to our policy, the action should be taken (prob=1) \n # or not (prob=0)\n \n # state is a tuple of (player's current sum, dealer's single showing card, boolean for usable ace)\n probs = []\n for index, (state, action) in enumerate(zip(states, actions)):\n chosen_action = self.sample_action(state)\n if action == chosen_action:\n probs.append(1)\n else:\n probs.append(0)\n \n \n return np.array(probs)",
"def get_probability(self, word: Word):\n if len(word) == 0:\n return 0.0\n\n _check_is_legal_word(word, self.alphabet_size)\n result = 1.0\n current_state = self.initial_state\n for character in word:\n if current_state is None:\n return 0.0\n\n next_state, probability = self.transition_dict.get(current_state, {}).get(\n character, (None, 0.0)\n )\n current_state = next_state\n result *= probability\n\n return 0.0 if current_state != self.final_state else result",
"def prob(self, state, action):\n if state + action == 100:\n reward = 1\n else:\n reward = 0\n\n return [(state + action, self._p_head, reward), (state - action, 1 - self._p_head, 0)]",
"def getActionProb(self, canonicalBoard, temp=1):\n for i in range(self.args.numMCTSSims):\n dir_noise = (i == 0 and self.dirichlet_noise)\n self.search(canonicalBoard, dirichlet_noise=dir_noise)\n\n s = self.game.stringRepresentation(canonicalBoard)\n counts = [\n self.Nsa[(s, a)] if (s, a) in self.Nsa else 0\n for a in range(self.game.getActionSize())\n ]\n\n if temp == 0:\n bestAs = np.array(np.argwhere(counts == np.max(counts))).flatten()\n bestA = np.random.choice(bestAs)\n probs = [0] * len(counts)\n probs[bestA] = 1\n return probs\n\n counts = [x**(1. / temp) for x in counts]\n counts_sum = float(sum(counts))\n probs = [x / counts_sum for x in counts]\n return probs",
"def cond_prob(self, event, context):\n count = self.table[event, context] + self.prior\n norm = self.margin[context] + (self.prior * len(self.alphabet))\n return count / norm",
"def _pick_next_reaction(net, r0):\n\n propensities = []\n for reaction in net.reactions:\n try:\n div_result = reaction.rate(net.species) / r0\n except ZeroDivisionError:\n div_result = reaction.rate(net.species) / 1\n propensities.append(div_result)\n\n random_reaction = GillespieSimulator._pick_weighted_random(net.reactions, propensities)\n return random_reaction.change_vector(net.species)",
"def _evaluate_policy(self, state, legal_actions, step_rewards=None, action=None):\n assert step_rewards is not None\n probabilities = torch.exp(torch.tensor(step_rewards, dtype=self.dtype))\n probabilities = probabilities / torch.sum(probabilities)\n\n if action is not None:\n return probabilities[action]\n else:\n return probabilities",
"def ProbCorrect(efficacy, difficulty, a=1):\n return 1 / (1 + math.exp(-a * (efficacy - difficulty)))",
"def prob(self, w):\n return self.counts[w] / self.total_count",
"def get_chance(x):\n e = math.exp(1)\n return (1.0 + e) / (1. + math.exp(x + 1))",
"def reaction_rate (self):\n raise NotImplementedError('Subclass must implement this method')",
"def prob(self, sequence):\n prob = 1\n for event, context in self.extract_ngrams(sequence):\n prob *= self.cond_prob(event, context)\n return prob",
"def probability_of_all_successes(p: float, r: int, n: int) -> float:\n\n if r == 1:\n return pow(p, n)\n elif n == 0:\n return 1\n else:\n result = 0\n for x in range(0, n+1):\n result += pow(p, x) * pow(1-p, n-x) * probability_of_all_successes(p, r-1, n-x)\n return result",
"def get_probability(some_dict, some_string):\n lowercase_review = some_string.lower()\n split_review = lowercase_review.split()\n product = 1 \n for word in split_review:\n if word not in some_dict:\n probability = 0.00009\n #assigning unknown words a probability very close to zero\n else: \n probability = some_dict[word]\n product *= probability\n return product",
"def get_probs(self, a):\n with torch.no_grad():\n probabilities = (np.array(self.priorities) ** a) / sum(np.array(self.priorities) ** a)\n return probabilities",
"def probabilities(self):\n raise NotImplementedError",
"def prob(throw, n, d=6, type='classical'):\n count = 0\n table = throw_table(n, d, type)\n for t in table:\n if sum(t) == throw:\n count += 1\n \n return float(count)/len(table)",
"def probability(self, left, right=None):\n return 1",
"def get_action_probs(self, state):\n state = state.astype(np.float32)\n return self.session.run(self.action_probs,\n feed_dict={self.s_placeholder: state})",
"def _basic_probability(count: int, sequence_total_count: int) -> float:\n return float(count) / sequence_total_count"
] | [
"0.6590192",
"0.6571907",
"0.647744",
"0.64491594",
"0.6418756",
"0.63888365",
"0.63714635",
"0.6371433",
"0.6367964",
"0.63313365",
"0.6296423",
"0.62912256",
"0.62699294",
"0.62629133",
"0.62573755",
"0.6252374",
"0.62146765",
"0.6207749",
"0.62025577",
"0.6187049",
"0.61826676",
"0.61214125",
"0.61020696",
"0.60916483",
"0.6077021",
"0.6066374",
"0.6042399",
"0.6018653",
"0.6007051",
"0.5999446"
] | 0.8662731 | 0 |
Deserialize a ReactionProbabilities from a JSON file | def from_json_file(path):
with open(path, 'r') as f:
return ReactionProbabilities.from_json(f.read()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_priors(file_name):\n with open(file_name, \"r\") as fp:\n priors = json.load(fp)\n return priors",
"def read_classification_json(fn):\n with open(fn) as f:\n classification_data = json.load(f)\n f.close()\n \n return classification_data",
"def load_priors(self, json_file):\n\n with open(json_file, 'r') as jf:\n self.priors_dict = json.load(jf)",
"def load_reseq_conditions_from(json_file_or_dict):\n\n # refactor that common useage from TC io\n if isinstance(json_file_or_dict, dict):\n d = json_file_or_dict\n else:\n with open(json_file_or_dict, 'r') as f:\n d = json.loads(f.read())\n\n return ReseqConditions.from_dict(d)",
"def load_raw_annot(filename):\n with open(filename, 'r') as fp:\n data = json.loads(fp.read())\n\n mapping = _create_mapping()\n\n for k in data.keys():\n for i in xrange(len(data[k])):\n data[k][i] = eval_utils.revise_sentiment(data[k][i], mapping)\n return data",
"def from_json(cls, file_path):\n profile = cls()\n with open(file_path, 'r') as fd:\n profile._ngrams = json.load(fd)\n return profile",
"def from_json(cls, file):\n ref = os.path.basename(file)\n with open(file, 'r') as fp:\n j = json.load(fp)\n\n return sorted([cls.from_dict(ref, d) for d in j[ref]], key=lambda x: x.priority)",
"def _load(predictions, f):\n\n # with open(f) as json_file:\n data = json.load(f)\n for p in data['predictions']:\n prediction = Prediction(p)\n predictions[prediction.example_id] = prediction",
"def __init__(self, file):\n with open(file, 'r') as f:\n self.vocab = json.loads(f.read())",
"def read_proto(filename):\n results = results_pb2.ImprovementResults()\n with open(filename, 'rb') as f:\n results.ParseFromString(f.read())\n return results",
"def __load_class_representation(self, filename):\n\n # Reads in the reverse dictionary from the given file.\n with open(filename) as file:\n return json.load(file)",
"def from_json_file(cls, json_file):\n with tf.io.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls.from_dict(json.loads(text))",
"def from_json(cls, fname):\n d = read_json(fname)\n return cls.from_dict(d)",
"def parse_creative_serving_decision(data):\n return json.loads(base64.b64decode(data))",
"def deserialize(cls, json_):\n schema = movers_schema.CatsMover()\n if 'tide' in json_:\n schema.add(Tide())\n _to_dict = schema.deserialize(json_)\n\n return _to_dict",
"def json2class(filename = \"classifier\"):\n json_file = open(filename + \".json\", 'r')\n loaded_model_json = json_file.read()\n json_file.close()\n loaded_model = model_from_json(loaded_model_json)\n # Load weights into new model\n loaded_model.load_weights(filename + \".h5\")\n print(\"Loaded model from disk.\")\n return loaded_model",
"def from_JSON(cls, filename):\n with open(os.path.expanduser(filename), encoding='utf-8') as f:\n return json.load(f, object_hook=class_hook)",
"def from_file(filename):\n # in order to complete this lab we are going to use the python lib json in which we have the function json.loads\n # which will automatically load a json from a string\n f = open(filename, 'r')\n string = f.read()\n return json.loads(string)",
"def load(cls, path: str) -> 'Vocab':\n with open(path, 'r', encoding='utf-8') as f:\n return cls.from_json(f.read())",
"def load_personality_adj():\n return json.load(open(personality_adj()))",
"def from_json_file(cls, json_file):\n with tf.io.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls(**json.loads(text))",
"def load (cls, file):\n with open(file) as f:\n raw = json.load(f)\n obj = PasswordSetCharacteristics()\n obj.lengths = cls.to_num_dict(raw['lengths'])\n obj.lower_counts = cls.to_num_dict(raw['lowerCounts'])\n obj.upper_counts = cls.to_num_dict(raw['upperCounts'])\n obj.digit_counts = cls.to_num_dict(raw['digitCounts'])\n obj.symbol_counts = cls.to_num_dict(raw['symbolCounts'])\n obj.class_counts = cls.to_num_dict(raw['classCounts'])\n obj.word_counts = cls.to_num_dict(raw['wordCounts'])\n return obj",
"def read_json():\n try:\n rospack = rospkg.RosPack()\n file_path = rospack.get_path('autonomous') + \"/src/data.txt\"\n with open(file_path) as json_file:\n json_data = json.load(json_file)\n \n new_data = []\n for d in json_data:\n a = Autons(len(new_data))\n a.deserialize_json(d)\n new_data.append(a)\n\n global data\n data = new_data\n except:\n read_json()",
"def __init__(self, recipie_file):\n try:\n with open(recipie_file) as json_file:\n self.recipie = json.load(json_file)\n except IOError as io_error:\n raise IOError('File not found: {}'.format(io_error.filename))\n\n try:\n self.ing_pop = self.recipie[MMK.KEY_ING_PROP]\n self.steps = self.recipie[MMK.KEY_STEPS]\n self.pan = self.recipie[MMK.KEY_PAN]\n except KeyError as key_err:\n raise KeyError('{} not found in recipie'\n .format(key_err))",
"def load_predictions(fileobj):\n\n def _load(predictions, f):\n \"\"\"Read serialized json from `f`, create examples, and add to `examples`.\"\"\"\n\n # with open(f) as json_file:\n data = json.load(f)\n for p in data['predictions']:\n prediction = Prediction(p)\n predictions[prediction.example_id] = prediction\n\n predictions = {}\n _load(predictions, fileobj)\n\n return predictions",
"def load(self):\n with io.open(self.filename, encoding='utf-8') as f:\n self.load_from_dict(json.loads(f.read()))",
"def read_json():\n with open(\"Ratings.json\") as json_data:\n json_list = []\n for line in json_data:\n json_dict = json.loads(line)\n json_list.append(json_dict)\n return json_list",
"def read_from_file():\n global REPOS\n with file(OUTPUT, 'r') as infile:\n REPOS = json.loads(infile.read())",
"def from_json_file(cls, json_file):\n with open(json_file, \"r\") as reader:\n return cls.from_dict(json.load(reader))",
"def load_data(path_stats, path_rules):\n with open(path_stats) as json_file:\n material_probs = json.load(json_file)\n with open(path_rules) as json_file:\n convertion_rules = json.load(json_file)\n\n return material_probs, convertion_rules"
] | [
"0.6095132",
"0.60795057",
"0.5984798",
"0.59495234",
"0.57160014",
"0.56639713",
"0.5619954",
"0.5497838",
"0.5492578",
"0.53609985",
"0.5356933",
"0.53483236",
"0.5318638",
"0.53132015",
"0.52867985",
"0.52783847",
"0.5249616",
"0.52431166",
"0.5227828",
"0.5201468",
"0.5199053",
"0.51935226",
"0.5189107",
"0.5179041",
"0.5149647",
"0.51165074",
"0.5102031",
"0.5096326",
"0.5090816",
"0.5088783"
] | 0.85232556 | 0 |
Takes a big limit as an integer and get all the prime numbers in that range, including the limit itself. Returns a numpy array of the primes. Fragmentation is an int that multiplies the sqrt of the limit to increase the fragment size. Bigger fragmentation consumes more memory and less time. Fragmentation limit = sqrt of limit. For 4 GB RAM not enough memory for limit == 109. Fragmentation 1000 ok | def get_primes_in_big_limit(limit, fragmentation=1):
print("Getting primes...")
print("Fragmentation set to", fragmentation)
fragment_limit = int(np.sqrt(limit))
fragment_lowest = 0
fragment_highest = fragment_lowest + fragment_limit
primes_in_limit = np.array([], dtype=int)
while fragment_highest < limit:
if fragment_lowest == 0:
fragment_highest += 1
primes_in_first_fragment = get_primes_in(fragment_highest)
primes_in_limit = np.concatenate([primes_in_limit,
primes_in_first_fragment],
axis=None)
else:
primes_in_fragment = get_primes_in_fragment(fragment_lowest,
fragment_highest,
primes_in_first_fragment
)
primes_in_limit = np.concatenate([primes_in_limit,
primes_in_fragment],
axis=None)
fragment_lowest = fragment_highest
fragment_highest += (fragment_limit * fragmentation)
primes_in_last_fragment = get_primes_in_fragment(fragment_lowest,
limit+1,
primes_in_first_fragment
)
return np.concatenate([primes_in_limit, primes_in_last_fragment], axis=None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_primes_in(limit):\n range_limit = np.arange(limit)\n prime_mask = np.ones(limit, dtype=bool)\n prime_mask[0:2] = False\n for i in range_limit[:int(np.sqrt(limit))+1]:\n if prime_mask[i]:\n prime_mask[2*i::i] = False\n return range_limit[prime_mask]",
"def eratosthenes(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = []\n mask = [1]*(limit+1)\n for i in range(2, limit+1):\n if mask[i]:\n primes.append(i)\n for j in range(i*i, limit+1, i):\n mask[j] = 0\n return np.asarray(primes)",
"def eratosthenes_mem(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = [2]\n multiples = [2]\n limit += 1\n for candidate in range(3, limit):\n if candidate not in multiples:\n primes.append(candidate)\n multiples.append(2*candidate)\n for i, m in enumerate(multiples):\n if m <= candidate:\n multiples[i] += primes[i]\n return np.asarray(primes)",
"def getPrimes(limit): \n a = range(2,int(sqrt(limit)+1))\n isPrime = [True]*limit\n for n in a:\n if isPrime[n]:\n # for all primes, each multiple of prime from prime*prime to the end must not be prime\n for i in xrange(n*n, limit, n): \n isPrime[i] = False\n primes = [i for i in xrange(2,len(isPrime)) if isPrime[i]]\n return primes",
"def sieve(limit):\n primes = []\n\n s = xrange(2, limit + 1)\n while len(s) != 0:\n primes.append(s[0])\n s = [n for n in s if (n % s[0]) != 0]\n\n return primes",
"def eratosthenes_np(limit):\n if isinstance(limit, (int, float)):\n limit = int(limit)\n else:\n raise ValueError\n mask = np.ones(limit+1, dtype=np.bool)\n mask[:2] = False\n for i in range(2, int(np.sqrt(limit))+1):\n if mask[i]:\n mask[i*i::i] = False\n return np.nonzero(mask)[0]",
"def gen_primes(limit=10000):\n\n candidates = set(range(2, limit))\n primes = []\n\n while len(candidates) > 0:\n prime = min(candidates)\n primes.append(prime)\n for number in range(prime, limit, prime):\n candidates.discard(number)\n\n return primes",
"def get_primes_in_fragment(fragment_lowest, fragment_highest,\n primes_in_first_fragment):\n fragment_range = np.arange(fragment_lowest, fragment_highest)\n prime_mask = np.ones(len(fragment_range), dtype=bool)\n for p in primes_in_first_fragment:\n if fragment_lowest % p == 0:\n first_multiple = fragment_lowest // p\n else:\n first_multiple = fragment_lowest // p + 1\n first_multiple_index = first_multiple * p - fragment_lowest\n prime_mask[first_multiple_index::p] = False\n return fragment_range[prime_mask]",
"def list_primes(limit):\n sieve = [False]*2 + [True] * (limit-2)\n n = 2\n while n <= sqrt(limit):\n if sieve[n]:\n yield n\n for m in xrange(n**2, limit, n): # multiples\n sieve[m] = False # mark multiples as non prime\n n += 1\n while n < limit:\n if sieve[n]:\n yield n\n n += 1",
"def get_primes_by_limit_number(self, limit_number):\n if int(limit_number) < 2:\n print \"this method needs number >= 2\"\n return []\n ret = []\n prime = self._generate_prime()\n next = prime.next()\n while next <= limit_number:\n ret.append(next)\n next = prime.next()\n return ret",
"def get_primes_over(limit):\n candidate = 1000000\n count = 0\n while count < limit:\n if is_prime(candidate):\n yield candidate\n count += 1\n candidate += 1\n else:\n candidate += 1",
"def primes(lim):\n limsqrt = ceil(sqrt(lim))\n s = [ True ] * (lim + 1)\n for i in range(2, ceil(sqrt(lim))):\n if s[i]:\n k = 0\n while True:\n l = i * i + k * i\n if l > lim: break\n k += 1\n s[l] = False\n return [i for i in range(2, lim + 1) if s[i]]",
"def get_prime_array(high):\n\n # Array of pre-generated primes less than high\n primes = []\n\n with open(\"../pre_generated_primes/primes-to-100k.txt\") as f:\n for line in f:\n hundred = [int(i) for i in line.split()]\n primes.extend(hundred)\n\n if (high > 100000):\n with open(\"../pre_generated_primes/primes-to-200k.txt\") as f2:\n for line in f2:\n two_hundred = [int(i) for i in line.split()]\n primes.extend(two_hundred)\n\n if (high > 200000):\n with open(\"../pre_generated_primes/primes-to-300k.txt\") as f:\n for line in f:\n three_hundred = [int(i) for i in line.split()]\n primes.extend(three_hundred)\n\n if (high > 300000):\n with open(\"../pre_generated_primes/primes-to-400k.txt\") as f:\n for line in f:\n four_hundred = [int(i) for i in line.split()]\n primes.extend(four_hundred)\n\n if (high > 400000):\n with open(\"../pre_generated_primes/primes-to-500k.txt\") as f:\n for line in f:\n five_hundred = [int(i) for i in line.split()]\n primes.extend(five_hundred)\n\n for x in reversed(range(0, len(primes))):\n if primes[x] > high:\n primes.pop(x)\n else:\n break\n\n return primes",
"def eratosthenes_npo(limit):\n if isinstance(limit, (int, float)):\n limit = int(limit)\n else:\n raise ValueError\n mask = np.ones(limit//2, dtype=np.bool)\n for i in range(3, int(limit**0.5)+1, 2):\n if mask[i//2]:\n mask[i*i//2::i] = False\n return np.r_[2, 2*np.nonzero(mask)[0][1::]+1]",
"def getPrimes(start, end):\n # This list will contain every 4-digit prime numbers\n primes = []\n\n for i in range(start, end):\n if isPrime(i):\n primes.append(i)\n return primes",
"def primes(upper_bound):\n global cache\n lower_bound = 2\n prime_set = new_primes(upper_bound, cache, lower_bound)\n prime_set.update(cache)\n cache = prime_set\n\n return prime_set",
"def calculate_prime_numbers(max_number: int) -> list[int]:\n\n is_prime = [True] * max_number\n for i in range(2, isqrt(max_number - 1) + 1):\n if is_prime[i]:\n for j in range(i**2, max_number, i):\n is_prime[j] = False\n\n return [i for i in range(2, max_number) if is_prime[i]]",
"def create_primes(threshold):\n if threshold == 2:\n return [2]\n\n elif threshold < 2:\n return []\n\n numbers = list(range(3, threshold + 1, 2))\n root_of_threshold = threshold**0.5\n half = int((threshold + 1) / 2 - 1)\n idx = 0\n counter = 3\n while counter <= root_of_threshold:\n if numbers[idx]:\n idy = int((counter * counter - 3) / 2)\n numbers[idy] = 0\n while idy < half:\n numbers[idy] = 0\n idy += counter\n idx += 1\n counter = 2 * idx + 3\n return [2] + [number for number in numbers if number]",
"def prime_array(number_of_primes) -> array:\n p = array('i',list(primes(number_of_primes)))\n return p",
"def primesfrom2to(n):\n # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in range(int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)//3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]",
"def get_prime_array(number_of_primes) -> array:\n p = array('i')\n with open(f'prime{number_of_primes}.bin', 'rb') as prime_file:\n p.fromfile(prime_file, number_of_primes) \n return p",
"def find_truncatable_primes(limit: int, start_from: int):\n\n start_time = time.time()\n truncatable = set()\n next_prime = primes_generator_iterable(start_from)\n\n while len(truncatable) < limit:\n\n prime = next(next_prime)\n if is_truncatable(prime):\n truncatable.add(prime)\n\n result = sum(truncatable)\n print_time_log(start_time, result)\n return result",
"def prime_numbers(limit):\n primes = [2, 3, 5]\n for p in primes:\n yield p\n n = 5\n count = 3\n last_idx = -1\n sqrd_prime = 0\n while count <= limit:\n n += 2\n if n > sqrd_prime:\n last_idx += 1\n sqrd_prime = primes[last_idx] ** 2\n is_prime = True\n for i in range(1, last_idx + 1):\n p = primes[i]\n if n % p == 0:\n is_prime = False\n break\n if is_prime:\n count += 1\n primes.append(n)\n yield n",
"def eratosthenes(upperbound: int) -> list:\n if upperbound < 0 or type(upperbound) != int:\n raise ValueError(\"The value is not valid. The upperbound should be a positive integer.\")\n numbers = list(range(2, upperbound + 1)) # create a list between 0 and the upperbound inclusive\n counter = 0 # begin the counter at 2 as 1 and zero are not prime numbers\n while numbers[counter] < upperbound ** (1/2): # loop thru numbers until it reaches the square root of upperbound\n numbers = remove_multiples(numbers, numbers[counter]) # update numbers by removing multiples of current number\n counter += 1 # move on to the next number to check\n return numbers",
"def primesfrom2to(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)/3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]",
"def primesfrom2to(max):\n sieve = numpy.ones(max // 3 + (max % 6 == 2), dtype=numpy.bool)\n for i in range(1, int(max ** 0.5) // 3 + 1):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[k * k // 3::2 * k] = False\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n return numpy.r_[2, 3, ((3 * numpy.nonzero(sieve)[0][1:] + 1) | 1)]",
"def solve(limit):\n upper_limit = ceil(sqrt(limit - 2**4 - 2**3))\n p_list = PrimeList(upper_limit)\n\n num_set = set()\n for x in p_list:\n val = limit - 2**4 - x**3\n if val < 0: continue\n lim = ceil(sqrt(val))\n for y in takewhile(lambda i: i<lim, p_list):\n val = limit - min(x,y)**4 - max(x,y)**3\n if val < 0: continue\n lim = ceil(sqrt(val))\n for z in takewhile(lambda i: i<lim, p_list):\n\n for a,b,c in permutations([x,y,z]):\n ans = a**2 + b**3 + c**4\n if ans > limit: continue\n num_set.add(ans)\n if a ==b and b == c: break\n\n return len(num_set)",
"def primesfrom2to(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n if n == 1:\n return []\n elif n == 2:\n return []\n elif n == 3:\n return [2]\n elif n == 4:\n return [2, 3]\n elif n == 5:\n return [2, 3]\n sieve = np.ones(n/3 + (n % 6 == 2), dtype=np.bool)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[ ((k*k)/3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\n return map(int, np.r_[2, 3, ((3*np.nonzero(sieve)[0]+1) | 1)])",
"def primesfrom2to(n):\r\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\r\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\r\n sieve[0] = False\r\n for i in xrange(int(n**0.5)/3+1):\r\n if sieve[i]:\r\n k=3*i+1|1\r\n sieve[ ((k*k)/3) ::2*k] = False\r\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\r\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]",
"def primes_list(n):\n count = 0\n if n <= 7:\n p_list = [2, 3, 5, 7, 11, 13, 17]\n return p_list[:n]\n else:\n upper_bound = int(n * log(n) + n * log(log(n)))\n return primes(upper_bound)[:n]"
] | [
"0.7772032",
"0.75551635",
"0.754886",
"0.71958756",
"0.7041358",
"0.6890954",
"0.6870041",
"0.67170656",
"0.66442776",
"0.6626837",
"0.65599114",
"0.63915014",
"0.6374505",
"0.63370544",
"0.6328696",
"0.6316949",
"0.6307289",
"0.62010443",
"0.6167947",
"0.61109275",
"0.6107259",
"0.60967153",
"0.60713685",
"0.605898",
"0.6058528",
"0.6020613",
"0.6013907",
"0.6006447",
"0.5991404",
"0.5925827"
] | 0.8527422 | 0 |
Takes a limit as an integer and gets all the prime numbers in that range, NOT including the limit itself. Returns a numpy array of the primes. | def get_primes_in(limit):
range_limit = np.arange(limit)
prime_mask = np.ones(limit, dtype=bool)
prime_mask[0:2] = False
for i in range_limit[:int(np.sqrt(limit))+1]:
if prime_mask[i]:
prime_mask[2*i::i] = False
return range_limit[prime_mask] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def eratosthenes(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = []\n mask = [1]*(limit+1)\n for i in range(2, limit+1):\n if mask[i]:\n primes.append(i)\n for j in range(i*i, limit+1, i):\n mask[j] = 0\n return np.asarray(primes)",
"def getPrimes(limit): \n a = range(2,int(sqrt(limit)+1))\n isPrime = [True]*limit\n for n in a:\n if isPrime[n]:\n # for all primes, each multiple of prime from prime*prime to the end must not be prime\n for i in xrange(n*n, limit, n): \n isPrime[i] = False\n primes = [i for i in xrange(2,len(isPrime)) if isPrime[i]]\n return primes",
"def sieve(limit):\n primes = []\n\n s = xrange(2, limit + 1)\n while len(s) != 0:\n primes.append(s[0])\n s = [n for n in s if (n % s[0]) != 0]\n\n return primes",
"def eratosthenes_mem(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = [2]\n multiples = [2]\n limit += 1\n for candidate in range(3, limit):\n if candidate not in multiples:\n primes.append(candidate)\n multiples.append(2*candidate)\n for i, m in enumerate(multiples):\n if m <= candidate:\n multiples[i] += primes[i]\n return np.asarray(primes)",
"def get_primes_by_limit_number(self, limit_number):\n if int(limit_number) < 2:\n print \"this method needs number >= 2\"\n return []\n ret = []\n prime = self._generate_prime()\n next = prime.next()\n while next <= limit_number:\n ret.append(next)\n next = prime.next()\n return ret",
"def get_primes_in_big_limit(limit, fragmentation=1):\n print(\"Getting primes...\")\n print(\"Fragmentation set to\", fragmentation)\n fragment_limit = int(np.sqrt(limit))\n fragment_lowest = 0\n fragment_highest = fragment_lowest + fragment_limit\n primes_in_limit = np.array([], dtype=int)\n while fragment_highest < limit:\n if fragment_lowest == 0:\n fragment_highest += 1\n primes_in_first_fragment = get_primes_in(fragment_highest)\n primes_in_limit = np.concatenate([primes_in_limit,\n primes_in_first_fragment],\n axis=None)\n else:\n primes_in_fragment = get_primes_in_fragment(fragment_lowest,\n fragment_highest,\n primes_in_first_fragment\n )\n primes_in_limit = np.concatenate([primes_in_limit,\n primes_in_fragment],\n axis=None)\n fragment_lowest = fragment_highest\n fragment_highest += (fragment_limit * fragmentation)\n primes_in_last_fragment = get_primes_in_fragment(fragment_lowest,\n limit+1,\n primes_in_first_fragment\n )\n return np.concatenate([primes_in_limit, primes_in_last_fragment], axis=None)",
"def gen_primes(limit=10000):\n\n candidates = set(range(2, limit))\n primes = []\n\n while len(candidates) > 0:\n prime = min(candidates)\n primes.append(prime)\n for number in range(prime, limit, prime):\n candidates.discard(number)\n\n return primes",
"def eratosthenes_np(limit):\n if isinstance(limit, (int, float)):\n limit = int(limit)\n else:\n raise ValueError\n mask = np.ones(limit+1, dtype=np.bool)\n mask[:2] = False\n for i in range(2, int(np.sqrt(limit))+1):\n if mask[i]:\n mask[i*i::i] = False\n return np.nonzero(mask)[0]",
"def list_primes(limit):\n sieve = [False]*2 + [True] * (limit-2)\n n = 2\n while n <= sqrt(limit):\n if sieve[n]:\n yield n\n for m in xrange(n**2, limit, n): # multiples\n sieve[m] = False # mark multiples as non prime\n n += 1\n while n < limit:\n if sieve[n]:\n yield n\n n += 1",
"def primes(lim):\n limsqrt = ceil(sqrt(lim))\n s = [ True ] * (lim + 1)\n for i in range(2, ceil(sqrt(lim))):\n if s[i]:\n k = 0\n while True:\n l = i * i + k * i\n if l > lim: break\n k += 1\n s[l] = False\n return [i for i in range(2, lim + 1) if s[i]]",
"def eratosthenes_npo(limit):\n if isinstance(limit, (int, float)):\n limit = int(limit)\n else:\n raise ValueError\n mask = np.ones(limit//2, dtype=np.bool)\n for i in range(3, int(limit**0.5)+1, 2):\n if mask[i//2]:\n mask[i*i//2::i] = False\n return np.r_[2, 2*np.nonzero(mask)[0][1::]+1]",
"def calculate_prime_numbers(max_number: int) -> list[int]:\n\n is_prime = [True] * max_number\n for i in range(2, isqrt(max_number - 1) + 1):\n if is_prime[i]:\n for j in range(i**2, max_number, i):\n is_prime[j] = False\n\n return [i for i in range(2, max_number) if is_prime[i]]",
"def get_primes_over(limit):\n candidate = 1000000\n count = 0\n while count < limit:\n if is_prime(candidate):\n yield candidate\n count += 1\n candidate += 1\n else:\n candidate += 1",
"def getPrimes(start, end):\n # This list will contain every 4-digit prime numbers\n primes = []\n\n for i in range(start, end):\n if isPrime(i):\n primes.append(i)\n return primes",
"def primes(n_max: int = 100) -> List[int]:\n if n_max < 2:\n raise ValueError\n\n t = list(range(2, n_max + 1))\n for i in t:\n for j in (k for k in t if k > i):\n if j % i == 0:\n t.remove(j)\n\n return sorted(t)",
"def create_primes(threshold):\n if threshold == 2:\n return [2]\n\n elif threshold < 2:\n return []\n\n numbers = list(range(3, threshold + 1, 2))\n root_of_threshold = threshold**0.5\n half = int((threshold + 1) / 2 - 1)\n idx = 0\n counter = 3\n while counter <= root_of_threshold:\n if numbers[idx]:\n idy = int((counter * counter - 3) / 2)\n numbers[idy] = 0\n while idy < half:\n numbers[idy] = 0\n idy += counter\n idx += 1\n counter = 2 * idx + 3\n return [2] + [number for number in numbers if number]",
"def prime_array(number_of_primes) -> array:\n p = array('i',list(primes(number_of_primes)))\n return p",
"def eratosthenes(upperbound: int) -> list:\n if upperbound < 0 or type(upperbound) != int:\n raise ValueError(\"The value is not valid. The upperbound should be a positive integer.\")\n numbers = list(range(2, upperbound + 1)) # create a list between 0 and the upperbound inclusive\n counter = 0 # begin the counter at 2 as 1 and zero are not prime numbers\n while numbers[counter] < upperbound ** (1/2): # loop thru numbers until it reaches the square root of upperbound\n numbers = remove_multiples(numbers, numbers[counter]) # update numbers by removing multiples of current number\n counter += 1 # move on to the next number to check\n return numbers",
"def get_primes(n):\n\n return list(primes_sieve(n))",
"def prime_numpy_version(n: int) -> List[int]:\n arm = range(2, np.floor(n / 2).astype(int) + 1)\n x, y = np.meshgrid(*([arm] * 2))\n\n Z = range(2, n + 1)\n D = x * y\n Diff = np.setdiff1d\n\n P = Diff(Z, D[D <= n].ravel())\n return P.tolist()",
"def primes(n):\n return [i for i in xrange(1, n + 1) if mr_prime(i)]",
"def get_prime_array(high):\n\n # Array of pre-generated primes less than high\n primes = []\n\n with open(\"../pre_generated_primes/primes-to-100k.txt\") as f:\n for line in f:\n hundred = [int(i) for i in line.split()]\n primes.extend(hundred)\n\n if (high > 100000):\n with open(\"../pre_generated_primes/primes-to-200k.txt\") as f2:\n for line in f2:\n two_hundred = [int(i) for i in line.split()]\n primes.extend(two_hundred)\n\n if (high > 200000):\n with open(\"../pre_generated_primes/primes-to-300k.txt\") as f:\n for line in f:\n three_hundred = [int(i) for i in line.split()]\n primes.extend(three_hundred)\n\n if (high > 300000):\n with open(\"../pre_generated_primes/primes-to-400k.txt\") as f:\n for line in f:\n four_hundred = [int(i) for i in line.split()]\n primes.extend(four_hundred)\n\n if (high > 400000):\n with open(\"../pre_generated_primes/primes-to-500k.txt\") as f:\n for line in f:\n five_hundred = [int(i) for i in line.split()]\n primes.extend(five_hundred)\n\n for x in reversed(range(0, len(primes))):\n if primes[x] > high:\n primes.pop(x)\n else:\n break\n\n return primes",
"def sieve(upto):\n return list(prime_numbers(upto))",
"def primesfrom2to(max):\n sieve = numpy.ones(max // 3 + (max % 6 == 2), dtype=numpy.bool)\n for i in range(1, int(max ** 0.5) // 3 + 1):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[k * k // 3::2 * k] = False\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n return numpy.r_[2, 3, ((3 * numpy.nonzero(sieve)[0][1:] + 1) | 1)]",
"def primes(upper_bound):\n global cache\n lower_bound = 2\n prime_set = new_primes(upper_bound, cache, lower_bound)\n prime_set.update(cache)\n cache = prime_set\n\n return prime_set",
"def get_primes(maxi):\n\n is_prime = [True] * (maxi + 1)\n \n is_prime[0] = False\n is_prime[1] = False\n # is_prime[2] = True and all other even numbers are not prime\n for i in range(2,maxi+1):\n if is_prime[i]: # if current is prime, set multiples to current not prime\n for j in range(2*i, maxi+1, i):\n is_prime[j] = False\n\n return is_prime",
"def make_sieve(upper):\n\n if upper <= 0:\n return []\n\n sieve = [True for i in range(upper + 1)]\n limit = math.floor(math.sqrt(upper))\n sieve[0], sieve[1] = False, False\n\n for i in range(2, limit + 1):\n if sieve[i]:\n for j in range(i * 2, upper + 1, i):\n sieve[j] = False\n\n primes = []\n for num, is_prime in enumerate(sieve):\n if is_prime:\n primes.append(num)\n\n return primes",
"def primesfrom2to(n):\n # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in range(int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)//3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]",
"def primesfrom2to(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n if n == 1:\n return []\n elif n == 2:\n return []\n elif n == 3:\n return [2]\n elif n == 4:\n return [2, 3]\n elif n == 5:\n return [2, 3]\n sieve = np.ones(n/3 + (n % 6 == 2), dtype=np.bool)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[ ((k*k)/3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\n return map(int, np.r_[2, 3, ((3*np.nonzero(sieve)[0]+1) | 1)])",
"def get_probable_prime(n: int) -> [int]:\n return [6*n-1, 6*n+1]"
] | [
"0.80535877",
"0.7970838",
"0.7566367",
"0.75646406",
"0.74854904",
"0.7452761",
"0.73869497",
"0.7217808",
"0.72028553",
"0.7137117",
"0.69901395",
"0.69098914",
"0.6839922",
"0.66989404",
"0.6691934",
"0.6605368",
"0.65635985",
"0.64167565",
"0.6413339",
"0.6396981",
"0.63700086",
"0.6362434",
"0.63288206",
"0.6313341",
"0.63128763",
"0.62797415",
"0.62711865",
"0.6261318",
"0.6250775",
"0.6234691"
] | 0.846045 | 0 |
Takes the fragment's lowest and highest limits as integers and gets all the prime numbers in that range, NOT including the limit itself. Returns a numpy array of the primes. Needs the primes from the first fragment of the program as input. | def get_primes_in_fragment(fragment_lowest, fragment_highest,
primes_in_first_fragment):
fragment_range = np.arange(fragment_lowest, fragment_highest)
prime_mask = np.ones(len(fragment_range), dtype=bool)
for p in primes_in_first_fragment:
if fragment_lowest % p == 0:
first_multiple = fragment_lowest // p
else:
first_multiple = fragment_lowest // p + 1
first_multiple_index = first_multiple * p - fragment_lowest
prime_mask[first_multiple_index::p] = False
return fragment_range[prime_mask] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_primes_in_big_limit(limit, fragmentation=1):\n print(\"Getting primes...\")\n print(\"Fragmentation set to\", fragmentation)\n fragment_limit = int(np.sqrt(limit))\n fragment_lowest = 0\n fragment_highest = fragment_lowest + fragment_limit\n primes_in_limit = np.array([], dtype=int)\n while fragment_highest < limit:\n if fragment_lowest == 0:\n fragment_highest += 1\n primes_in_first_fragment = get_primes_in(fragment_highest)\n primes_in_limit = np.concatenate([primes_in_limit,\n primes_in_first_fragment],\n axis=None)\n else:\n primes_in_fragment = get_primes_in_fragment(fragment_lowest,\n fragment_highest,\n primes_in_first_fragment\n )\n primes_in_limit = np.concatenate([primes_in_limit,\n primes_in_fragment],\n axis=None)\n fragment_lowest = fragment_highest\n fragment_highest += (fragment_limit * fragmentation)\n primes_in_last_fragment = get_primes_in_fragment(fragment_lowest,\n limit+1,\n primes_in_first_fragment\n )\n return np.concatenate([primes_in_limit, primes_in_last_fragment], axis=None)",
"def get_primes_in(limit):\n range_limit = np.arange(limit)\n prime_mask = np.ones(limit, dtype=bool)\n prime_mask[0:2] = False\n for i in range_limit[:int(np.sqrt(limit))+1]:\n if prime_mask[i]:\n prime_mask[2*i::i] = False\n return range_limit[prime_mask]",
"def eratosthenes(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = []\n mask = [1]*(limit+1)\n for i in range(2, limit+1):\n if mask[i]:\n primes.append(i)\n for j in range(i*i, limit+1, i):\n mask[j] = 0\n return np.asarray(primes)",
"def getPrimes(start, end):\n # This list will contain every 4-digit prime numbers\n primes = []\n\n for i in range(start, end):\n if isPrime(i):\n primes.append(i)\n return primes",
"def eratosthenes_mem(limit):\n if isinstance(limit, (int, float)) and limit == int(limit):\n limit = int(limit)\n else:\n raise ValueError\n primes = [2]\n multiples = [2]\n limit += 1\n for candidate in range(3, limit):\n if candidate not in multiples:\n primes.append(candidate)\n multiples.append(2*candidate)\n for i, m in enumerate(multiples):\n if m <= candidate:\n multiples[i] += primes[i]\n return np.asarray(primes)",
"def getPrimes(limit): \n a = range(2,int(sqrt(limit)+1))\n isPrime = [True]*limit\n for n in a:\n if isPrime[n]:\n # for all primes, each multiple of prime from prime*prime to the end must not be prime\n for i in xrange(n*n, limit, n): \n isPrime[i] = False\n primes = [i for i in xrange(2,len(isPrime)) if isPrime[i]]\n return primes",
"def primes(lim):\n limsqrt = ceil(sqrt(lim))\n s = [ True ] * (lim + 1)\n for i in range(2, ceil(sqrt(lim))):\n if s[i]:\n k = 0\n while True:\n l = i * i + k * i\n if l > lim: break\n k += 1\n s[l] = False\n return [i for i in range(2, lim + 1) if s[i]]",
"def sieve(limit):\n primes = []\n\n s = xrange(2, limit + 1)\n while len(s) != 0:\n primes.append(s[0])\n s = [n for n in s if (n % s[0]) != 0]\n\n return primes",
"def primes(upper_bound):\n global cache\n lower_bound = 2\n prime_set = new_primes(upper_bound, cache, lower_bound)\n prime_set.update(cache)\n cache = prime_set\n\n return prime_set",
"def calculate_prime_numbers(max_number: int) -> list[int]:\n\n is_prime = [True] * max_number\n for i in range(2, isqrt(max_number - 1) + 1):\n if is_prime[i]:\n for j in range(i**2, max_number, i):\n is_prime[j] = False\n\n return [i for i in range(2, max_number) if is_prime[i]]",
"def gen_primes(limit=10000):\n\n candidates = set(range(2, limit))\n primes = []\n\n while len(candidates) > 0:\n prime = min(candidates)\n primes.append(prime)\n for number in range(prime, limit, prime):\n candidates.discard(number)\n\n return primes",
"def get_prime_array(high):\n\n # Array of pre-generated primes less than high\n primes = []\n\n with open(\"../pre_generated_primes/primes-to-100k.txt\") as f:\n for line in f:\n hundred = [int(i) for i in line.split()]\n primes.extend(hundred)\n\n if (high > 100000):\n with open(\"../pre_generated_primes/primes-to-200k.txt\") as f2:\n for line in f2:\n two_hundred = [int(i) for i in line.split()]\n primes.extend(two_hundred)\n\n if (high > 200000):\n with open(\"../pre_generated_primes/primes-to-300k.txt\") as f:\n for line in f:\n three_hundred = [int(i) for i in line.split()]\n primes.extend(three_hundred)\n\n if (high > 300000):\n with open(\"../pre_generated_primes/primes-to-400k.txt\") as f:\n for line in f:\n four_hundred = [int(i) for i in line.split()]\n primes.extend(four_hundred)\n\n if (high > 400000):\n with open(\"../pre_generated_primes/primes-to-500k.txt\") as f:\n for line in f:\n five_hundred = [int(i) for i in line.split()]\n primes.extend(five_hundred)\n\n for x in reversed(range(0, len(primes))):\n if primes[x] > high:\n primes.pop(x)\n else:\n break\n\n return primes",
"def primesfrom2to(max):\n sieve = numpy.ones(max // 3 + (max % 6 == 2), dtype=numpy.bool)\n for i in range(1, int(max ** 0.5) // 3 + 1):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[k * k // 3::2 * k] = False\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n return numpy.r_[2, 3, ((3 * numpy.nonzero(sieve)[0][1:] + 1) | 1)]",
"def linear_sieve(max_n):\n smallest_factors = [0] * max_n\n primes = []\n\n for i in range(2, max_n):\n if smallest_factors[i] == 0:\n smallest_factors[i] = i\n primes.append(i)\n\n for p in primes:\n if p > smallest_factors[i] or i * p >= max_n:\n break\n smallest_factors[i * p] = p\n return primes, smallest_factors",
"def eratosthenes(upperbound: int) -> list:\n if upperbound < 0 or type(upperbound) != int:\n raise ValueError(\"The value is not valid. The upperbound should be a positive integer.\")\n numbers = list(range(2, upperbound + 1)) # create a list between 0 and the upperbound inclusive\n counter = 0 # begin the counter at 2 as 1 and zero are not prime numbers\n while numbers[counter] < upperbound ** (1/2): # loop thru numbers until it reaches the square root of upperbound\n numbers = remove_multiples(numbers, numbers[counter]) # update numbers by removing multiples of current number\n counter += 1 # move on to the next number to check\n return numbers",
"def create_primes(threshold):\n if threshold == 2:\n return [2]\n\n elif threshold < 2:\n return []\n\n numbers = list(range(3, threshold + 1, 2))\n root_of_threshold = threshold**0.5\n half = int((threshold + 1) / 2 - 1)\n idx = 0\n counter = 3\n while counter <= root_of_threshold:\n if numbers[idx]:\n idy = int((counter * counter - 3) / 2)\n numbers[idy] = 0\n while idy < half:\n numbers[idy] = 0\n idy += counter\n idx += 1\n counter = 2 * idx + 3\n return [2] + [number for number in numbers if number]",
"def get_primes_list(start, end):\r\n primes_list_obj = PrimesList(start, end)\r\n primes_list = primes_list_obj.primes_list()\r\n return primes_list",
"def get_primes_in(self, grange):\n for n in grange:\n if self.is_prime(n):\n yield n",
"def primes(n_max: int = 100) -> List[int]:\n if n_max < 2:\n raise ValueError\n\n t = list(range(2, n_max + 1))\n for i in t:\n for j in (k for k in t if k > i):\n if j % i == 0:\n t.remove(j)\n\n return sorted(t)",
"def get_primes(lower: int, upper: int) -> typing.Generator[int, None, None]:\r\n for num in range(lower, upper + 1):\r\n if num > 1:\r\n for i in range(2, int(math.sqrt(num)) + 1):\r\n if num % i == 0:\r\n break\r\n else:\r\n yield num",
"def get_primes_by_limit_number(self, limit_number):\n if int(limit_number) < 2:\n print \"this method needs number >= 2\"\n return []\n ret = []\n prime = self._generate_prime()\n next = prime.next()\n while next <= limit_number:\n ret.append(next)\n next = prime.next()\n return ret",
"def prime_array(number_of_primes) -> array:\n p = array('i',list(primes(number_of_primes)))\n return p",
"def eratosthenes_np(limit):\n if isinstance(limit, (int, float)):\n limit = int(limit)\n else:\n raise ValueError\n mask = np.ones(limit+1, dtype=np.bool)\n mask[:2] = False\n for i in range(2, int(np.sqrt(limit))+1):\n if mask[i]:\n mask[i*i::i] = False\n return np.nonzero(mask)[0]",
"def mult_parities_python(bound, verbose=False):\n v = [None] * bound\n v[0] = None\n v[1] = int(0)\n P = [int(p) for p in prime_range(bound)]\n for p in P:\n v[p] = int(1)\n last = P\n last_parity = int(1)\n loops = floor(log(bound, 2)) + 1\n bound = int(bound)\n for k in range(loops):\n cur = []\n cur_parity = (last_parity + int(1)) % int(2)\n if verbose:\n print(\"loop {0} (of {1}); last = {2}\".format(k, loops, len(last)))\n for n in last:\n for p in P:\n m = n * p\n if m >= bound:\n break\n if v[m] is None:\n v[m] = cur_parity\n cur.append(m)\n last_parity = cur_parity\n last = cur\n return v",
"def primi(n):\n numVec = []\n for x in range(n-1):\n numVec.append(x+2)\n for num in numVec[:(n//2-1)]:\n if numVec[num-2] != 0:\n numVec[slice(2*num-2, n-1, num)] = [0]*(n//num-1)\n numVec = [x for x in numVec if x!=0]\n return numVec",
"def list_primes(limit):\n sieve = [False]*2 + [True] * (limit-2)\n n = 2\n while n <= sqrt(limit):\n if sieve[n]:\n yield n\n for m in xrange(n**2, limit, n): # multiples\n sieve[m] = False # mark multiples as non prime\n n += 1\n while n < limit:\n if sieve[n]:\n yield n\n n += 1",
"def get_primes(s):\n primes = bytearray([1] * s)\n for i in range(2, s):\n if primes[i] == 1:\n for j in range(i, s):\n if i * j < s:\n primes[i * j] = 0\n else:\n break\n return primes",
"def get_primes(maxi):\n\n is_prime = [True] * (maxi + 1)\n \n is_prime[0] = False\n is_prime[1] = False\n # is_prime[2] = True and all other even numbers are not prime\n for i in range(2,maxi+1):\n if is_prime[i]: # if current is prime, set multiples to current not prime\n for j in range(2*i, maxi+1, i):\n is_prime[j] = False\n\n return is_prime",
"def make_sieve(upper):\n\n if upper <= 0:\n return []\n\n sieve = [True for i in range(upper + 1)]\n limit = math.floor(math.sqrt(upper))\n sieve[0], sieve[1] = False, False\n\n for i in range(2, limit + 1):\n if sieve[i]:\n for j in range(i * 2, upper + 1, i):\n sieve[j] = False\n\n primes = []\n for num, is_prime in enumerate(sieve):\n if is_prime:\n primes.append(num)\n\n return primes",
"def primesfrom2to(n):\n # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in range(int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)//3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]"
] | [
"0.7555909",
"0.745268",
"0.72320384",
"0.6928359",
"0.6911003",
"0.6905713",
"0.67729235",
"0.6719026",
"0.6628001",
"0.66014606",
"0.65840936",
"0.6533467",
"0.6412444",
"0.64070624",
"0.64064884",
"0.63762397",
"0.63470197",
"0.6316131",
"0.6301665",
"0.6293129",
"0.62916404",
"0.628601",
"0.62382615",
"0.6158372",
"0.6149993",
"0.6138645",
"0.60854316",
"0.60767126",
"0.60731333",
"0.60654074"
] | 0.80316025 | 0 |
Takes a tuple where the first element is the dividend and the second element is the divisor. Both elements should be int. Performs a long division. | def long_division(dividend_divisor_tuple, decimal_limit=5):
natural, decimal = [], []
dividend, divisor = dividend_divisor_tuple[0], dividend_divisor_tuple[1]
assert isinstance(dividend, int), "Dividend not int"
assert isinstance(divisor, int), "Divisor not int"
floor_div = dividend // divisor
rest = dividend % divisor
# Natural part of the division
while floor_div > 0:
natural.append(str(floor_div))
dividend = rest
floor_div = dividend // divisor
rest = dividend % divisor
if rest == 0: # Divisor is factor of dividend
print("Divisor is factor of dividend")
return ("".join(natural), None, None)
# Decimal part of the division
dividend_list = []
recurring_index = None
while len(decimal) < decimal_limit:
dividend_list.append(dividend)
dividend *= 10
floor_div = dividend // divisor
decimal.append(str(floor_div))
rest = dividend % divisor
if rest == 0: # Terminating decimal reached
return ("".join(natural), "".join(decimal), None)
elif rest in dividend_list: # Recurring cycle found
recurring_index = dividend_list.index(rest)
print("Recurring cycle found")
break
else:
dividend = rest
if recurring_index is not None:
recurring = decimal[recurring_index:]
decimal = decimal[:recurring_index]
return ("".join(natural), "".join(decimal), "".join(recurring))
else:
print("Decimal limit reached")
return ("".join(natural), "".join(decimal), None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def div_numbers(a: int, b: int) -> int:\n return a / b",
"def test_div():\n l = [1, 2, 3, 4]\n assert s7.div(*l) == 1 / 2 / 3 / 4\n assert s7.div(100, 20) == 5\n assert s7.div(100.0, 20) == 5.0\n assert s7.div(100, 20.0) == 5.0",
"def div(seq):\n for (i, n) in enumerate(seq):\n # try dividing this number with all others\n # (in fact, we can only consider the subsequent numbers,\n # and check the ratio both ways)\n for j in range(i+1, len(seq)):\n ratio1 = seq[j] / seq[i]\n ratio2 = seq[i] / seq[j]\n for result in [ratio1, ratio2]:\n # is the result an integer? if so, done\n if is_int(result):\n return int(result)",
"def division_algo(a, b):\n return a / b, a % b",
"def kkDiv(*args):\n if (None in args):\n return None\n quot = float(args[0]) / float(args[1])\n if (quot > 1):\n return quot\n else:\n return 1/quot",
"def divider_ref(dividend, divisor):\n rom_size = 2**8\n rom = [0 for _ in range(rom_size)]\n rom = [0] + [int(round(((2**16)-1)/float(ii)))\n for ii in range(1, rom_size)]\n rom = tuple(rom)\n divisor_reciprocal = rom[divisor]\n if dividend < 0:\n dividend_d1 = -dividend\n else:\n dividend_d1 = dividend\n mult = (dividend_d1 * divisor_reciprocal)\n mult_s = mult/(2**16)\n if dividend < 0:\n mult_s = -mult_s\n round_ = int((mult/(2**15)) % 2)\n if round_ == 1:\n if dividend >= 0:\n mult_s = mult_s + 1\n else:\n mult_s = int(mult_s - 1)\n return int(mult_s)",
"def dividir(value, arg):\n return int(value) /int(arg)",
"def div(a,b):\r\n return a/b",
"def finddiv(x):\r\n \r\n div = (1, x)\r\n for i in range(2, x//2+1):\r\n if x%i==0:\r\n div+=(i,)\r\n return div",
"def longDiv(c, e, n):\n if n == 0:\n return Scientific(c, e)\n else:\n # TODO: Use a logarithm here!\n # TODO: Can't use tail recursion like this in python!\n if n < d:\n return longDiv(c * 10, e - 1, n * 10)\n else:\n (q, r) = quotRemInteger(n, d)\n return longDiv(c+q, e, r)",
"def division(a, b):\n return (a // b, a / b)",
"def div(self, a, b):\n return (a / b, a % b)",
"def div(x, y):\n return x / y",
"def lcm(a: int, b: int) -> int:\n return (a * b) // gcd(a, b)",
"def int_div_inplace(a, b):",
"def multiple(a, b):\n from fractions import gcd\n def lcm(x,y):\n \treturn (x*y)//gcd(x,y)\n #return lcm(a,b)\n \n def gcd(x,y):\n if y > x:\n x, y = y, x\n while y != 0:\n x, y = y, x % y\n return x\n return (a*b) // gcd(a,b)",
"def _lcm_f(a, b):\n return int((a * b) / _gcd_f(a, b))",
"def divide(self, dividend, divisor):\n MAX_INT = 0x7FFFFFFF\n MIN_INT = 0x80000000\n\n if divisor == 0:\n return MAX_INT\n\n sign = 1 if dividend > 0 and divisor > 0 or dividend < 0 and divisor < 0 else -1\n dividend, divisor = abs(dividend), abs(divisor)\n res = 0\n while dividend >= divisor:\n pow2 = 1\n tmp = divisor\n while dividend >= tmp:\n tmp <<= 1\n pow2 <<= 1\n tmp >>= 1\n pow2 >>= 1\n dividend -= tmp\n res += pow2\n \n res = sign * res\n return res if res <= MAX_INT else MAX_INT",
"def divide(num):\n return (int(num / 2))",
"def divisors(n):\n return tuple(_divisor_gen(n))",
"def divide(*args):\n body = ['<h1>Divison Calculator</h1>']\n try:\n quotient = reduce(lambda x,y: x / y, map(int,args))\n body.append(f'Total equals: {quotient}')\n except ZeroDivisionError:\n raise ZeroDivisionError\n return '\\n'.join(body)",
"def divide(self, dividend: int, divisor: int) -> int:\n sig = (dividend < 0) == (divisor < 0)\n a, b, res = abs(dividend), abs(divisor), 0\n while a >= b:\n shift = 0\n while a >= b << (shift + 1):\n print(a, res)\n shift += 1\n res += 1 << shift\n a -= b << shift\n return min(res if sig else -res, (1 << 31) - 1)",
"def ceil_intdiv(a, b):\r\n # If a and b are int with not many significant bits, we could\r\n # cast them to float to avoid doing the modulo. We do not know if this\r\n # is faster or not. But this is not safe for int64 as the cast will\r\n # lose precision.\r\n # e.g.: cast(cast(a, scalar.upcast(a, 'float32')) / b, scal.upcast(a, b))\r\n\r\n # We cast for the case when a and b are uint*. Otherwise neq will\r\n # force their upcast to int.\r\n div = int_div(a, b)\r\n ret = cast(neq(a % b, 0), div.dtype) + div\r\n assert ret.dtype == scal.upcast(div.owner.inputs[0], div.owner.inputs[1])\r\n return ret",
"def divide(*args):\n\n # TODO: Fill sum with the correct value, based on the\n # args provided.\n quotient = str(args[0] / args[1])\n return quotient",
"def div(a: Decimal, b: Decimal) -> Decimal:\n return a / b",
"def division(a, b):\n if b != 0:\n return a//b",
"def lcm(a: int, b: int):\n return (a * b) // euclid(a, b)",
"def div_value(self, lv, rv):",
"def div(a, b):\n a = float(a)\n b = float(b)\n return a / b",
"def ceildiv(a, b):\n return - (-a // b)"
] | [
"0.65800995",
"0.6371114",
"0.63462335",
"0.6256875",
"0.6244074",
"0.62214583",
"0.6192333",
"0.61851525",
"0.6185146",
"0.61841047",
"0.6174636",
"0.6085751",
"0.6082528",
"0.603412",
"0.6029013",
"0.6028425",
"0.6017244",
"0.6011632",
"0.60035694",
"0.59994143",
"0.5984466",
"0.598289",
"0.59781003",
"0.596462",
"0.5962144",
"0.5948469",
"0.5946427",
"0.5941916",
"0.5941576",
"0.59414226"
] | 0.6508378 | 1 |
Get length of number in digits. | def get_number_length(number):
return len(str(number)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ndigits(n):\n return len(str(abs(n)))",
"def count_digits(n):\n return len(str(n))",
"def _number_of_digits(number: int) -> int:\n return int(log10(number)) + 1",
"def get_int_width(integer):\n return len(str(integer))",
"def get_length(x):\n\n try:\n return int(x)\n except Exception:\n return len(x)",
"def getLength(self):\n return self.n",
"def count_digits(n):\n count = 0\n n=abs(n)\n while n!=0:\n count += 1\n n = n // 10\n return count",
"def count_digits(num):\n total = 0\n while num is not 0:\n total += num % 10\n num //= 10\n return total",
"def get_length(val):\n if isinstance(val, str):\n return len(val)\n if isinstance(val, int):\n return len('%8s' % val)\n if isinstance(val, float):\n return len('%15.4f' % val)\n if isinstance(val, bool):\n return 5",
"def get_length(self):\n\n return self._length",
"def digit_count(x):\n return int(math.floor(math.log10(x)) + 1)",
"def get_length(self):\n return self._length",
"def get_length(self):\n return self._length",
"def get_length(self):\n\n return self.length",
"def length(self) -> 'int':\n return self._info.len",
"def _get_length(self):\n return self._length",
"def length(n):\n a, b, c = n / 100, (n % 100) / 10 * 10, n % 10\n total = 0\n if a:\n total += num_to_len[a] + len('hundred')\n if b or c:\n total += len('and')\n if b == 10:\n total += num_to_len[b+c]\n else :\n if b > 1:\n total += num_to_len[b]\n if c:\n total += num_to_len[c]\n return total",
"def getLength(self) -> float:\n return self.length",
"def length(self):\n return self.n * self.t.length()",
"def _get_length(self):\n from math import sqrt\n\n if self._length is None:\n sum1 = 0\n for a in self.diff:\n sum1 += a * a\n self._length = sqrt(sum1)\n return self._length",
"def length(self):\n\t\treturn self.n",
"def nr_digits_number(armstrong_candidate: int) -> int:\n number_of_digits = 0\n while armstrong_candidate != 0:\n armstrong_candidate = int(armstrong_candidate / 10)\n number_of_digits += 1\n\n return number_of_digits",
"def length(value):\n\n # Try to return the length\n return len(value)",
"def get_long_len(nums):\n return len(str(max(nums + [sum(nums)])))",
"def length(self):\n return self._info.length # pylint: disable=E1101",
"def total_length(self):\n return abs(self.length)",
"def num_digits(num):\r\n if num == 0:\r\n return 1\r\n return int(log10(num)+1)",
"def bitlen(number):\n assert(isinstance(number, int))\n if number == 0:\n return 1\n else:\n return floor(log2(number)) + 1",
"def getLength(self):\n return self.length",
"def length(self) -> ir.IntegerValue:\n return ops.MapLength(self).to_expr()"
] | [
"0.7974036",
"0.78436166",
"0.75876623",
"0.72771937",
"0.68577904",
"0.68538064",
"0.677399",
"0.6735333",
"0.6720063",
"0.6719415",
"0.6715743",
"0.6707653",
"0.6707653",
"0.6627682",
"0.6612727",
"0.65931964",
"0.6514543",
"0.64946806",
"0.6464933",
"0.6453461",
"0.6445275",
"0.6431447",
"0.6412844",
"0.6403521",
"0.6393142",
"0.6386502",
"0.63793707",
"0.63760835",
"0.6364863",
"0.6319634"
] | 0.8823181 | 0 |
opens the chosen exceldocument and returns it as a sheet | def get_excel(exceldocument):
sheet = xlrd.open_workbook(exceldocument).sheet_by_index(0)
return sheet | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def openExcelSheet(outputFileName):\n workbook = Workbook()\n worksheet = workbook.add_sheet(\"Sheet 1\")\n return workbook, worksheet",
"def _get_spreadsheet(i):\n path = io_mgr.get_parties_spreadsheet(i)\n if not os.path.exists(path):\n raise IOError()\n\n return openpyxl.load_workbook(path, read_only=True)",
"def open_workbook(self, workbook):\n mylog.debug('Opening workbook %s' % workbook)\n workbook = openpyxl.load_workbook(filename = workbook)\n return workbook",
"def open_xlsx_file(filepath, mode=\"rb\"):\n archive = XslxFile(filepath)\n archive.open()\n return archive",
"def _read_xls(self, options, datas):\n book = xlrd.open_workbook(file_contents=datas)\n return self._read_xls_book(book)",
"def open( self, filename ):\r\n #http://www.oooforum.org/forum/viewtopic.phtml?t=35344\r\n properties = []\r\n properties.append( OpenOfficeDocument._makeProperty( 'Hidden', True ) ) \r\n properties = tuple( properties )\r\n self.oodocument = self.openoffice.loadComponentFromURL( uno.systemPathToFileUrl( os.path.abspath( filename ) ), \"_blank\", 0, properties )",
"def openExcelSheet(outputFileName):\n\tworkbook = Workbook(encoding='utf-8')\n\tworksheet = workbook.add_sheet(\"Sheet 1\")\n\tworksheet.col(0).width = 8000\n\tworksheet.col(1).width = 3000\n\tworksheet.col(2).width = 6000\n\tworksheet.col(3).width = 6000\n\tworksheet.col(4).width = 6000\n\tworksheet.col(5).width = 15000\n\tworksheet.col(6).width = 15000\n\tworksheet.col(7).width = 15000\n\tworksheet.col(8).width = 6000\n\tworksheet.col(9).width = 15000\n\tworksheet.col(10).width = 15000\n\tworksheet.col(11).width = 6000\n\tworksheet.col(12).width = 6000\n\tworksheet.col(13).width = 10000\n\tworksheet.col(14).width = 6000\n\treturn workbook, worksheet",
"def handle(self):\n return pandas.ExcelFile(str(self.source))",
"def lectxl(NOM):\n #NOM=input(\"nom du fichier:\")#interactif\n #NOM=str(NOM +\".xlsx\")\n workbook = xlrd.open_workbook(NOM)\n SheetNameList = workbook.sheet_names()\n worksheet = workbook.sheet_by_name(SheetNameList[0])\n num_rows = worksheet.nrows \n f=[NOM]\n for curr_row in range(0,num_rows):\n row = worksheet.row(curr_row)\n f.append(row)\n return f",
"def load_data(fname='SeatTest_New.xlsx'):\n return pd.ExcelFile(fname)",
"def get_sheet(excel_fname, sheet_name=None):\r\n book = xlrd.open_workbook(excel_fname)\r\n\r\n if sheet_name:\r\n\r\n if sheet_name in book.sheet_names():\r\n sheet = book.sheet_by_name(sheet_name)\r\n return sheet\r\n else:\r\n print(\"ERROR: Sheet '{0}' cannot be found in workbook '{1}'\".format(\r\n sheet_name, excel_fname))\r\n sys.exit(1)\r\n\r\n else:\r\n # Get the first worksheet.\r\n sheet = book.sheet_by_index(0)\r\n return sheet",
"def open_file(path):\n book = xlrd.open_workbook(path)\n # print number of sheets\n #print book.nsheets\n # print sheet names\n #print book.sheet_names()\n # get the first worksheet\n first_sheet = book.sheet_by_index(0)\n # read a row\n #print first_sheet.row_values(0)\n # read a cell\n cell = first_sheet.cell(1,0)\n #print cell\n #print cell.value\n # read a row slice\n #print first_sheet.row_slice(rowx=0,start_colx=0,end_colx=2)\n\n \"\"\"\n if Junipter.search_junipter_rule(first_sheet,1) == 0:\n print \"Juniper rule doesn't match\"\n else:\n print \"Juniper rule match\"\n \"\"\"\n\n \"\"\"\n if Mitac.search_mitac_rule(first_sheet,1) == 0:\n print \"Mitac rule doesn't match\"\n else:\n print \"Mitac rule match\"\n \"\"\"\n\n if Fabrinet.search_fabrinet_rule(first_sheet,3) == 0:\n print \"fabrinet rule doesn't match\"\n else:\n print \"fabrinet rule match\"",
"def fromxlsx(filename, sheet=None, range=None, **kwargs):\n \n return XLSXView(filename, sheet=sheet, range=range, **kwargs)",
"def get_workbook(path):\n wb = openpyxl.load_workbook(path, read_only=True)\n return wb",
"def load_sheet(sheet_name):\n workbook_path = get_workbook_path()\n wb = openpyxl.load_workbook(workbook_path)\n sheet_obj = wb[sheet_name]\n return sheet_obj, wb",
"def test_open_order_sheet(self):\n order_processor = OrderProcessor()\n order_processor.open_order_sheet('COMP_3522_A4_orders.xlsx')\n self.assertTrue(self, isinstance(order_processor.orders_data_frame,\n DataFrame))",
"def get_worksheet(self, workbook):\n for worksheet_name in workbook.sheet_names():\n return workbook.sheet_by_name(worksheet_name)",
"def generate_spreadsheet(request, id):\n election = get_object_or_404(Election, pk=id)\n response = render_to_response(\"django_elect/spreadsheet.html\", {\n 'full_stats': election.get_full_statistics(),\n })\n filename = \"election%s.xls\" % (election.pk)\n response['Content-Disposition'] = 'attachment; filename='+filename\n response['Content-Type'] = 'application/vnd.ms-excel; charset=utf-8'\n return response",
"def login_open_sheet(oauth_key_file, spreadsheet):\n try:\n scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']\n credentials = ServiceAccountCredentials.from_json_keyfile_name(oauth_key_file, scope)\n gc = gspread.authorize(credentials)\n worksheet = gc.open(spreadsheet).sheet1 # pylint: disable=redefined-outer-name\n return worksheet\n except Exception as ex: # pylint: disable=bare-except, broad-except\n print('Unable to login and get spreadsheet. Check OAuth credentials, spreadsheet name, \\\n and make sure spreadsheet is shared to the client_email address in the OAuth .json file!')\n print('Google sheet login failed with error:', ex)\n sys.exit(1)",
"def OpenFileExcel(self, *args, **kwargs):\n directory = None\n if kwargs is not None:\n for key, value in kwargs.items():\n if key == 'directory':\n directory = value\n\n\n\n with wx.FileDialog(self, \"Open report file\", directory,\n wildcard=\"excel files (*.xlsx)|*.xlsx|(*.xls)|*.xlsx|(*.csv)|*.csv\",\n style=wx.FD_OPEN) as fileDialog:\n \n if fileDialog.ShowModal() == wx.ID_CANCEL:\n return \n\n\n else:\n\n pathname = fileDialog.GetPath()\n print('the file to be opened is :'+ pathname)\n\n def openWorkbook(xlapp, xlfile):\n try:\n xlwb = xlapp.Workbooks(xlfile)\n except Exception as e:\n try:\n xlwb = xlapp.Workbooks.Open(xlfile)\n except Exception as e:\n print(e)\n xlwb = None\n return (xlwb)\n\n pathname = os.path.normcase(pathname)\n\n\n try:\n excel = win32.gencache.EnsureDispatch('Excel.Application')\n wb = openWorkbook(excel, pathname)\n #ws = wb.Worksheets('Sheet1')\n excel.Visible = True\n except Exception as e:\n print(e)\n\n finally:\n # RELEASES RESOURCES\n ws = None\n wb = None\n excel = None",
"def creaXl(nombre):\r\n return xlw.Workbook(nombre)",
"def to_spreadsheet(self) -> sc.Spreadsheet:\n\n f, wb = self.to_workbook()\n wb.close() # Close the workbook to flush any xlsxwriter content\n spreadsheet = sc.Spreadsheet(f) # Wrap it in a spreadsheet instance\n return spreadsheet",
"def _read_workbook_2007(maldoc):\n\n # Read in the 2007+ cells.\n color_print.output('g', \"Analyzing Excel 2007+ file ...\")\n workbook_info = XLM.excel2007.read_excel_2007_XLM(maldoc) \n color_print.output('g', \"Extracted XLM from ZIP archive.\")\n if (workbook_info is None):\n return (None, None, None)\n if (len(workbook_info) == 0):\n color_print.output('y', \"WARNING: No XLM macros found.\")\n return (None, None, None)\n\n if debug:\n print(\"=========== START 2007+ CONTENTS ==============\")\n for sheet in workbook_info.keys():\n print(\"\\n------\")\n print(sheet)\n print(\"\")\n for c in workbook_info[sheet].keys():\n print(str(c) + \" ---> \" + str(workbook_info[sheet][c]))\n print(\"=========== DONE 2007+ CONTENTS ==============\")\n \n # Figure out which sheet probably has the XLM macros.\n xlm_sheet_name = None\n max_formulas = -1\n for sheet in workbook_info.keys():\n if (len(workbook_info[sheet]) > max_formulas):\n max_formulas = len(workbook_info[sheet])\n xlm_sheet_name = sheet\n\n # Parse each formula and add it to a sheet object.\n xlm_cells = {}\n for cell_index in workbook_info[xlm_sheet_name].keys():\n\n # Value only cell?\n row = cell_index[0]\n col = cell_index[1]\n if (row not in xlm_cells):\n xlm_cells[row] = {}\n raw_formula = workbook_info[xlm_sheet_name][cell_index][0]\n if (raw_formula is None):\n\n # Do we have a value?\n formula_val = workbook_info[xlm_sheet_name][cell_index][1]\n if (formula_val is not None):\n\n # Just save the value in the cell.\n xlm_cells[row][col] = formula_val\n continue\n \n # Parse the formula into an XLM object.\n formula_str = b\"=\" + raw_formula\n formula = XLM.ms_stack_transformer.parse_ms_xlm(formula_str)\n\n # Set the value of the formula if we know it.\n formula_val = workbook_info[xlm_sheet_name][cell_index][1]\n if (formula_val is not None):\n formula.value = formula_val\n\n # Save the XLM object.\n formula.update_cell_id(cell_index)\n xlm_cells[row][col] = formula\n color_print.output('g', \"Parsed MS XLM macros.\")\n \n # Merge the XLM cells with the value cells into a single unified spereadsheet\n # object.\n workbook, xlm_cell_indices, xlm_sheet = _merge_XLM_cells(maldoc, xlm_cells)\n if (workbook is None):\n color_print.output('r', \"ERROR: Merging XLM cells failed. Emulation aborted.\")\n return (None, None, None)\n \n # Done.\n return (workbook, xlm_cell_indices, xlm_sheet)",
"def openDoc (self):\n fileName = QFileDialog.getOpenFileName(self,\n self.tr(\"Open File\"), \"\", \"All documents (*.%s;*.%s;*.%s;*.%s;*.%s;*.%s;*.%s);;Tests abstract (*.%s);;Tests unit (*.%s);;Tests suite (*.%s);;Tests plan (*.%s);;Tests global (*.%s);;Tests config (*.%s);;Tests data (*.%s)\" %\n ( TestAbstract.TYPE, TestUnit.TYPE, TestSuite.TYPE, TestPlan.TYPE, TestPlan.TYPE_GLOBAL, TestConfig.TYPE, TestData.TYPE, \n TestAbstract.TYPE, TestUnit.TYPE, TestSuite.TYPE, TestPlan.TYPE, TestPlan.TYPE_GLOBAL, TestConfig.TYPE, TestData.TYPE) )\n \n # new in v17.1\n if QtHelper.IS_QT5:\n _fileName, _type = fileName\n else:\n _fileName = fileName\n # end of new\n \n if not len(_fileName):\n return\n \n extension = str(_fileName).rsplit(\".\", 1)[1]\n if not ( extension.lower() in [ TestSuite.TYPE, TestPlan.TYPE, TestPlan.TYPE_GLOBAL, TestConfig.TYPE,\n TestData.TYPE, TestUnit.TYPE, TestAbstract.TYPE ] ):\n QMessageBox.critical(self, self.tr(\"Open Failed\") , self.tr(\"File not supported\") )\n return\n \n tmp = str(_fileName).rsplit(\"/\", 1)\n path = tmp[0]\n if len(tmp) > 1:\n _filename = tmp[1].rsplit(\".\", 1)[0]\n else:\n _filename = tmp[0].rsplit(\".\", 1)[0]\n self.newTab( path = path, filename = _filename, \n extension = extension, repoDest=UCI.REPO_UNDEFINED)",
"def login_open_sheet(oauth_key_file, spreadsheet):\r\n try:\r\n scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']\r\n credentials = ServiceAccountCredentials.from_json_keyfile_name(oauth_key_file, scope)\r\n gc = gspread.authorize(credentials)\r\n worksheet = gc.open(spreadsheet).sheet1\r\n return worksheet\r\n\r\n except Exception as ex:\r\n print('Unable to login and get spreadsheet. Check OAuth credentials, spreadsheet name, and make sure spreadsheet is shared to the client_email address in the OAuth .json file!')\r\n print('Google sheet login failed with error:', ex)\r\n sys.exit(1)",
"def login_open_sheet(oauth_key_file, spreadsheet):\r\n try:\r\n scope = ['https://spreadsheets.google.com/feeds','https://www.googleapis.com/auth/drive']\r\n credentials = ServiceAccountCredentials.from_json_keyfile_name(oauth_key_file, scope)\r\n gc = gspread.authorize(credentials)\r\n worksheet = gc.open(spreadsheet).sheet1\r\n return worksheet\r\n except Exception as ex:\r\n print('Unable to login and get spreadsheet. Check OAuth credentials, spreadsheet name, and make sure spreadsheet is shared to the client_email address in the OAuth .json file!')\r\n print('Google sheet login failed with error:', ex)\r\n print(datetime.datetime.now())\r\n sys.exit(1)",
"def get_xlsx_report(url, sheet_name):\n r = requests.get(url, verify=False)\n data = pyexcel_xlsx.get_data(io.BytesIO(r.content))\n return data[sheet_name]",
"def get_workbook(self):\n return self.workbook",
"def login_open_sheet(oauth_key_file, spreadsheet):\n\ttry:\n\t\tjson_key = json.load(open(oauth_key_file))\n\t\tcredentials = SignedJwtAssertionCredentials(json_key['client_email'],\n\t\t\t\t\t\t\t\t\t\t\t\t\tjson_key['private_key'],\n\t\t\t\t\t\t\t\t\t\t\t\t\t['https://spreadsheets.google.com/feeds'])\n\t\tgc = gspread.authorize(credentials)\n\t\tworksheet = gc.open(spreadsheet).sheet1\n\t\treturn worksheet\n\texcept Exception as ex:\n\t\tprint 'Unable to login and get spreadsheet. Check OAuth credentials, spreadsheet name, and make sure spreadsheet is shared to the client_email address in the OAuth .json file!'\n\t\tprint 'Google sheet login failed with error:', ex\n\t\tsys.exit(1)",
"def __load( self, raw_content ):\n return( pd.read_excel( BytesIO( raw_content ) ) )"
] | [
"0.69280106",
"0.6738904",
"0.6419168",
"0.6310797",
"0.605773",
"0.6051629",
"0.6044591",
"0.6008777",
"0.5982114",
"0.5974647",
"0.59715974",
"0.5966216",
"0.59524924",
"0.59430736",
"0.5873261",
"0.5856575",
"0.57233405",
"0.5704794",
"0.569487",
"0.5689535",
"0.5683357",
"0.56626683",
"0.5650288",
"0.5643976",
"0.56220996",
"0.5621131",
"0.5582671",
"0.557879",
"0.5575977",
"0.5569652"
] | 0.77836186 | 0 |
creates an xml structure with a root element and its sub-elements | def createxmlmall():
root = ET.Element("state")
model = ET.SubElement(root, "model")
model.text = r""
dataid = ET.SubElement(root, "dataids")
application = ET.SubElement(root, "application")
application.text = "SIBS Configurator"
safecookie = ET.SubElement(root, "safecookie")
steps = ET.SubElement(root, "steps")
prev = ET.SubElement(steps, "prev")
lastproxy = ET.SubElement(root, "last-proxy").text = "tcserver0"
tree = ET.ElementTree(root) # saves tree in variable "tree"
return tree, safecookie, steps, prev | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def new_xml(self, root_name):\n\n self.tree = ET.ElementTree(ET.fromstring('<?xml version=\"1.0\" encoding=\"UTF-8\"?><%s></%s>'%(\n root_name, root_name)))\n return self.tree.getroot()",
"def create_roots(self):\n self.root = SchemaNode.element(\"nmt:netmod-tree\",\n interleave=False, occur=2)\n self.confdata = SchemaNode.element(\"nmt:top\", self.root,\n interleave=True, occur=2)\n self.rpcs = SchemaNode.element(\"nmt:rpc-methods\", self.root,\n interleave=False, occur=2)\n self.notifications = SchemaNode.element(\"nmt:notifications\", self.root,\n interleave=True, occur=2)",
"def build(self):\n root = ET.Element(\"package\", **self.attr)\n self.build_meta(root)\n self.build_manifest(root)\n self.build_spine(root)\n return root",
"def to_xml(self):\r\n element = ET.Element(\"node\")\r\n\r\n element.attrib['name'] = self.name\r\n element.attrib['description'] = self.description\r\n\r\n return element",
"def CreateKmlDoc():\n\n kml_doc = xml.dom.minidom.Document()\n kml_element = kml_doc.createElementNS('http://www.opengis.net/kml/2.2', 'kml')\n kml_element.setAttribute('xmlns', 'http://www.opengis.net/kml/2.2')\n kml_element = kml_doc.appendChild(kml_element)\n document = kml_doc.createElement('Document')\n kml_element.appendChild(document)\n return kml_doc",
"def _create_nrml():\n return etree.Element(NRML04_ROOT_TAG, nsmap=NSMAP)",
"def createElements(self):\n if self.__builder.checkRootTag(self.__content):\n elements = self.__content.findall(\"*\")\n\n for el in elements:\n self.parseXml(el, {})\n\n return self.__builder.getRoot()\n else:\n print(\"The Element \", self.__content.tag, \" is unkown.\")\n return None",
"def build(self):\n root = ET.Element(\"container\", xmlns=self.namespace,\n version=self.version)\n rfs = ET.SubElement(root, \"rootfiles\")\n attrs = {\"full-path\": self.full_path, \"media-type\": self.media_type, }\n dummy = ET.SubElement(rfs, # pragma pylint: disable=W0612\n \"rootfile\", **attrs)\n # pragma pylint: enable=W0612\n return root",
"def buildxml(self):\n # assume self._objslock is already held here\n logger.info(\"Emane.buildxml()\")\n self.buildplatformxml()\n self.buildnemxml()\n self.buildtransportxml()\n self.buildeventservicexml()",
"def GenerateXML(dictionary, fileName=\"labelling.xml\") : \n root = gfg.Element(\"annotation\") \n #the big section is called Annotation\n for key in dictionary:\n #for every polygon list in inside object witho subelement name and attributes and the type \"polygon\"\n objectElement = gfg.Element(\"object\") \n root.append(objectElement) \n subElement1 = gfg.SubElement(objectElement, \"name:\".strip(\":\"))\n subElement1.text = str(dictionary[key][\"name\"])\n subElement2 = gfg.SubElement(objectElement, \"attributes\".strip(\":\"))\n subElement2.text = str(dictionary[key][\"attributes\"])\n subElement3 = gfg.SubElement(objectElement, \"polygon\")\n \n for i in range(0, len(dictionary[key])-2):\n #for every vertex of the polygon list it's rounded x, y on xml\n SubInsidePolygon = gfg.SubElement(subElement3, \"pt\")\n sub_x = gfg.SubElement(SubInsidePolygon, \"x\")\n sub_y = gfg.SubElement(SubInsidePolygon, \"y\")\n sub_x.text = str(int(round(dictionary[key][\"x_y_\" + str(i)][0])))\n sub_y.text = str(int(round(dictionary[key][\"x_y_\" + str(i)][1])))\n tree = gfg.ElementTree(root) \n #create the xml tree\n with open (fileName, \"wb\") as files : \n tree.write(files) \n #if xml does not exist create one otherwise rewrite to it",
"def build(self):\n root = ET.Element(\"html\", xmlns=self.xmlns)\n self.build_head(root)\n self.build_body(root)\n return root",
"def gen_tree(path):\n # print(\"CALLING.. Tree\")\n parser = etree.XMLParser(remove_blank_text=True)\n tree = etree.parse(path, parser)\n root = tree.getroot() \n return root, tree",
"def build(self):\n root = ET.Element(\"ncx\", xmlns=self.namespace, version=self.version)\n head = ET.SubElement(root, \"head\")\n ET.SubElement(head, \"meta\",\n content=\"urn:uuid:%s\" % self.bookid,\n name=\"dtb:uid\",\n )\n ET.SubElement(head, \"meta\",\n content=\"1\",\n name=\"dtb:depth\",\n )\n ET.SubElement(head, \"meta\",\n content=\"0\",\n name=\"dtb:totalPageCount\",\n )\n ET.SubElement(head, \"meta\",\n content=\"0\",\n name=\"dtb:maxPageNumber\",\n )\n doctitle = ET.SubElement(root, \"docTitle\")\n ET.SubElement(doctitle, \"text\").text = self.title\n navmap = ET.SubElement(root, \"navMap\")\n seq = 1\n for sid, label, src in self.items:\n navpt = ET.SubElement(navmap, \"navPoint\", id=sid,\n playOrder=str(seq))\n navlabel = ET.SubElement(navpt, \"navLabel\")\n ET.SubElement(navlabel, \"text\").text = label\n ET.SubElement(navpt, \"content\", src=src)\n seq += 1\n return root",
"def generate_xml_tree(self):\n try:\n tree = et.parse(self.file)\n self.root = tree.getroot()\n self.blast_output = self.root[8]\n self.iteration = self.blast_output[0]\n self.iteration_hit = self.iteration[4]\n\n for i in self.iteration_hit:\n self.hits.append(i)\n\n for i in self.hits:\n h = []\n for j in i:\n h.append(j)\n\n for hsp in h[5]:\n procent = \"{0:.2f}\".format(int(hsp[10].text) / int(hsp[13].text) * 100)\n procent = float(procent)\n self.aligns.append(Alignment(h[2].text,\n hsp[1].text,\n procent,\n hsp[12].text,\n hsp[10].text,\n hsp[13].text,\n hsp[14].text,\n hsp[15].text,\n hsp[16].text))\n self.main_alignments.append(MainAlignment(i[2].text,\n self.aligns))\n self.aligns = []\n except IndexError:\n \"Bad file.\"",
"def __create_document(self):\n doc = xml.dom.minidom.Document()\n kml = doc.createElement('kml')\n kml.setAttribute('xmlns', 'http://www.opengis.net/kml/2.2')\n doc.appendChild(kml)\n document = doc.createElement('Document')\n kml.appendChild(document)\n docName = doc.createElement('name')\n document.appendChild(docName)\n docName_text = doc.createTextNode(self['name'])\n docName.appendChild(docName_text)\n docDesc = doc.createElement('description')\n document.appendChild(docDesc)\n docDesc_text = doc.createTextNode(self['description'])\n docDesc.appendChild(docDesc_text)\n return doc",
"def new_xmldoc_opml():\n xmldoc = XMLDoc()\n opml = OPML()\n xmldoc.root_element = opml\n\n return (xmldoc, opml)",
"def getXML(self):\n\n def _getElementForMappingEntry(entry, mappingStyle):\n xmlDocTmp = Document()\n element = xmlDocTmp.createElement(mappingStyle)\n for k, v in viewitems(entry):\n # ignore empty, None or compiled regexp items into output\n if not v or (k == \"path-match-expr\"):\n continue\n element.setAttribute(k, str(v))\n return element\n\n xmlDoc = Document()\n root = xmlDoc.createElement(\"storage-mapping\") # root element name\n for mappingStyle, mappings in viewitems(self):\n for mapping in mappings:\n mapElem = _getElementForMappingEntry(mapping, mappingStyle)\n root.appendChild(mapElem)\n return root.toprettyxml()",
"def generate_xml(self, movement):\n\n ET.SubElement(self.root, 'generator').text = __revision__\n ET.SubElement(self.root, 'generated_at').text = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n xmlroot = self.root\n xml_movement = ET.SubElement(xmlroot, 'movement')\n\n ET.SubElement(xml_movement, \"location_from\").text = format_locname(movement.location_from)\n ET.SubElement(xml_movement, \"location_to\").text = format_locname(movement.location_to)\n ET.SubElement(xml_movement, \"movement_id\").text = format_locname(movement.id)\n ET.SubElement(xml_movement, \"created_at\").text \\\n = unicode(movement.created_at.strftime('%Y-%m-%d %H:%M:%S'))\n\n xml_product = ET.SubElement(xml_movement, 'product')\n for fieldname in ['artnr', 'name', 'einheit', 'ean', 'products_per_export_package',\n 'pallet_height']:\n ET.SubElement(xml_product, fieldname).text = u''\n\n xml_unit = ET.SubElement(xml_movement, 'unit')\n ET.SubElement(xml_unit, \"mui\").text = unicode(movement.mui)\n ET.SubElement(xml_unit, 'height').text = unicode(movement.unit_height)\n ET.SubElement(xml_unit, 'quantity').text = unicode(movement.quantity)\n ET.SubElement(xml_unit, 'created_at').text = movement.unit_created_at.strftime('%Y-%m-%d %H:%M:%S')\n return xmlroot",
"def __init__(self, xml_file, root_name, tags=[]):\n self.xml_file = xml_file\n self.tree = ET.ElementTree(ET.Element(root_name))\n self.root = self.tree.getroot()\n\n for tag in tags:\n self.root.set(tag[0], tag[1])",
"def _preprocess(self, shapes):\n # Add root element\n root = Element('root')\n root.append(\n Comment('Generated xml shapes')\n )\n\n # Add elements for each shape\n for shape in shapes:\n root.append(\n Comment('Generated shape: {name}'.format(name=shape.shape_name()))\n )\n\n child = SubElement(root, 'shape', { 'attr-1': 'attribute content' })\n child.set('attr-b', 'other attribute content')\n child.text = 'tag content'\n\n return root",
"def generate_xml(self, locations):\n\n ET.SubElement(self.root, 'generator').text = __revision__\n ET.SubElement(self.root, 'generated_at').text = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n\n xmlroot = self.root\n kernel = Kerneladapter()\n\n for locname in locations:\n xml_location = ET.SubElement(xmlroot, 'location')\n location = kernel.location_info(locname)\n ET.SubElement(xml_location, \"location\").text = unicode(locname)\n ET.SubElement(xml_location, \"height\").text = unicode(location['height'])\n ET.SubElement(xml_location, \"attributes\").text = unicode(location['attributes'])\n ET.SubElement(xml_location, \"floorlevel\").text = unicode(location['floorlevel'])\n ET.SubElement(xml_location, \"preference\").text = unicode(location['preference'])\n ET.SubElement(xml_location, \"info\").text = unicode(location['info'])\n ET.SubElement(xml_location, \"reserved_for\").text = unicode(location['reserved_for'])\n\n for mui in location['allocated_by']:\n unit = kernel.unit_info(mui)\n xml_unit = ET.SubElement(xml_location, \"unit\")\n ET.SubElement(xml_unit, \"mui\").text = unicode(unit['mui'])\n ET.SubElement(xml_unit, \"quantity\").text = unicode(unit['quantity'])\n ET.SubElement(xml_unit, \"artnr\").text = unicode(unit['product'])\n ET.SubElement(xml_unit, \"height\").text = unicode(unit['height'])\n ET.SubElement(xml_unit, \"pick_quantity\").text = unicode(unit['pick_quantity'])\n ET.SubElement(xml_unit, 'created_at').text = unit['created_at'].strftime('%Y-%m-%d %H:%M:%S')\n ET.SubElement(xml_unit, \"movements\").text = unicode(unit['movements'])\n ET.SubElement(xml_unit, \"picks\").text = unicode(unit['picks'])\n ET.SubElement(xml_unit, \"attributes\").text = unicode(unit['attributes'])\n try:\n product = produktpass.models.Product.objects.get(artnr=unit['product'])\n ET.SubElement(xml_unit, \"product_name\").text = unicode(product.name)\n except produktpass.models.Product.DoesNotExist:\n ET.SubElement(xml_unit, \"product_name\").text = '???'\n\n return xmlroot",
"def create_osm_tree():\n osm = etree.Element(\"osm\", {'version': '0.6', 'generator': 'create-legend'})\n osm.append(etree.Element(\"bounds\", {'minlat': '-85', 'maxlat': '85', 'minlon': '-180', 'maxlon': '180'}))\n return etree.ElementTree(osm)",
"def start_serialization(self):\n self.xml = SimplerXMLGenerator(self.stream, self.options.get(\"encoding\", settings.DEFAULT_CHARSET))\n self.xml.startDocument()\n self.xml.startElement(\"xliff\", {\n \"version\": \"1.2\",\n \"xmlns\": \"urn:oasis:names:tc:xliff:document:1.2\",\n \"xmlns:d\": \"https://docs.djangoproject.com/\"\n })",
"def getXML(self):\n nodes = list(self.nodes(data=True))\n nodes.sort()\n node_string = ''\n for n in nodes:\n attribute_string = ''\n keys = list(n[1].keys())\n keys.sort()\n for k in keys:\n attribute_string += \"\"\"<{0}> {1} </{2}>\\n\"\"\".format(k, n[1][k], k)\n modification_string = ''\n modified_by = self.predecessors(n[0])\n if modified_by:\n for mod in modified_by:\n modification_string += \"\"\"<modified_by>\\n\"\"\"\n modification_string += \\\n \"\"\"<modifyingNode> %s </modifyingNode>\\n\"\"\"%mod.getTagID()\n modification_string += \\\n \"\"\"<modifyingCategory> %s </modifyingCategory>\\n\"\"\"%mod.getCategory()\n modification_string += \"\"\"</modified_by>\\n\"\"\"\n modifies = self.successors(n[0])\n if modifies:\n for modified in modifies:\n modification_string += \"\"\"<modifies>\\n\"\"\"\n modification_string += \\\n \"\"\"<modifiedNode> {0} </modifiedNode>\\n\"\"\".format(modified.getTagID())\n modification_string += \\\n \"\"\"</modifies>\\n\"\"\"\n node_string += \\\n NODE_XML_SKEL.format(attribute_string+\"{0}\".format(n[0].getXML()) +\\\n modification_string)\n edges = list(self.edges(data=True))\n edges.sort()\n edge_string = ''\n for edge in edges:\n keys = list(edge[2].keys())\n keys.sort()\n attribute_string = ''\n for key in keys:\n attribute_string += \"\"\"<{0}> {1} </{2}>\\n\"\"\".format(key, edge[2][key], key)\n edge_string += \"{0}\".format(EDGE_XML_SKEL.format(edge[0].getTagID(),\n edge[1].getTagID(),\n attribute_string))\n\n return CONTEXT_MARKUP_XML_SKEL.format(xmlScrub(self.getRawText()),\n xmlScrub(self.getText()),\n node_string,\n edge_string)",
"def create_xml_patient(self, data=None):\n data = data or {}\n now = datetime.datetime.now()\n delta = datetime.timedelta(days=random.randint(1, 10) * -1)\n enrolled = now - delta\n delta = datetime.timedelta(days=random.randint(1, 10))\n next_visit = now + delta\n defaults = {\n 'Subject_Number': self.random_string(10),\n 'Pin_Code': self.random_number_string(4),\n 'Date_Enrolled': enrolled.strftime('%b %d %Y '),\n 'Next_Visit': next_visit.strftime('%b %d %Y '),\n 'Mobile_Number': '12223334444',\n }\n defaults.update(data)\n empty_items = [k for k, v in defaults.iteritems() if not v]\n for item in empty_items:\n del defaults[item]\n root = self._node('Table')\n for key, value in defaults.iteritems():\n root.append(self._node(key, value))\n return root",
"def saveToXml(self) -> org.jdom.Element:\n ...",
"def xml_item(cls, item):\n xml = cls.xml_root_open(item)\n xml += cls.xml_add_links(item)\n xml += cls.xml_dict(item)\n xml += cls.xml_root_close()\n return xml",
"def build_xml(self, **kwargs):\r\n\r\n # Retrieve keyward arguments\r\n question_text = kwargs.get('question_text', '')\r\n explanation_text = kwargs.get('explanation_text', '')\r\n script = kwargs.get('script', None)\r\n num_responses = kwargs.get('num_responses', 1)\r\n num_inputs = kwargs.get('num_inputs', 1)\r\n\r\n # The root is <problem>\r\n root = etree.Element(\"problem\")\r\n\r\n # Add a script if there is one\r\n if script:\r\n script_element = etree.SubElement(root, \"script\")\r\n script_element.set(\"type\", \"loncapa/python\")\r\n script_element.text = str(script)\r\n\r\n # The problem has a child <p> with question text\r\n question = etree.SubElement(root, \"p\")\r\n question.text = question_text\r\n\r\n # Add the response(s)\r\n for i in range(0, int(num_responses)):\r\n response_element = self.create_response_element(**kwargs)\r\n root.append(response_element)\r\n\r\n # Add input elements\r\n for j in range(0, int(num_inputs)):\r\n input_element = self.create_input_element(**kwargs)\r\n if not (None == input_element):\r\n response_element.append(input_element)\r\n\r\n # The problem has an explanation of the solution\r\n if explanation_text:\r\n explanation = etree.SubElement(root, \"solution\")\r\n explanation_div = etree.SubElement(explanation, \"div\")\r\n explanation_div.set(\"class\", \"detailed-solution\")\r\n explanation_div.text = explanation_text\r\n\r\n return etree.tostring(root)",
"def __init__(self, output, encoding='utf-8', short_empty_elements=True):\n document = XMLGenerator(output, encoding) # Python 3.2 : short_empty_elements\n document.startDocument()\n self._document = document\n self._output = output\n self._encoding = encoding\n self._short_empty_elements = short_empty_elements\n self._open_elements = []\n return",
"def get_xml(self):\n\t\t# get the XML description of the VM\n\t\tvm_xml = self.clonezilla_vm_obj.XMLDesc(0)\n\t\troot = ET.fromstring(vm_xml)\n\t\treturn root"
] | [
"0.6970181",
"0.63764966",
"0.6287799",
"0.6225016",
"0.6220165",
"0.6217637",
"0.6206258",
"0.619495",
"0.6160407",
"0.6134908",
"0.61195517",
"0.6091069",
"0.6088098",
"0.60488194",
"0.59819216",
"0.59459764",
"0.59355354",
"0.5920509",
"0.5884173",
"0.5878814",
"0.5870564",
"0.5760805",
"0.57490104",
"0.5732845",
"0.57127476",
"0.57099485",
"0.5706973",
"0.56900024",
"0.56886786",
"0.5688089"
] | 0.75603426 | 0 |
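The negative entries listed above all follow the same pattern: build an XML tree in memory with xml.etree.ElementTree (or minidom) and serialize it afterwards. As a point of reference, here is a minimal, self-contained sketch of that pattern; the element and attribute names (catalog, entry, title) are purely illustrative assumptions and do not come from any of the entries above.

import xml.etree.ElementTree as ET

def build_catalog(items):
    # Root element plus one child per (item_id, title) pair.
    root = ET.Element("catalog", version="1.0")
    for item_id, title in items:
        entry = ET.SubElement(root, "entry", id=str(item_id))
        ET.SubElement(entry, "title").text = title
    return root

# Serialize the finished tree to a UTF-8 byte string.
root = build_catalog([(1, "First"), (2, "Second")])
print(ET.tostring(root, encoding="utf-8"))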
Creates a folder and saves an XML tree to a specific path | def save_xml(tree, file_name, folder_name):
import os  # changes the location where the files are written
os.makedirs(folder_name, exist_ok=True)  # create the target folder if it does not exist yet, as the docstring promises
os.chdir(folder_name)
tree.write(file_name)  # the name of the new file
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def mkdir(path):",
"def create_folder(path):\n command = ['mkdir', TEST_DIR]\n file_operation(path, command)",
"def create_folder(path):\n if not os.path.exists(path):\n os.makedirs(path)",
"def create_folder(self):\n Path(self.root_name).mkdir(parents=True, exist_ok=True)\n Path(self.root_name + \"/VOC2021/\").mkdir(parents=True, exist_ok=True)\n Path(self.image_folder_path).mkdir(parents=True, exist_ok=True)\n Path(self.annot_path).mkdir(parents=True, exist_ok=True)\n Path(self.root_name + \"/VOC2021/ImageSets/\").mkdir(parents=True, exist_ok=True)\n Path(self.txt_path).mkdir(parents=True, exist_ok=True)",
"def create_folder(self):\n self.config.csv_path.mkdir(parents=True, exist_ok=True)\n self.config.images_path.mkdir(parents=True, exist_ok=True)",
"def create_folder(path):\n if not exists(path):\n os.makedirs(path)",
"def create_folder(path):\n try:\n os.listdir(path)\n except:\n os.makedirs(path)\n else:\n shutil.rmtree(path)\n os.makedirs(path)\n return path",
"def dlt_create_dir(path): \n shutil.rmtree(path,ignore_errors=True)\n os.makedirs(path, exist_ok = True)",
"def createFolder(self):\n raise NotImplementedError",
"def create_folder(path: str):\n if not os.path.exists(path):\n os.makedirs(path)",
"def prepare_folder(path):\n if not os.path.isdir(path):\n os.makedirs(path)",
"def create_path(self, path):\n path_list = path.split(\"/\")\n done_path = self.parent_folder + \"/\"\n\n for directory in path_list:\n try:\n os.mkdir(done_path + directory + \"/\")\n except FileExistsError:\n done_path += directory + \"/\"",
"def createFolder(self):\n self.destination = self.getPath() #Find the destination to create the folder\n try:\n os.makedirs(self.destination) #Try and make a folder\n except FileExistsError:\n pass #Otherwise continue if an error is encountered because the file exists already",
"def create_directories(self, path):\n os.makedirs(path)\n print('Directory created at:', path)\n return path",
"def mkdir(self, path):\n os.mkdir(path)",
"def make_dir(self, path):\n import os\n if not os.path.exists(path):\n os.makedirs(path)",
"def mkdir(self, path):\n try:\n postdata = codecs.encode(json.dumps({ 'dir': path }), 'utf-8')\n self._urlopen('/api/fileops/mkdir', postdata).read()\n except HTTPError as err:\n raise RuntimeError(\"Unable to create '{}'\".format(path))",
"def createFolder(folder):\n folder_ = os.path.join(os.getcwd(),folder)\n if not(os.path.isdir(folder_)):\n os.mkdir(folder_)",
"def create_folder(self, c_path):\n raise NotImplementedError",
"def create_dir(path):\n if not os.path.exists(path):\n os.makedirs(path)",
"def mkdir(path):\n\n # Simple use of make dir function\n os.mkdir(abspath(path))",
"def create_folder(self, unformatted_path):\n os.makedirs(self.format_path(unformatted_path), exist_ok=True)",
"def create_new_dir(path):\n logger.debug('Function Successful: % s',\n 'create_new_dir: create_new_dir successfully called from save_single_file_locally', extra=d)\n\n if not os.path.exists(path):\n logger.debug('Calling Function: % s',\n 'create_new_dir: create_new_dir calling makedirs', extra=d)\n os.makedirs(path)\n logger.debug('Function Successful: % s',\n 'create_new_dir: create_new_dir successfully called makedirs', extra=d)",
"def createFolder(self):\n\n self.directory = \"D:\\\\CompositionHelper\"\n if not os.path.exists(self.directory):\n os.makedirs(self.directory)\n print ('Created new folder')",
"def create_dir(dir_path):\n if not os.path.exists(dir_path):\n os.makedirs(dir_path)",
"def createDir(self, dir_name):\n os.mkdir(os.path.join(self.user[\"Save\"], dir_name))",
"def createdatafolder(name):\n folder = os.path.join(pathtofolder(),name)\n os.makedirs(folder)\n pass",
"def createFolder(self, path):\n yield \"\\n\" # to avoid timeout\n log(\"creating folder\")\n if \"flix\" in path:\n yield \"%s\" % self.fileService.createFolder(path)\n return\n yield 0\n return",
"def mkdir_p(cls, path):\n os.makedirs(path)",
"def MakeDir(self, path: str) -> None:\n ..."
] | [
"0.7024828",
"0.700881",
"0.6860256",
"0.68011755",
"0.6756072",
"0.6738238",
"0.66949385",
"0.66890776",
"0.6670779",
"0.6649409",
"0.6647468",
"0.66387165",
"0.66373086",
"0.66274315",
"0.66152924",
"0.66047573",
"0.6588086",
"0.6582058",
"0.656134",
"0.65589786",
"0.6516234",
"0.65015745",
"0.6493719",
"0.6480533",
"0.64678985",
"0.64626414",
"0.6458174",
"0.6457365",
"0.6452825",
"0.6414028"
] | 0.7450946 | 0 |
Calculate the approximation of a contour shape to another shape with a smaller number of vertices, depending on the precision we specify. | def __CalculateApproximation(self, contour):
epsilon = 0.1 * cv2.arcLength(contour, True)
return cv2.approxPolyDP(contour, epsilon, True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def contourApprox(cnt, epsilon = 0.005):\n\tepsilon = epsilon*cv2.arcLength(cnt, True)\n\tapprox = cv2.approxPolyDP(cnt, epsilon, True)\n\treturn approx",
"def approx_poly(self, mask):\n\n mask_expand = mask.copy()\n contours, _ = cv2.findContours(mask_expand, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n approx_curve = []\n if self.max_area_only:\n contour_areas = [cv2.contourArea(contour) for contour in contours]\n if len(contour_areas) == 0:\n return []\n max_index = np.argmax(np.array(contour_areas))\n max_contour = contours[max_index]\n if self.use_rotated_box:\n # In minimum rotated rectangle\n min_rect = cv2.minAreaRect(max_contour)\n poly = cv2.boxPoints(min_rect)\n poly = np.int0(poly)\n else:\n # In polygon contours\n perimeter = cv2.arcLength(max_contour, True) * 0.01\n poly = cv2.approxPolyDP(max_contour, perimeter, True)\n approx_curve.append(poly)\n else:\n for contour in contours:\n perimeter = cv2.arcLength(contour, True) * 0.01\n poly = cv2.approxPolyDP(contour, perimeter, True)\n approx_curve.append(poly)\n return approx_curve",
"def fun_contours(self, params):\n shape_coeffs = params[:self.num_shape_params]\n blendshape_end = self.num_shape_params + self.numObservations * self.num_blendshape_params\n blendshape_coeffs = params[self.num_shape_params:blendshape_end].reshape((self.numObservations, self.num_blendshape_params))\n trans_mats = params[blendshape_end:].reshape((self.numObservations, 7))\n\n vertices3d = self.vertices3d\n vertices3d_from_mesh = np.zeros_like(vertices3d)\n vertices3d_inner, vertices3d_right, vertices3d_left = self.transform_meshes(shape_coeffs, blendshape_coeffs, trans_mats)\n\n inner_idx = 0\n for idx in range(vertices3d.shape[0]):\n lm_idx = idx % 66\n obs_num = int(np.floor(idx/66))\n\n if lm_idx in self.contour_lms_list[0]:\n vertices3d_from_mesh[idx] = self.find_closest_vertex3D(vertices3d[idx],\n vertices3d_right[obs_num])\n elif lm_idx in self.contour_lms_list[1]:\n vertices3d_from_mesh[idx] = self.find_closest_vertex3D(vertices3d[idx],\n vertices3d_left[obs_num])\n else:\n vertices3d_from_mesh[idx] = vertices3d_inner[obs_num][inner_idx]\n inner_idx += 1\n if inner_idx == 50:\n inner_idx = 0\n\n return (vertices3d_from_mesh - vertices3d).ravel()",
"def find_contour(ctx: Context):\n cv2.copyTo(ctx.filter_image, np.ones_like(ctx.temp_image1), ctx.temp_image1)\n contours, _ = cv2.findContours(ctx.temp_image1, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n\n # take the 5 biggest areas\n contours = sorted(contours, key=lambda c: math.fabs(cv2.contourArea(c)), reverse=True)[:5]\n\n # approximate contours with poly line\n ctx.contours = [cv2.approxPolyDP(c, 2, True) for c in contours]",
"def computeNormalAndCurvature():\n radius = 50\n for i,j in pts:\n nb_pts = ti.cast(0, ti.f32)\n accu_0 = ti.cast(0, ti.f32)\n accu_1 = ti.cast(0, ti.f32)\n accu_2 = ti.cast(0, ti.f32)\n accu_3 = ti.cast(0, ti.f32)\n accu_4 = ti.cast(0, ti.f32)\n accu_5 = ti.cast(0, ti.f32)\n accu_6 = ti.cast(0, ti.f32)\n accu_7 = ti.cast(0, ti.f32)\n accu_8 = ti.cast(0, ti.f32)\n z = 0\n for x in range(i-radius, i+radius):\n for y in range(j-radius, j+radius):\n if ti.is_active(block1, [x,y]):\n accu_0 += x * x\n accu_1 += x * y\n accu_2 += x * z\n accu_3 += y * y\n accu_4 += y * z\n accu_5 += z * z\n accu_6 += x\n accu_7 += y\n accu_8 += z\n nb_pts += 1\n accu_0 /= nb_pts\n accu_1 /= nb_pts\n accu_2 /= nb_pts\n accu_3 /= nb_pts\n accu_4 /= nb_pts\n accu_5 /= nb_pts\n accu_6 /= nb_pts\n accu_7 /= nb_pts\n accu_8 /= nb_pts\n cov_mat_0 = accu_0 - accu_6 * accu_6\n cov_mat_1 = accu_1 - accu_6 * accu_7\n cov_mat_2 = accu_2 - accu_6 * accu_8\n cov_mat_4 = accu_3 - accu_7 * accu_7\n cov_mat_5 = accu_4 - accu_7 * accu_8\n cov_mat_8 = accu_5 - accu_8 * accu_8\n cov_mat_3 = cov_mat_1\n cov_mat_6 = cov_mat_2\n cov_mat_7 = cov_mat_5\n\n # Compute eigen value and eigen vector\n # Make sure in [-1, 1]\n scale = ti.max(1.0, ti.abs(cov_mat_0))\n scale = ti.max(scale, ti.abs(cov_mat_1))\n scale = ti.max(scale, ti.abs(cov_mat_2))\n scale = ti.max(scale, ti.abs(cov_mat_3))\n scale = ti.max(scale, ti.abs(cov_mat_4))\n scale = ti.max(scale, ti.abs(cov_mat_5))\n scale = ti.max(scale, ti.abs(cov_mat_6))\n scale = ti.max(scale, ti.abs(cov_mat_7))\n scale = ti.max(scale, ti.abs(cov_mat_8))\n if scale > 1.0:\n cov_mat_0 /= scale\n cov_mat_1 /= scale\n cov_mat_2 /= scale\n cov_mat_3 /= scale\n cov_mat_4 /= scale\n cov_mat_5 /= scale\n cov_mat_6 /= scale\n cov_mat_7 /= scale\n cov_mat_8 /= scale\n \n # Compute roots\n eigen_val_0 = ti.cast(0, ti.f32)\n eigen_val_1 = ti.cast(0, ti.f32)\n eigen_val_2 = ti.cast(0, ti.f32)\n \n c0 = cov_mat_0 * cov_mat_4 * cov_mat_8 \\\n + 2 * cov_mat_3 * cov_mat_6 * cov_mat_7 \\\n - cov_mat_0 * cov_mat_7 * cov_mat_7 \\\n - cov_mat_4 * cov_mat_6 * cov_mat_6 \\\n - cov_mat_8 * cov_mat_3 * cov_mat_3\n c1 = cov_mat_0 * cov_mat_4 \\\n - cov_mat_3 * cov_mat_3 \\\n + cov_mat_0 * cov_mat_8 \\\n - cov_mat_6 * cov_mat_6 \\\n + cov_mat_4 * cov_mat_8 \\\n - cov_mat_7 * cov_mat_7\n c2 = cov_mat_0 + cov_mat_4 + cov_mat_8\n \n if ti.abs(c0) < 0.00001:\n eigen_val_0 = 0\n d = c2 * c2 - 4.0 * c1\n if d < 0.0: # no real roots ! 
THIS SHOULD NOT HAPPEN!\n d = 0.0\n sd = ti.sqrt(d)\n eigen_val_2 = 0.5 * (c2 + sd)\n eigen_val_1 = 0.5 * (c2 - sd)\n else:\n s_inv3 = ti.cast(1.0 / 3.0, ti.f32)\n s_sqrt3 = ti.sqrt(3.0)\n c2_over_3 = c2 * s_inv3\n a_over_3 = (c1 - c2 * c2_over_3) * s_inv3\n if a_over_3 > 0:\n a_over_3 = 0\n \n half_b = 0.5 * (c0 + c2_over_3 * (2 * c2_over_3 * c2_over_3 - c1))\n q = half_b * half_b + a_over_3 * a_over_3 * a_over_3\n if q > 0:\n q = 0\n \n rho = ti.sqrt(-a_over_3)\n theta = ti.atan2(ti.sqrt(-q), half_b) * s_inv3\n cos_theta = ti.cos(theta)\n sin_theta = ti.sin(theta)\n eigen_val_0 = c2_over_3 + 2 * rho * cos_theta\n eigen_val_1 = c2_over_3 - rho * (cos_theta + s_sqrt3 * sin_theta)\n eigen_val_2 = c2_over_3 - rho * (cos_theta - s_sqrt3 * sin_theta)\n temp_swap = ti.cast(0, ti.f32)\n \n # Sort in increasing order.\n if eigen_val_0 >= eigen_val_1:\n temp_swap = eigen_val_1\n eigen_val_1 = eigen_val_0\n eigen_val_0 = temp_swap\n if eigen_val_1 >= eigen_val_2:\n temp_swap = eigen_val_2\n eigen_val_2 = eigen_val_1\n eigen_val_1 = temp_swap\n if eigen_val_0 >= eigen_val_1:\n temp_swap = eigen_val_1\n eigen_val_1 = eigen_val_0\n eigen_val_0 = temp_swap\n \n if eigen_val_0 <= 0:\n eigen_val_0 = 0\n d = c2 * c2 - 4.0 * c1\n if d < 0.0: # no real roots ! THIS SHOULD NOT HAPPEN!\n d = 0.0\n sd = ti.sqrt(d)\n eigen_val_2 = 0.5 * (c2 + sd)\n eigen_val_1 = 0.5 * (c2 - sd)\n # end of compute roots\n\n eigen_value = eigen_val_1 * scale # eigen value for 2D SDF\n # eigen value for 3D SDF\n #eigen_value = eigen_val_0 * scale\n\n #print(\"eigen_val_0 \", eigen_val_0)\n #print(\"eigen_val_1 \", eigen_val_1)\n #print(\"eigen_val_2 \", eigen_val_2)\n \n # TODO\n #scaledMat.diagonal ().array () -= eigenvalues (0)\n #eigenvector = detail::getLargest3x3Eigenvector<Vector> (scaledMat).vector;\n\n # Compute normal vector (TODO)\n #visual_norm[i,j][0] = eigen_val_0 #eigen_vector[0]\n #visual_norm[i,j][1] = eigen_val_1 #eigen_vector[1]\n #visual_norm[i,j][2] = eigen_val_2 #eigen_vector[2]\n\n # Compute the curvature surface change\n eig_sum = cov_mat_0 + cov_mat_1 + cov_mat_2\n visual_curv[i,j][0] = 0\n if eig_sum != 0:\n visual_curv[i,j][0] = eigen_val_1 # true curvature is: ti.abs(eigen_value / eig_sum)",
"def fracture(self, max_points=_max_points, precision=1e-3):\n if max_points > 4:\n ii = 0\n while ii < len(self.polygons):\n if len(self.polygons[ii]) > max_points:\n pts0 = sorted(self.polygons[ii][:, 0])\n pts1 = sorted(self.polygons[ii][:, 1])\n ncuts = len(pts0) // max_points\n if pts0[-1] - pts0[0] > pts1[-1] - pts1[0]:\n # Vertical cuts\n cuts = [\n pts0[int(i * len(pts0) / (ncuts + 1.0) + 0.5)]\n for i in range(1, ncuts + 1)\n ]\n chopped = clipper._chop(self.polygons[ii], cuts, 0,\n 1 / precision)\n else:\n # Horizontal cuts\n cuts = [\n pts1[int(i * len(pts1) / (ncuts + 1.0) + 0.5)]\n for i in range(1, ncuts + 1)\n ]\n chopped = clipper._chop(self.polygons[ii], cuts, 1,\n 1 / precision)\n self.polygons.pop(ii)\n layer = self.layers.pop(ii)\n datatype = self.datatypes.pop(ii)\n self.polygons.extend(\n numpy.array(x)\n for x in itertools.chain.from_iterable(chopped))\n npols = sum(len(c) for c in chopped)\n self.layers.extend(layer for _ in range(npols))\n self.datatypes.extend(datatype for _ in range(npols))\n else:\n ii += 1\n return self",
"def _bounding_precision(self) :\n if not self.precision().is_infinite() :\n return self.precision()\n \n return self.parent().monoid().minimal_composition_filter( self.coefficients().keys(),\n [self.parent().monoid().zero_element()] )",
"def getContours(image, copyImage):\n contours, heirarchy = cv.findContours(image, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_NONE)\n for contour in contours:\n area = cv.contourArea(contour)\n \n if area > 500.0:\n cv.drawContours(copyImage, contour, -1, (255,0,0),3)\n perimeter = cv.arcLength(contour, True)\n \n # Approximates to the nearest polygon\n approx = cv.approxPolyDP(contour,0.02*perimeter, True)\n objectCoordinates = len(approx)\n\n # Returns the x, y and height width of the polygon\n x, y, w, h = cv.boundingRect(approx)\n\n if objectCoordinates == 3:\n objectShape = \"Triangle\"\n elif objectCoordinates == 4:\n ratio = w / float(h)\n if ratio >= 0.95 and ratio <= 1.05:\n objectShape = \"Square\"\n else: objectShape = \"Rectangle\"\n else: objectShape = \"Circle\" \n\n \n\n # Draw rectangles around the images\n cv.rectangle(copyImage, (x,y), (x+w, y+h), (0,255,0), 2)\n cv.putText(copyImage, objectShape, (x + (w//2), y + (h//2)),cv.FONT_HERSHEY_COMPLEX, 0.5, (0,0,0))",
"def projection(poly1, dim, solver=None, abs_tol=ABS_TOL, verbose=0):\n if isinstance(poly1, Region):\n ret = Polytope()\n for i in range(len(poly1.list_poly)):\n p = projection(\n poly1.list_poly[i], dim,\n solver=solver, abs_tol=abs_tol)\n ret = ret + p\n return ret\n # flat ?\n if (poly1.dim < len(dim)) or is_empty(poly1):\n return poly1\n # `poly1` isn't flat\n poly_dim = poly1.dim\n dim = np.array(dim)\n org_dim = range(poly_dim)\n new_dim = dim.flatten() - 1\n del_dim = np.setdiff1d(org_dim, new_dim) # Index of dimensions to remove\n # logging\n logger.debug('polytope dim = ' + str(poly_dim))\n logger.debug('project on dims = ' + str(new_dim))\n logger.debug('original dims = ' + str(org_dim))\n logger.debug('dims to delete = ' + str(del_dim))\n mA, nA = poly1.A.shape\n # fewer rows than dimensions ?\n if mA < poly_dim:\n msg = 'fewer rows in A: ' + str(mA)\n msg += ', than polytope dimension: ' + str(poly_dim)\n logger.warning(msg)\n # enlarge A, b with zeros\n A = poly1.A.copy()\n poly1.A = np.zeros((poly_dim, poly_dim))\n poly1.A[0:mA, 0:nA] = A\n # stack\n poly1.b = np.hstack([poly1.b, np.zeros(poly_dim - mA)])\n logger.debug('m, n = ' + str((mA, nA)))\n # Compute cheby ball in lower dim to see if projection exists\n norm = np.sum(poly1.A * poly1.A, axis=1).flatten()\n norm[del_dim] = 0\n c = np.zeros(len(org_dim) + 1, dtype=float)\n c[len(org_dim)] = -1\n G = np.hstack([poly1.A, norm.reshape(norm.size, 1)])\n h = poly1.b\n sol = lpsolve(c, G, h)\n if sol['status'] != 0:\n # Projection not fulldim\n return Polytope()\n if sol['x'][-1] < abs_tol:\n return Polytope()\n # select projection solver\n if solver == \"esp\":\n return projection_esp(poly1, new_dim, del_dim)\n elif solver == \"exthull\":\n return projection_exthull(poly1, new_dim)\n elif solver == \"fm\":\n return projection_fm(poly1, new_dim, del_dim)\n elif solver == \"iterhull\":\n return projection_iterhull(poly1, new_dim)\n elif solver is not None:\n logger.warning('unrecognized projection solver \"' +\n str(solver) + '\".')\n # `solver` undefined or unknown\n # select method based on dimension criteria\n if len(del_dim) <= 2:\n logger.debug(\"projection: using Fourier-Motzkin.\")\n return projection_fm(poly1, new_dim, del_dim)\n elif len(org_dim) <= 4:\n logger.debug(\"projection: using exthull.\")\n return projection_exthull(poly1, new_dim)\n else:\n logger.debug(\"projection: using iterative hull.\")\n return projection_iterhull(poly1, new_dim)",
"def getContourRep(self):\n\t\tvertex1 = [[self.startX, self.startY]]\n\t\tvertex2 = [[self.startX, self.endY]]\n\t\tvertex3 = [[self.endX, self.startY]]\n\t\tvertex4 = [[self.endX, self.endY]]\n\t\tvertices = [vertex1, vertex2, vertex3, vertex4]\n\t\treturn convexHull(np.asarray(vertices, dtype = np.int32))",
"def compute_coverage_for_contour_pair(\n contour1: np.ndarray,\n contour2: np.ndarray,\n max_size: int = DEFAULT_MAX_CONTOUR_MASK_SIZE,\n):\n im1, im2 = compute_contour_binary_masks(contour1, contour2, max_size=max_size)\n return (im1 & im2).sum() / im1.sum()",
"def shape_contour(contour):\n width = max(contour[1][0]-contour[0][0], contour[3][0]-contour[2][0])\n height = max(contour[3][1]-contour[0][1],contour[2][1]-contour[1][1])\n return height,width",
"def setup_contour_input():\n from bfieldtools.utils import load_example_mesh\n\n mesh = load_example_mesh(\"unit_disc\")\n\n r = np.linalg.norm(mesh.vertices, axis=1)\n scalars = (1 - r) ** 2\n scalars *= mesh.vertices[:, 0]\n\n return mesh, scalars",
"def __CalculateExtend(self, contour):\r\n area = self.__CalculateArea(contour)\r\n boundingBox = self.__CalculateBoundingBox(contour)\r\n return area / (boundingBox[2] * boundingBox[3])",
"def func_curvature(self):\n return u.Curvature.CONVEX",
"def polyclip(i, j, pol_x, pol_y, area=False):\n n = len(pol_x)\n nout = n + 4\n px_out, py_out = [0] * nout, [0] * nout\n clip_vals = [i, i + 1, j + 1, j]\n\n for ctype in range(4):\n cv = clip_vals[ctype]\n if ctype == 0:\n inside = [px > i for px in pol_x]\n elif ctype == 1:\n inside = [(px < i + 1) for px in pol_x]\n elif ctype == 2:\n inside = [(py < j + 1) for py in pol_y]\n else:\n inside = [py > j for py in pol_y]\n if all(inside):\n continue\n\n shiftp1 = inside.copy()\n shiftp1.insert(0, shiftp1.pop(-1))\n crosses = [i1 != i2 for (i1, i2) in zip(inside, shiftp1)]\n pind = 0\n for k in range(n):\n px, py = pol_x[k], pol_y[k]\n if crosses[k]: # out->in or in->out, add intersection\n ind = n - 1 if k == 0 else k - 1\n sx, sy = pol_x[ind], pol_y[ind]\n try:\n if ctype <= 1: # left or right\n px_out[pind] = cv\n py_out[pind] = sy + ((py - sy) / (px - sx)) * (cv - sx)\n else: # top or bottom\n px_out[pind] = sx + ((px - sx) / (py - sy)) * (cv - sy)\n py_out[pind] = cv\n except ZeroDivisionError: # pragma: no cover\n px_out[pind] = np.nan\n py_out[pind] = np.nan\n pind += 1\n\n if inside[k]: # out->in or in->in, add 2nd point\n px_out[pind] = px\n py_out[pind] = py\n pind += 1\n\n if pind >= nout - 2:\n nout *= 2\n px_out = px_out + [0] * nout\n py_out = py_out + [0] * nout\n nout *= 2\n\n if pind == 0: # polygon is entirely outside this line\n return None, None\n n = pind\n pol_x = px_out[:n].copy()\n pol_y = py_out[:n].copy()\n\n if area:\n if pol_x is None: # pragma: no cover\n return 0.0\n shiftx = pol_x.copy()\n shifty = pol_y.copy()\n shiftx.append(shiftx.pop(0))\n shifty.append(shifty.pop(0))\n a1 = [p[0] * p[1] for p in zip(pol_x, shifty)]\n a2 = [p[0] * p[1] for p in zip(pol_y, shiftx)]\n a = [p[0] - p[1] for p in zip(a1, a2)]\n return abs(sum(a)) / 2\n\n return pol_x, pol_y",
"def curvature(contour,fn = 3, bn = 3):\n\n clen = contour.shape[0]\n E = np.zeros((clen,), np.float32)\n thetai = np.zeros((clen,), np.float32)\n\n for k in range(1,clen):\n \n # first and last few points\n if k < bn:\n bnd = 0\n fnd = k + fn\n elif k + fn > clen-1:\n bnd = k - bn\n fnd = clen-1\n else:\n bnd = k - bn\n fnd = k + fn\n\n # calculate curvature\n lb = math.sqrt( (contour[k,0]-contour[bnd,0])**2 + (contour[k,1]-contour[bnd,1])**2 )\n lf = math.sqrt( (contour[k,0]-contour[fnd,0])**2 + (contour[k,1]-contour[fnd,1])**2 )\n\n if contour[k,1]-contour[bnd,1]!=0:\n thetab=math.atan( np.double(abs(contour[k,0]-contour[bnd,0])) / np.double(abs(contour[k,1]-contour[bnd,1])) )\n else:\n thetab=math.atan( np.double(abs(contour[k,0]-contour[bnd,0])) / np.double(abs(contour[k,1]-contour[bnd,1])) )\n thetab = math.pi/2 - thetab\n\n if contour[k,1]-contour[fnd,1]!=0:\n thetaf=math.atan( np.double(abs(contour[k,0]-contour[fnd,0])) / np.double(abs(contour[k,1]-contour[fnd,1])) )\n else:\n thetaf=math.atan( np.double(abs(contour[k,0]-contour[fnd,0])) / np.double(abs(contour[k,1]-contour[fnd,1])) )\n thetaf = math.pi/2 - thetaf\n\n thetai[k]=(thetab+thetaf)/2\n detlaf=abs(thetaf-thetai[k])\n detlab=abs(thetai[k]-thetab)\n E[k]=detlaf/lf/2+detlab/lb/2\n\n E[0]=E[1]\n E[clen - 1]=E[clen - 2]\n thetai[0]=thetai[1]\n thetai[clen - 1]=thetai[clen - 2]\n\n return (E,thetai)",
"def _mn_contour_ ( self , npoint , par1 , par2 , nsigma = 1 ) :\n if npoint < 4 : raise ValueError ( 'contour: npoint (%s) must be >= 4' % npoint )\n if not par1 in self : raise ValueError ( 'contour: par1(%s) is not in Minuit' % par1 )\n if not par2 in self : raise ValueError ( 'contour: par2(%s) is not in Minuit' % par2 )\n if par1 == par2 : raise ValueError ( 'contour: par1 == par2(%s) ' % par2 )\n #\n ## save old error defintion\n #\n old_err_def = self.GetErrorDef()\n #\n ## set new error definition\n #\n self.SetErrorDef ( nsigma * nsigma )\n \n graph = self.Contour ( npoint , par1 , par2 )\n\n #\n ## restore old error defininion\n #\n status = self.GetStatus()\n self.SetErrorDef ( old_err_def ) \n #\n if graph and 0 == status : return graph\n logger.error ( 'TMinuit::Contour: status %i' % status ) \n return graph",
"def simplify(self, tolerance, preserve_topology=...): # -> BaseGeometry:\n ...",
"def __CalculateCircle(self, contour):\r\n return cv2.minEnclosingCircle(contour)",
"def find_optimal_components_subset(contours, edges):\n c_info = props_for_contours(contours, edges)\n c_info.sort(key=lambda x: -x['sum'])\n total = np.sum(edges) / 255\n area = edges.shape[0] * edges.shape[1]\n\n c = c_info[0]\n del c_info[0]\n this_crop = c['x1'], c['y1'], c['x2'], c['y2']\n crop = this_crop\n covered_sum = c['sum']\n\n while covered_sum < total:\n changed = False\n recall = 1.0 * covered_sum / total\n prec = 1 - 1.0 * crop_area(crop) / area\n f1 = 2 * (prec * recall / (prec + recall))\n # print '----'\n for i, c in enumerate(c_info):\n this_crop = c['x1'], c['y1'], c['x2'], c['y2']\n new_crop = union_crops(crop, this_crop)\n new_sum = covered_sum + c['sum']\n new_recall = 1.0 * new_sum / total\n new_prec = 1 - 1.0 * crop_area(new_crop) / area\n new_f1 = 2 * new_prec * new_recall / (new_prec + new_recall)\n\n # Add this crop if it improves f1 score,\n # _or_ it adds 25% of the remaining pixels for <15% crop expansion.\n # ^^^ very ad-hoc! make this smoother\n remaining_frac = c['sum'] / (total - covered_sum)\n new_area_frac = 1.0 * crop_area(new_crop) / crop_area(crop) - 1\n if new_f1 > f1 or (\n remaining_frac > 0.25 and new_area_frac < 0.15):\n print('%d %s -> %s / %s (%s), %s -> %s / %s (%s), %s -> %s' % (\n i, covered_sum, new_sum, total, remaining_frac,\n crop_area(crop), crop_area(new_crop), area, new_area_frac,\n f1, new_f1))\n crop = new_crop\n covered_sum = new_sum\n del c_info[i]\n changed = True\n break\n\n if not changed:\n break\n\n return crop",
"def projection_fm(poly1, new_dim, del_dim, abs_tol=ABS_TOL):\n # Remove last dim first to handle indices\n del_dim = -np.sort(-del_dim)\n if not poly1.minrep:\n poly1 = reduce(poly1)\n poly = poly1.copy()\n for i in del_dim:\n positive = np.nonzero(poly.A[:, i] > abs_tol)[0]\n negative = np.nonzero(poly.A[:, i] < -abs_tol)[0]\n null = np.nonzero(np.abs(poly.A[:, i]) < abs_tol)[0]\n nr = len(null) + len(positive) * len(negative)\n nc = np.shape(poly.A)[0]\n C = np.zeros([nr, nc])\n A = poly.A[:, i].copy()\n row = 0\n for j in positive:\n for k in negative:\n C[row, j] = -A[k]\n C[row, k] = A[j]\n row += 1\n for j in null:\n C[row, j] = 1\n row += 1\n keep_dim = np.setdiff1d(\n range(poly.A.shape[1]),\n np.array([i]))\n poly = Polytope(\n np.dot(C, poly.A)[:, keep_dim],\n np.dot(C, poly.b))\n if not is_fulldim(poly):\n return Polytope()\n poly = reduce(poly)\n return poly",
"def poly_enclose(points, color, inc=1.2, rad=0.3, lw=2):\n points = np.log(points)\n hull = ConvexHull(points)\n\n cent = np.mean(points, 0)\n pts = []\n for pt in points[hull.simplices]:\n pts.append(pt[0].tolist())\n pts.append(pt[1].tolist())\n \n pts.sort(key=lambda p: np.arctan2(p[1] - cent[1],\n p[0] - cent[0]))\n pts = pts[0::2] # Deleting duplicates\n pts.insert(len(pts), pts[0])\n \n \n verts = inc*(np.array(pts)- cent) + cent\n verts2 = np.zeros((3*verts.shape[0]-2,2))\n verts2[0::3] = verts\n verts2[1::3,:] = (1-rad)*verts[0:-1,:] + rad*verts[1:,:]\n verts2[2::3,:] = rad*verts[0:-1,:] + (1-rad)*verts[1:,:]\n verts2[0:-1] = verts2[1:]\n verts2[-1] = verts2[0]\n\n\n \n codes = [Path.MOVETO, Path.LINETO, Path.CURVE3,]\n for j in range(len(pts)-2):\n codes.extend([Path.CURVE3, Path.LINETO, Path.CURVE3,])\n codes.append(Path.CURVE3)\n \n \n path = Path(verts2, codes)\n patch = patches.PathPatch(path, facecolor=color, lw=0, alpha=0.2)\n edge = patches.PathPatch(path, edgecolor=color, facecolor='none', lw=lw)\n patch._path._vertices = np.exp(patch._path._vertices)\n return patch, edge",
"def bound_shapes(contours):\r\n\r\n contours_poly = [None]*len(contours)\r\n boundRect = [None]*len(contours)\r\n centers = [None]*len(contours)\r\n radius = [None]*len(contours)\r\n for i, c in enumerate(contours):\r\n contours_poly[i] = cv2.approxPolyDP(c, 3, True)\r\n boundRect[i] = cv2.boundingRect(contours_poly[i])\r\n centers[i], radius[i] = cv2.minEnclosingCircle(contours_poly[i])\r\n \r\n return (contours_poly, boundRect, centers, radius)",
"def _estimateCubicCurveLength(pt0, pt1, pt2, pt3, precision=10):\n points = []\n length = 0\n step = 1.0 / precision\n factors = range(0, precision + 1)\n for i in factors:\n points.append(_getCubicPoint(i * step, pt0, pt1, pt2, pt3))\n for i in range(len(points) - 1):\n pta = points[i]\n ptb = points[i + 1]\n length += _distance(pta, ptb)\n return length",
"def compute(self, *args, **kwargs):\n vertices = args[0]\n xpts = vertices[2] # z plays the 'x' part\n ypts = vertices[0] # x plays the 'y' part\n #zpts = vertices[1]\n #********************************************\n # switcharoo: using z in place of x\n # using x in place of y\n # i.e.\n #\n # y <- x\n # x <- z\n #\n qxdot = np.dot(xpts,self.localBasis[1])\n qxddot = np.dot(xpts,self.localBasis[2])\n qydot = np.dot(ypts,self.localBasis[1])\n qyddot = np.dot(ypts,self.localBasis[2])\n store = (qxdot*qyddot - qydot*qxddot)\n temp = np.sqrt(qxdot**2 + qydot**2)\n if isinstance(temp, ia):\n if temp.inf<=0:\n temp.inf = 0.\n denom = temp*((temp)**2)#**.5## #problem foud with sqrt\n #\n curvature = store/denom#((np.sqrt(qxdot*qxdot + qydot*qydot))**3.)\n return curvature",
"def show_conts(cont, shape, tolerance):\n cont_image = np.zeros(shape)\n approx_image = np.zeros(shape)\n rr, cc = polygon_perimeter(cont[:, 0], cont[:, 1])\n cont_image[rr, cc] = 1\n poly_approx = approximate_polygon(cont, tolerance=tolerance)\n rra, cca = polygon_perimeter(poly_approx[:, 0], poly_approx[:, 1])\n approx_image[rra, cca] = 1\n plt.imshow(cont_image)\n plt.show()\n plt.imshow(approx_image)\n plt.show()",
"def get_corners_from_contours(contours, corner_amount=4):\n\tcoefficient = .05\n\tepsilon = coefficient * cv2.arcLength(contours, True)\n\n\twhile True:\n\t\t# print(contours)\n\t\tprint(\"epsilon:\", epsilon)\n\n\t\tpoly_approx = cv2.approxPolyDP(contours, epsilon, True)\n\t\t\n\t\t#Выпуклая оболочка, описывающая точки poly_approx\n\t\thull = cv2.convexHull(poly_approx)\n\t\tif len(hull) == corner_amount:\n\t\t\treturn hull\n\t\telse:\n\t\t\tif len(hull) > corner_amount:\n\t\t\t\tcoefficient += .01\n\t\t\telse:\n\t\t\t\tcoefficient -= .01\n\t\tepsilon = coefficient * cv2.arcLength(contours, True)\n\t\tif epsilon < 0: return hull",
"def restrict(self):\n\n cg = self.grid.coarse_like(2)\n\n c_edge_coeffs = EdgeCoeffs(cg, None, empty=True)\n\n c_eta_x = cg.scratch_array()\n c_eta_y = cg.scratch_array()\n\n fg = self.grid\n\n c_eta_x[cg.ilo:cg.ihi+2,cg.jlo:cg.jhi+1] = \\\n 0.5*(self.x[fg.ilo:fg.ihi+2:2,fg.jlo :fg.jhi+1:2] +\n self.x[fg.ilo:fg.ihi+2:2,fg.jlo+1:fg.jhi+1:2])\n\n # redo the normalization\n c_edge_coeffs.x = c_eta_x*fg.dx**2/cg.dx**2\n\n c_eta_y[cg.ilo:cg.ihi+1,cg.jlo:cg.jhi+2] = \\\n 0.5*(self.y[fg.ilo :fg.ihi+1:2,fg.jlo:fg.jhi+2:2] +\n self.y[fg.ilo+1:fg.ihi+1:2,fg.jlo:fg.jhi+2:2])\n\n c_edge_coeffs.y = c_eta_y*fg.dy**2/cg.dy**2\n\n return c_edge_coeffs",
"def _createpoly(self):\n return self.cv.create_polygon((0, 0, 0, 0, 0, 0), fill=\"\", outline=\"\")"
] | [
"0.6073888",
"0.5673097",
"0.5591408",
"0.5486369",
"0.5361581",
"0.53499943",
"0.5272163",
"0.52631676",
"0.5168495",
"0.51550204",
"0.5142952",
"0.51347584",
"0.51139116",
"0.5096012",
"0.5069017",
"0.50188994",
"0.5007644",
"0.5004071",
"0.50028485",
"0.49921945",
"0.49831757",
"0.49590698",
"0.49588722",
"0.49582735",
"0.4947933",
"0.487605",
"0.48688474",
"0.4868501",
"0.48627523",
"0.48605618"
] | 0.6092272 | 0 |
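For context, a short self-contained sketch of the approxPolyDP pattern used by the entry above, going from a synthetic binary image to the simplified polygon. It assumes OpenCV 4's two-value findContours return; the image contents and the 0.1 epsilon factor are illustrative.

import cv2
import numpy as np

# Synthetic binary image containing a single filled rectangle.
img = np.zeros((200, 200), dtype=np.uint8)
cv2.rectangle(img, (40, 50), (160, 150), 255, -1)

contours, _ = cv2.findContours(img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
cnt = contours[0]

# Epsilon proportional to the perimeter, as in the entry above.
epsilon = 0.1 * cv2.arcLength(cnt, True)
approx = cv2.approxPolyDP(cnt, epsilon, True)
print(len(approx))  # a clean rectangle collapses to roughly 4 vertices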
Calculate the contour area by the function cv2.contourArea() or from moments, M["m00"]. | def __CalculateArea(self, contour):
return cv2.contourArea(contour) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def area(cnt):\n\treturn cv2.contourArea(cnt)",
"def get_contour_area(contour):\n assert isinstance(contour, np.ndarray), 'contour should be a numpy array'\n return cv2.contourArea(contour)",
"def __CalculateMoments(self, contour):\r\n return cv2.moments(contour)",
"def get_max_area(contours):\n max_area = 0\n for c in contours:\n temp = cv2.contourArea(c)\n if temp > max_area:\n max_area = temp\n\n return max_area",
"def calcZmArea(self):\n #-- NO EXTRAPOLATION\n if self.extrapolation == \"none\":\n self.ZmArea = sum(self.zmareas)\n #-- AREA EXTRAPOLATION\n if self.extrapolation == \"area\":\n self.ZmArea = sum(self.zmareas) * self.stratum.A2 / self.stratum.Aij\n #-- LINEAR EXTRAPOLATION\n if self.extrapolation == \"linear\":\n self.ZmArea = self.stratum.LT / self.stratum.LN * self.meanZmArea() * self.stratum.Ni\n return self.ZmArea",
"def calcZmAreaVar(self):\n #-- NO EXTRAPOLATION\n if self.extrapolation == \"none\":\n self.ZmAreaVar = sum(self.zmvars)\n #-- AREA EXTRAPOLATION\n if self.extrapolation == \"area\":\n self.ZmAreaVar = 0\n #-- LINEAR EXTRAPOLATION\n if self.extrapolation == \"linear\":\n self.ZmAreaVar = ( (self.stratum.LT / self.stratum.LN) ** 2 ) * (((self.stratum.Ni ** 2) * (1 - self.ni / self.stratum.Ni) * self.variance()) / self.ni) + ((self.stratum.Ni / self.ni) * sum(self.zmvars))\n return self.ZmAreaVar",
"def max_contour(contours):\n if len(contours) == 0:\n return []\n else:\n max_cnt = []\n max_area = 0\n for cnt in contours:\n area = cv2.contourArea(cnt)\n # print(area)\n if area > 1000 and area > max_area:\n max_area = area\n max_cnt = cnt\n return max_cnt",
"def center_of_contour(contorno):\n M = cv2.moments(contorno)\n # Usando a expressão do centróide definida em: https://en.wikipedia.org/wiki/Image_moment\n if M[\"m00\"]!=0:\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n return (int(cX), int(cY))\n else:\n return (200,150)",
"def get_area_box(contours_points):\n rect = cv2.minAreaRect(np.array(contours_points))\n box = cv2.cv.BoxPoints(rect)\n box = np.array(box)\n return map(tuple, box)",
"def filter_area( contours, debug=False ):\r\n ret = []\r\n\r\n for x in contours:\r\n area = cv2.contourArea( x )\r\n if area > MIN_AREA and area < MAX_AREA:\r\n if debug:\r\n print \"Area\", area\r\n ret.append( x )\r\n return( ret )",
"def __CalculateCentroid(self, contour):\r\n moments = cv2.moments(contour)\r\n\r\n centroid = (-1, -1)\r\n if moments[\"m00\"] != 0:\r\n centroid = (int(round(moments[\"m10\"] / moments[\"m00\"])),\r\n int(round(moments[\"m01\"] / moments[\"m00\"])))\r\n\r\n return centroid",
"def __CalculateEllipse(self, contour):\r\n if len(contour) > 5:\r\n return cv2.fitEllipse(contour)\r\n\r\n return cv2.minAreaRect(contour)",
"def __CalculateApproximation(self, contour):\r\n epsilon = 0.1 * cv2.arcLength(contour, True)\r\n return cv2.approxPolyDP(contour, epsilon, True)",
"def calculate_area(surfname,fwhm):\n try:\n subprocess.call(\"depth_potential -area_voronoi \" + surfname + \" /tmp/tmp_area.txt\",shell=True)\n subprocess.call(\"depth_potential -smooth \" + str(fwhm) + \" /tmp/tmp_area.txt \" + surfname + \" /tmp/sm_area.txt\",shell=True)\n area=np.loadtxt(\"/tmp/sm_area.txt\")\n subprocess.call(\"rm /tmp/sm_area.txt /tmp/tmp_area.txt\",shell=True)\n except OSError:\n print(\"depth_potential not found, please install CIVET tools or replace with alternative area calculation/data smoothing\")\n return 0;\n return area;",
"def moments(cnt):\n\treturn cv2.moments(cnt)",
"def moments(cnt):\n\treturn cv2.moments(cnt)",
"def convex_hull_area( contours, debug= False ):\r\n ret_areas = []\r\n ret_hulls = []\r\n for c in contours:\r\n hull = cv2.convexHull( c )\r\n area = cv2.contourArea( hull )\r\n ret_areas.append( area )\r\n ret_hulls.append( hull )\r\n if( debug ):\r\n print( \"Hull area: {0}\".format( area ) )\r\n\r\n return ( ret_areas, ret_hulls )",
"def update_contour():\n global contour_center\n global contour_area\n\n image = rc.camera.get_color_image()\n\n if image is None:\n contour_center = None\n contour_area = 0\n else:\n # Find all of the orange contours\n contours = rc_utils.find_contours(image, ORANGE[0], ORANGE[1])\n\n # Select the largest contour\n contour = rc_utils.get_largest_contour(contours, MIN_CONTOUR_AREA)\n\n if contour is not None:\n # Calculate contour information\n contour_center = rc_utils.get_contour_center(contour)\n contour_area = rc_utils.get_contour_area(contour)\n\n # Draw contour onto the image\n rc_utils.draw_contour(image, contour)\n rc_utils.draw_circle(image, contour_center)\n\n else:\n contour_center = None\n contour_area = 0\n\n # Display the image to the screen\n rc.display.show_color_image(image)",
"def __CalculatePerimeter(self, curve):\r\n return cv2.arcLength(curve, True)",
"def shape_contour(contour):\n width = max(contour[1][0]-contour[0][0], contour[3][0]-contour[2][0])\n height = max(contour[3][1]-contour[0][1],contour[2][1]-contour[1][1])\n return height,width",
"def __CalculateCircularity(self, contour):\r\n if len(contour) < 2:\r\n return 0\r\n\r\n perimeter = cv2.arcLength(contour, False)\r\n area = self.__CalculateArea(contour)\r\n return (4 * math.pi * area) / (perimeter * perimeter)",
"def get_image_moments(image=None, contour=None, threshold=3):\n\tif contour is None and image is not None:\n\t\tcontour = get_contour(image, threshold)\n\treturn cv2.moments(contour)",
"def area(self) -> npt.NDArray[np.float_]:\n points = self._normalized_projection()\n a = sum(det(points[..., [0, i, i + 1], :]) for i in range(1, points.shape[-2] - 1))\n return 1 / 2 * np.abs(a)",
"def __CalculateExtend(self, contour):\r\n area = self.__CalculateArea(contour)\r\n boundingBox = self.__CalculateBoundingBox(contour)\r\n return area / (boundingBox[2] * boundingBox[3])",
"def __CalculateBoundingBox(self, contour):\r\n return cv2.boundingRect(contour)",
"def get_dimensions_from_contour(img, cntr, kernel):\n\tmask = np.zeros_like(img) # mask will contain the fitted and adjusted ellipse of a single obstacle\n\tellipse = cv2.fitEllipse(cntr)\n\tx, y, obj_length, obj_height = cv2.boundingRect(cntr)\n\trect = cv2.minAreaRect(cntr)\n\n\tequi_diameter = obj_length # bounding rectangle gives a better approximation of diameter\n\n\tbox = cv2.boxPoints(rect)\n\tbox = np.int0(box)\n\tmask = cv2.ellipse(mask, ellipse, (255, 255, 255), -1) # draw the fitted ellipse\n\trows = mask.shape[0]\n\tcols = mask.shape[1]\n\tM = np.float32([[1, 0, 0], [0, 1, equi_diameter / 4]]) # shift mask down to match obstacle, not edge\n\tmask = cv2.warpAffine(mask, M, (cols, rows))\n\tmask = cv2.erode(mask, kernel, iterations=3) # erode the mask to remove background points\n\treturn mask, box, x, y, obj_length, obj_height",
"def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = int((X*data).sum()/total)\n y = int((Y*data).sum()/total)\n col = data[:, int(y)]\n \n width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n \n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return(height, x, y, width_x, width_y, 0.0)\n #return(1, 15, 15, 2, 2, 0.0)",
"def compute_area(self):\r\n\r\n \"\"\"Косое произведение векторов\r\n A = (x2-x1; y2-y1; z2-z1)\r\n B = (x3-x1; y3-y1; z3-z1)\r\n S = 0.5*sqrt((Ay*Bz - Az*By)^2 + (Az*Bx - Ax*Bz)^2 + (Ax*By - Ay*Bx)^2 )\r\n \"\"\"\r\n a_x = self.x2 - self.x1\r\n a_y = self.y2 - self.y1\r\n a_z = self.z2 - self.z1\r\n\r\n b_x = self.x3 - self.x1\r\n b_y = self.y3 - self.y1\r\n b_z = self.z3 - self.z1\r\n\r\n self.area = 0.5 * math.sqrt((a_y * b_z - a_z * b_y) ** 2 + (a_z * b_x - a_x * b_z) ** 2 + (a_x * b_y - a_y * b_x) ** 2)\r\n\r\n \"\"\"По теореме Герона\"\"\"\r\n # a = math.sqrt((self.x1-self.x2)**2 + (self.y1-self.y2)**2 + (self.z1-self.z2)**2)\r\n # b = math.sqrt((self.x1-self.x3)**2 + (self.y1-self.y3)**2 + (self.z1-self.z3)**2)\r\n # c = math.sqrt((self.x2-self.x3)**2 + (self.y2-self.y3)**2 + (self.z2-self.z3)**2)\r\n # p = 0.5 * (a + b + c)\r\n # self.area = math.sqrt(p * (p - a) * (p - b) * (p - c))\r",
"def moments(data):\n# =============================================================================\n# total = data.sum()\n# X, Y = np.indices(data.shape)\n# x = (X*data).sum()/total\n# y = (Y*data).sum()/total\n# col = data[:, int(y)]\n# \n# width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n# \n# row = data[int(x), :]\n# width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n# height = data.max()\n# height1 = height\n# =============================================================================\n return(1, 15, 14, 3, 3, 1, 14, 16, 3, 2)",
"def compute_mesh_area_numpy(mesh):\n pass"
] | [
"0.7210734",
"0.66500163",
"0.6628211",
"0.6365675",
"0.614254",
"0.61276436",
"0.6062467",
"0.5952984",
"0.5937475",
"0.59279317",
"0.58706164",
"0.57940084",
"0.5752224",
"0.5744717",
"0.5743859",
"0.5743859",
"0.5719784",
"0.5644189",
"0.56355894",
"0.5590524",
"0.5581619",
"0.5569854",
"0.5557568",
"0.5541649",
"0.5538348",
"0.55332536",
"0.5527276",
"0.55238736",
"0.5488976",
"0.5474375"
] | 0.7479222 | 0 |
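A short sketch contrasting the two routes named in the docstring above, cv2.contourArea() versus the zeroth moment m00; the square contour used here is an illustrative assumption.

import cv2
import numpy as np

# A 100 x 100 square given directly as a contour (N x 1 x 2 int32 points).
square = np.array([[[0, 0]], [[0, 100]], [[100, 100]], [[100, 0]]], dtype=np.int32)

area_direct = cv2.contourArea(square)           # polygon area from the contour points
area_moment = abs(cv2.moments(square)["m00"])   # the zeroth-order moment also gives the area
print(area_direct, area_moment)                 # both agree for a simple convex contour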
Calculate the bounding rectangle. It is a straight (axis-aligned) rectangle, so it does not take the rotation of the object into account, and its area will not be the minimum possible. It is found with the function cv2.boundingRect(). | def __CalculateBoundingBox(self, contour):
return cv2.boundingRect(contour) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def boundingRect(self):\n return self.rect().adjusted(-2, -2, 2, 2)",
"def boundingRect(cnt):\n\tx, y, w, h = cv2.boundingRect(cnt)\n\treturn {\"x\":x, \"y\": y, \"w\": w, \"h\": h}",
"def boundingBox(self):\n minx, miny, maxx, maxy = self.substrates.bounds\n return pcbnew.BOX2I(\n pcbnew.VECTOR2I(int(minx), int(miny)),\n pcbnew.VECTOR2I(int(maxx - minx), int(maxy - miny)))",
"def get_bounding_box(current_building_contour):\n x, y, w, h, = cv.boundingRect(current_building_contour[0])\n return x, y, w, h",
"def boundingBoxArea(box):\n return (box[2] - box[0] + 1) * (box[3] - box[1] + 1)",
"def fitRectangle(self):\n \n #TODO MAKE SOMETHING MORE GENERIC!!\n \n fA, (fXg, fYg) = self.getArea_and_CenterOfMass()\n \n x1,y1, x2,y2 = self.getBoundingBox()\n #build a rectangle with same \"width\" as the polygon... is-it good enough??\n w = x2 - x1\n \n #but this width should not lead to go out of the bounding box!\n fW = min(w, (x2-fXg)*2, (fXg-x1)*2)\n \n #same area\n fH = fA / fW\n \n x1,y1, x2,y2 = [ int(round(v)) for v in [ fXg - fW/2.0, fYg - fH/2\n , fXg + fW/2.0, fYg + fH/2 ]]\n \n return x1,y1, x2,y2",
"def boundingRect(self) -> QRectF:\n return self._rect.adjusted(-10, -10, 10, 10)",
"def boundingRect(self):\n return QRectF(-self.innerRectangleSize + self.edge_size,\n -self.innerRectangleSize + self.edge_size,\n (self.innerRectangleSize * 2) - (self.edge_size * 2),\n (self.innerRectangleSize * 2) - (self.edge_size * 2)).normalized()",
"def boundingRect(self):\n extra = self._halfLength / 2.0\n return QRectF(self._origin, QSizeF(self._end.x() - self._origin.x(),\n self._end.y() - self._origin.y())\n ).normalized().adjusted(-extra, -extra, extra, extra)",
"def get_bounding_box(conture, img=None):\n\t# get approx, return index\n\t# epsilon = 0.1 * cv2.arcLength(x, True)\n\t# approx_box = cv2.approxPolyDP(x, epsilon, True)\n\t# print 'app box', approx_box # Min [[[ 56 85]] [[318 231]]]\n\t# leftpointX = approx_box[0][0][0]\n\t# print 'app box 2', leftpointX # Min [[[ 56 85]] Max [[318 231]]]\n\t# approx_box_s = int(0.9*approx_box)\n\t# print 'app box s',approx_box_s\n\t\n\t# get rectangle\n\tx, y, w, h = cv2.boundingRect(conture) # x,y: top-left coordinate\n\t# draw rectangle\n\tcv2.rectangle(img, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\tcv2.waitKey(10)\n\treturn (x, y, w, h)",
"def get_bounding_box(img):\n rows = np.any(img, axis=1)\n cols = np.any(img, axis=0)\n rmin, rmax = np.where(rows)[0][[0, -1]]\n cmin, cmax = np.where(cols)[0][[0, -1]]\n # due to python indexing, need to add 1 to max\n # else accessing will be 1px in the box, not out\n rmax += 1\n cmax += 1\n return [rmin, rmax, cmin, cmax]",
"def get_bounding_box(uv_coor, shape):\r\n\txmin = ymin = 99999\r\n\txmax = ymax = 0\r\n\tfor x, y in uv_coor:\r\n\t\txmin = min(xmin, int(x))\r\n\t\txmax = max(xmax, int(x))\r\n\t\tymin = min(ymin, int(y))\r\n\t\tymax = max(ymax, int(y))\r\n\txmin = max(0, xmin - 20)\r\n\tymin = max(0, ymin - 20)\r\n\r\n\txmax = min(shape[1], xmax + 20)\r\n\tymax = min(shape[0], ymax + 20)\r\n\r\n\treturn xmin, xmax, ymin, ymax",
"def boundingRectPoints(cnt):\n\tx, y, w, h = cv2.boundingRect(cnt)\n\tfirst = (x, y)\n\tend = (x+w, y+h)\n\treturn {\"top-left\": first, \"bottom-right\":end}",
"def get_boundingbox(face, width, height, scale=1.3, minsize=None):\n x1 = face.left()\n y1 = face.top()\n x2 = face.right()\n y2 = face.bottom()\n size_bb = int(max(x2 - x1, y2 - y1) * scale)\n if minsize:\n if size_bb < minsize:\n size_bb = minsize\n center_x, center_y = (x1 + x2) // 2, (y1 + y2) // 2\n\n # Check for out of bounds, x-y top left corner\n x1 = max(int(center_x - size_bb // 2), 0)\n y1 = max(int(center_y - size_bb // 2), 0)\n # Check for too big bb size for given x, y\n size_bb = min(width - x1, size_bb)\n size_bb = min(height - y1, size_bb)\n\n return x1, y1, size_bb",
"def bounding_box(self):\n return None",
"def boundingBox(self):\n pmodel = (glm.vec3(1, -self.y_sign, 0)\n * self.model.pos * self.transform.scale)\n x, y, _ = self.transform.pos + pmodel\n y += -self.y_sign * self.font.table['ascent'] * self.transform.scale[1]\n return x, y, self.pixwidth(), self.pixheight()",
"def bounding_box(self):\n # We use the solution described in\n # https://stackoverflow.com/a/14163413\n cos_theta = np.cos(self.angle)\n sin_theta = np.sin(self.angle)\n width_x = 0.5 * self.width * cos_theta\n width_y = 0.5 * self.width * sin_theta\n height_x = 0.5 * self.height * -sin_theta\n height_y = 0.5 * self.height * cos_theta\n dx = np.sqrt(width_x**2 + height_x**2)\n dy = np.sqrt(width_y**2 + height_y**2)\n\n xmin = self.center.x - dx\n xmax = self.center.x + dx\n ymin = self.center.y - dy\n ymax = self.center.y + dy\n\n return RegionBoundingBox.from_float(xmin, xmax, ymin, ymax)",
"def boundingBox(self):\n y_max = np.max(self.points[:,0])\n x_max = np.max(self.points[:,1])\n y_min = np.min(self.points[:,0])\n x_min = np.min(self.points[:,1])\n \n return ((x_max, y_max), (x_min, y_min))",
"def boundingRect(self):\n # TODO (#2398) this rectangle makes no sense, it should be\n # top left x, top left y, width, height. But for some reason\n # that doesn't play nicely with the coordinate system.\n #\n # Instead it is bottom left x, bottom left y, width height.\n return QtCore.QRectF(-9000, -6000, 18000, 12000)",
"def get_bounding_box(self):\n if len(self.polygons) == 0:\n return None\n return numpy.array(((min(pts[:, 0].min() for pts in self.polygons),\n min(pts[:, 1].min() for pts in self.polygons)),\n (max(pts[:, 0].max() for pts in self.polygons),\n max(pts[:, 1].max() for pts in self.polygons))))",
"def rgb_image_bounding_box(image_full_path, boundingBox, convert_bgr=False, autocrop=False):\n imgraw = cv2.imread(image_full_path, 1)\n if len(boundingBox) > 0:\n imgraw = imgraw[boundingBox[1]:boundingBox[3], boundingBox[0]:boundingBox[2], :]\n\n if autocrop:\n imgshape = imgraw.shape\n mindim = np.argmin([imgshape[0], imgshape[1]])\n cropdim = mindim\n boundingBox = [0, 0, imgshape[1], imgshape[0]]\n xtra = np.abs(imgshape[0] - imgshape[1])\n boundingBox[cropdim] = xtra // 2\n boundingBox[cropdim + 2] -= xtra // 2\n imgcrop = imgraw[boundingBox[1]:boundingBox[3], boundingBox[0]:boundingBox[2], :]\n else:\n imgcrop = imgraw\n\n if convert_bgr:\n imgcrop = cv2.cvtColor(imgcrop, cv2.COLOR_BGR2RGB)\n return imgcrop",
"def get_bounding_box(self):\n lon, lat = self.coordinates\n\n ll = (np.min(lon),np.min(lat))\n ul = (np.min(lon),np.max(lat))\n ur = (np.max(lon),np.max(lat))\n lr = (np.max(lon),np.min(lat))\n\n return (ll, ul, ur, lr)",
"def _get_bounding_box(self, frame, bounding_offset):\n\n # Try to find board if the boundingbox is not set\n center, ellipse, mask = self.board.detect(frame)\n\n # Should not be None\n if center is None:\n print(\"skipping frame\")\n return None\n if ellipse is None:\n print(\"skipping frame\")\n return None\n if mask is None:\n print(\"skipping frame\")\n return None\n\n self.point_mask = mask\n # cv2.imshow(\"mask\", mask)\n\n x_offset = (ellipse[1][0] / 2)\n x_center = ellipse[0][0]\n\n y_offset = ellipse[1][1] / 2\n y_center = ellipse[0][1]\n\n minx = max(0, x_center - x_offset - bounding_offset)\n maxx = min(self.width, x_center + x_offset + bounding_offset)\n miny = max(0, y_center - y_offset - bounding_offset)\n maxy = min(self.height, y_center + y_offset + bounding_offset)\n return ((int(minx), int(miny)), (int(maxx), int(maxy)))",
"def _get_rounded_bounding_box(\n geom: BasePolygon, width: Numeric\n ) -> Tuple[int, int, int, int]:\n return (\n geom.bounds[0] - (geom.bounds[0] % width),\n geom.bounds[1] - (geom.bounds[1] % width),\n geom.bounds[2] + (-geom.bounds[2] % width),\n geom.bounds[3] + (-geom.bounds[3] % width),\n )",
"def get_bounding_box(im):\n coords = np.where(im)\n \n return np.array([np.min(coords[0]), np.max(coords[0]), \n np.min(coords[1]), np.max(coords[1])])",
"def boundingRect(self):\n return QRectF()",
"def getbbox(self):\r\n img_ = (self._instance > 0)\r\n rows = np.any(img_, axis=1)\r\n cols = np.any(img_, axis=0)\r\n rmin, rmax = np.argmax(rows), img_.shape[0] - 1 - np.argmax(np.flipud(rows))\r\n cmin, cmax = np.argmax(cols), img_.shape[1] - 1 - np.argmax(np.flipud(cols))\r\n return (rmin, rmax, cmin, cmax)",
"def compute_bounding_box(homography, w, h):\n corners = np.array([[0,0],\n [0,h],\n [w,0],\n [w,h]])\n t_corners = apply_homography(corners, homography)\n return np.array([t_corners.min(axis=0),t_corners.max(axis=0)],\n dtype= np.int)\n #[top-left, btm-right]",
"def draw_bounding_box(self):\n # Gets the bounding box\n xmin, ymin, xmax, ymax = self.get_bounding_box()\n\n # Gets the actual coordinates\n width = xmax - xmin\n height = ymax - ymin\n center_x = xmin + (width)/2\n center_y = ymin + (height)/2\n\n arcade.draw_rectangle_outline(center_x, center_y, width, height, (255, 0, 0))",
"def getRectangularKernel(size = (5,5)):\n\treturn cv2.getStructuringElement(cv2.MORPH_RECT, size)"
] | [
"0.7676917",
"0.72490233",
"0.71938384",
"0.7107463",
"0.70778424",
"0.7075241",
"0.6987138",
"0.698335",
"0.69716114",
"0.691494",
"0.6906388",
"0.6903976",
"0.68773127",
"0.685709",
"0.6815819",
"0.68036884",
"0.6765353",
"0.6762905",
"0.6715452",
"0.66673505",
"0.66570497",
"0.66543585",
"0.66437966",
"0.6637142",
"0.6635336",
"0.6630181",
"0.66187555",
"0.6617563",
"0.6607038",
"0.6564456"
] | 0.74042195 | 1 |
Calculates the centroid of the contour from its image moments (the moments up to the third order of a polygon or rasterized shape). | def __CalculateCentroid(self, contour):
moments = cv2.moments(contour)
centroid = (-1, -1)
if moments["m00"] != 0:
centroid = (int(round(moments["m10"] / moments["m00"])),
int(round(moments["m01"] / moments["m00"])))
return centroid | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def centroid(self):\n return self.contours_to_matrix().mean(axis=0)",
"def calculateCentroid(self,image):\n\t\tim=cv2.imread(image,0) #reads it in greyscale\n\t\tret,thresh = cv2.threshold(img_copy,128,255,cv2.THRESH_OTSU)\n\t\tim2,contours,hierarchy = cv2.findContours(thresh, 1, 2)\n\t\tcnt = contours[0]\n\t\tM = cv2.moments(cnt)\n\t\tcx = int(M['m10']/M['m00'])\n\t\tcy = int(M['m01']/M['m00'])\n\t\tcentroid=(cx,cy)\n\t\treturn centroid",
"def calc_centroid(self):\n num = 0\n centroid = numpy.zeros(3, float)\n for atm in self:\n if atm.position is not None:\n centroid += atm.position\n num += 1\n return centroid / num",
"def centroid(self): # -> BaseGeometry:\n ...",
"def getContourCentroid(x, y, w, h):\n coordXCentroid = (x+x+w)/2\n coordYCentroid = (y+y+h)/2\n objectCentroid = (int(coordXCentroid),int(coordYCentroid))\n return objectCentroid",
"def centroid(self) -> Point:\n points = self.normalized_array\n centroids = [np.average(points[[0, i, i + 1], :-1], axis=0) for i in range(1, points.shape[0] - 1)]\n weights = [det(self._normalized_projection()[[0, i, i + 1]]) / 2 for i in range(1, points.shape[0] - 1)]\n return Point(*np.average(centroids, weights=weights, axis=0))",
"def centroid(self):\n x, y = self.coordinates\n A = 0.5 * sum(x[i]*y[i+1] - x[i+1]*y[i] for i in range(-1, len(self)-1))\n cx = sum((x[i] + x[i+1]) * (x[i]*y[i+1] - x[i+1]*y[i])\n for i in range(-1, len(self)-1)) / (6*A)\n cy = sum((y[i] + y[i+1]) * (x[i]*y[i+1] - x[i+1]*y[i])\n for i in range(-1, len(self)-1)) / (6*A)\n return Point((cx, cy), properties=self.properties, crs=self.crs)",
"def getCentroid(self):\n if len(self.points) == 0:\n # None\n return None\n elif len(self.points) == 1:\n # Same point\n return self.points[0]\n elif len(self.points) == 2:\n # Middle of a segment\n return Segment(*self.points).middle\n elif len(self.points) == 3:\n # Intersection point of 2 medians\n return Point.average(self.points)\n else:\n # Geometric decomposition to compute centroids (wikipedia)\n n = len(self.points) # n is the number of points\n # There are n-2 forms\n forms = [Form([self.points[0]] + self.points[i:i + 2]) for i in range(1, n - 1)]\n # So n-2 centroids and areas, except if some of the points are one upon another, no area is null\n centroids = [form.center for form in forms]\n areas = [form.area for form in forms]\n # we compute the average centroid weighted by the areas\n weighted_centroid = Point.sum([a * c for (c, a) in zip(centroids, areas)])\n centroid = weighted_centroid / sum(areas)\n return centroid",
"def centroid(cnt):\n\tM = cv2.moments(cnt)\n\tcx = int(M['m10']/M['m00'])\n\tcy = int(M['m01']/M['m00'])\n\treturn (cx, cy)",
"def get_contour_centroid(contour):\n M = cv2.moments(contour)\n cx = int(M[\"m10\"] / M[\"m00\"])\n cy = int(M[\"m01\"] / M[\"m00\"])\n return (cx, cy)",
"def centroid(im, mask=None, w=None, x=None, y=None):\n from numpy import ones, arange, meshgrid\n # 2009-09-02 13:35 IJC: Created\n if mask==None:\n mask = ones(im.shape)\n if w==None:\n w = ones(im.shape)\n if not (im.shape==mask.shape and im.shape==w.shape):\n print \"Image, mask, and weights must have same shape! Exiting.\"\n return -1\n if x==None or y==None:\n xx = arange(im.shape[1])\n yy = arange(im.shape[0])\n x,y = meshgrid(xx,yy)\n x0 = (x*im*mask*w).sum()/(im*mask*w).sum()\n y0 = (y*im*mask*w).sum()/(im*mask*w).sum()\n\n return (x0,y0)",
"def centroid(im, mask=None, w=None, x=None, y=None):\n from numpy import ones, arange, meshgrid\n # 2009-09-02 13:35 IJC: Created\n if mask==None:\n mask = ones(im.shape)\n if w==None:\n w = ones(im.shape)\n if not (im.shape==mask.shape and im.shape==w.shape):\n print \"Image, mask, and weights must have same shape! Exiting.\"\n return -1\n if x==None or y==None:\n xx = arange(im.shape[1])\n yy = arange(im.shape[0])\n x,y = meshgrid(xx,yy)\n x0 = (x*im*mask*w).sum()/(im*mask*w).sum()\n y0 = (y*im*mask*w).sum()/(im*mask*w).sum()\n\n return (x0,y0)",
"def compute_polygon_centroid_2d(polygon):\r\n return geometry.gmComputePolygonCentroid(polygon)",
"def centroid(t, v):\n c = numpy.zeros(v[0].shape)\n total_area = 0\n for i in range(len(t)):\n p = vertices(t[i], v)\n ct = triangle.centroid(p)\n area = triangle.area(p)\n c += area * ct\n total_area += area\n c /= total_area\n return c",
"def getCentroid(self):\n centroid = 0.0\n sumMagnitude = 0.0\n\n for i in range(0,self.nUniquePoints):\n freq,magnitude = self.fDomain[i]\n\n centroid += freq*magnitude\n sumMagnitude += magnitude\n \n centroid /= sumMagnitude\n return centroid",
"def centroid(self):\n return _property_geo(arctern.ST_Centroid, self)",
"def centroid(sign, FS):\n\n time = compute_time(sign, FS)\n\n energy, time_energy=signal_energy(sign, time)\n\n total_energy = np.dot(np.array(time_energy),np.array(energy))\n energy_sum = np.sum(energy)\n\n if energy_sum == 0 or total_energy == 0:\n centroid = 0\n else:\n centroid = total_energy / energy_sum\n return centroid",
"def calcCentroid(self):\n size = len(self.vectors)\n # zip all features together\n zipped = zip(*self.vectors)\n # Calculate the mean for each feature/column\n centroid = [math.fsum(column)/size for column in zipped]\n \n return centroid",
"def centroid(self) -> Point[Scalar]:\n return self._context.multipoint_centroid(self)",
"def calculate_polygon_centroid(polygon):\n\n # Make sure it is numeric\n P = numpy.array(polygon)\n\n # Get area - needed to compute centroid\n A = calculate_polygon_area(P, signed=True)\n\n # Extract x and y coordinates\n x = P[:, 0]\n y = P[:, 1]\n\n # Exercise: Compute C as shown in http://paulbourke.net/geometry/polyarea\n a = x[:-1] * y[1:]\n b = y[:-1] * x[1:]\n\n cx = x[:-1] + x[1:]\n cy = y[:-1] + y[1:]\n\n Cx = numpy.sum(cx * (a - b)) / (6. * A)\n Cy = numpy.sum(cy * (a - b)) / (6. * A)\n\n # Create Nx2 array and return\n C = numpy.array([Cx, Cy])\n return C",
"def centroid_for_uncomputed_shapes(shape_list: List[List[Tuple[float, float]]]) -> Tuple[float, float]:\n centroids = []\n areas = []\n for s in shape_list:\n centroids.append(convex_centroid(s))\n areas.append(convex_area(s))\n return centroid_for_shapes(centroids, areas)",
"def centroidFloat(cnt):\n M = cv2.moments(cnt)\n cx = M['m10']/M['m00']\n\tcy = M['m01']/M['m00']\n\treturn (cx, cy)",
"def calc_centroid(self):\n sumX = 0.0\n sumY = 0.0\n dis = 0.0\n for p in self.points:\n sumX += p.x\n sumY += p.y\n d = p.distance(self.centroid)\n if dis < d: dis = d\n # radius is the longest distance within points\n self.radius = dis + 0.1\n size = len(self.points)\n if size:\n return Point(x=float(sumX)/size, y=float(sumY)/size)\n else:\n return self.centroid",
"def estimate_centroid(self):\r\n\t\tstrain = self.strain_distribution_compr(self.max_pure_compresive_strain,\\\r\n\t\t\tself.max_pure_compresive_strain)\r\n\t\tself.geometric_centrod = (self.depth/2) \r\n\t\tself.plastic_centroid = (self.depth/2)+\\\r\n\t\t\t(self.sectional_moment(strain, self.depth/2)/\\\r\n\t\t\tself.sectional_force(strain))",
"def get_centroid(moments):\n if moments['m00'] > 0:\n centroid_x = moments['m10']/moments['m00']\n centroid_y = moments['m01']/moments['m00']\n else:\n centroid_x = 0.0\n centroid_y = 0.0\n return centroid_x, centroid_y",
"def centroid(self) -> PointValue:\n return ops.GeoCentroid(self).to_expr()",
"def calc_centroid(self, points):\n\t\tself.canvas.create_polygon(points)\n\t\tx = [i[0] for i in points] # all the math is wrong :(\n\t\ty = [j[1] for j in points]\n\n\t\tarea = x[0] * (y[0] - y[-1])\n\t\tx_hat = (x[0] ** 2) * (y[0] - y[-1]) / (2) \n\t\ty_hat = -(y[0] ** 2) * (x[0] - x[-1]) / (2)\n\n\t\tfor i in range(1, len(points) - 1):\n\t\t\tdt = length(x[i], y[i], x[i - 1], y[i - 1])\n\t\t\tdy = y[i] - y[i - 1]\n\t\t\tdx = x[i] - x[i - 1]\n\t\t\tarea += 2 * x[i] * dy\n\t\t\tx_hat += (x[i] ** 2) * dy\n\t\t\ty_hat -= (y[i] ** 2) * dx\n\n\t\tarea += x[-1] * (y[-1] - y[-2])\n\t\tx_hat += (x[-1] ** 2) * (y[-1] - y[-2]) / 2\n\t\ty_hat -= (y[-1] ** 2) * (x[-1] - x[-2]) / 2\n\t\tarea /= 2\n\t\tx_hat /=2\n\t\ty_hat /= 2\n\t\tprint(\"Area: %s\\nX: %s\\nY: %s\" % (area, x_hat/area, y_hat/area))\n\t\treturn x_hat/area, y_hat/area",
"def centroid_of_rect(roi):\n return int(roi.shape[0] / 2), int(roi.shape[1] / 2)",
"def test_polygon_centroids(self):\n\n # Create closed simple polygon (counter clock wise)\n P = numpy.array([[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]])\n C = calculate_polygon_centroid(P)\n\n msg = ('Calculated centroid was (%f, %f), expected '\n '(0.5, 0.5)' % tuple(C))\n assert numpy.allclose(C, [0.5, 0.5]), msg\n\n # Create closed simple polygon (clock wise)\n # FIXME (Ole): Not sure whether to raise an exception or\n # to return absolute value in this case\n P = numpy.array([[0, 0], [0, 1], [1, 1], [1, 0], [0, 0]])\n C = calculate_polygon_centroid(P)\n\n msg = ('Calculated centroid was (%f, %f), expected '\n '(0.5, 0.5)' % tuple(C))\n assert numpy.allclose(C, [0.5, 0.5]), msg\n\n # Not starting at zero\n # Create closed simple polygon (counter clock wise)\n P = numpy.array([[168, -2], [169, -2], [169, -1],\n [168, -1], [168, -2]])\n C = calculate_polygon_centroid(P)\n\n msg = ('Calculated centroid was (%f, %f), expected '\n '(168.5, -1.5)' % tuple(C))\n assert numpy.allclose(C, [168.5, -1.5]), msg\n\n # Realistic polygon\n filename = '%s/%s' % (TESTDATA, 'test_polygon.shp')\n layer = read_layer(filename)\n geometry = layer.get_geometry()\n\n P = geometry[0]\n C = calculate_polygon_centroid(P)\n\n # Check against reference centroid\n reference_centroid = [106.7036938, -6.134533855] # From qgis\n assert numpy.allclose(C, reference_centroid, rtol=1.0e-8)\n\n # Store centroid to file (to e.g. check with qgis)\n out_filename = unique_filename(prefix='test_centroid', suffix='.shp')\n V = Vector(data=None,\n projection=DEFAULT_PROJECTION,\n geometry=[C],\n name='Test centroid')\n V.write_to_file(out_filename)\n\n # Another realistic polygon\n P = numpy.array([[106.7922547, -6.2297884],\n [106.7924589, -6.2298087],\n [106.7924538, -6.2299127],\n [106.7922547, -6.2298899],\n [106.7922547, -6.2297884]])\n\n C = calculate_polygon_centroid(P)\n\n # Check against reference centroid from qgis\n reference_centroid = [106.79235602697445, -6.229849764722536]\n msg = 'Got %s but expected %s' % (str(C), str(reference_centroid))\n assert numpy.allclose(C, reference_centroid, rtol=1.0e-8), msg\n\n # Store centroid to file (to e.g. check with qgis)\n out_filename = unique_filename(prefix='test_centroid', suffix='.shp')\n V = Vector(data=None,\n projection=DEFAULT_PROJECTION,\n geometry=[C],\n name='Test centroid')\n V.write_to_file(out_filename)",
"def center(self):\n\n ca_atoms = self.ca_atoms\n ca_atom_vectors = ca_atoms[\"ca.atom\"].to_list()\n ca_atom_vectors = [i for i in ca_atom_vectors if i is not None]\n centroid = self.center_of_mass(ca_atom_vectors, geometric=False)\n centroid = Vector(centroid)\n\n return centroid"
] | [
"0.7549272",
"0.7480494",
"0.74477065",
"0.72905296",
"0.72855806",
"0.7277071",
"0.72547257",
"0.7159602",
"0.71536976",
"0.70305306",
"0.7029446",
"0.7029446",
"0.7026189",
"0.6984927",
"0.69735426",
"0.6927554",
"0.69200563",
"0.69048506",
"0.69047856",
"0.6894379",
"0.68849826",
"0.68483096",
"0.67892766",
"0.6761048",
"0.67595977",
"0.6730462",
"0.67082334",
"0.6707628",
"0.6683429",
"0.6682657"
] | 0.80175775 | 0 |
Calculate the circumcircle of an object using the function cv2.minEnclosingCircle(). It is the circle that completely covers the object with minimum area. | def __CalculateCircle(self, contour):
return cv2.minEnclosingCircle(contour) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def boundingCircle(self):\n\n try:\n import cv2\n except:\n logger.warning(\"Unable to import cv2\")\n return None\n\n # contour of the blob in image\n contour = self.contour()\n\n points = []\n # list of contour points converted to suitable format to pass into cv2.minEnclosingCircle()\n for pair in contour:\n points.append([[pair[0], pair[1]]])\n\n points = np.array(points)\n\n (cen, rad) = cv2.minEnclosingCircle(points);\n\n return (cen[0], cen[1], rad)",
"def find_min_circle(contours):\n center = (0, 0)\n radius = 0\n\n if len(contours) > 0:\n #compute the minimum enclosing circle and centroid\n c = max(contours, key=cv2.contourArea)\n (x, y), radius = cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n else:\n #ball not found\n center = None\n radius = None\n return center, radius",
"def getCircleCircumscribed(self):\n p1, p2, p3 = self.points\n a1 = - (p2.x - p1.x) / (p2.y - p1.y)\n b1 = (p2.x ** 2 - p1.x ** 2 + p2.y ** 2 - p1.y ** 2) / (2 * (p2.y - p1.y))\n a2 = - (p3.x - p2.x) / (p3.y - p2.y)\n b2 = (p3.x ** 2 - p2.x ** 2 + p3.y ** 2 - p2.y ** 2) / (2 * (p3.y - p2.y))\n x = (b1 - b2) / (a2 - a1)\n y = a1 * x + b1\n radius = math.hypot(p1.x - x, p1.y - y)\n return Circle(x, y, radius=radius)",
"def circle_radius(self):\n return min([self.container.width, self.container.height]) / 4",
"def fit_central_circle(image, radius_lower_bound=170, radius_upper_bound=190):\n\n smoothed = smooth_gaussian(image.astype(np.float), sigma=5)\n edges = find_edges_sobel(smoothed)\n thresh = threshold_otsu(edges)\n\n hmm = 170, 190\n hough_radii = np.arange(140, 170, 2)\n hough_res = hough_circle(thresh, hough_radii)\n\n circles = find_n_best_hough_circles(hough_radii, hough_res, 1)\n circle = circles[0]\n\n return circle",
"def houghCircle(img: np.ndarray, min_radius: float, max_radius: float) -> list:\r\n\r\n canny_cv, canny_my = edgeDetectionCanny(img, 200, 100)\r\n edges = []\r\n\r\n for x in range(canny_cv.shape[0]):\r\n for y in range(canny_cv.shape[1]):\r\n if canny_cv[x, y] == 255:\r\n edges.append((x, y))\r\n\r\n thresh = 0.47 # at least 47% of the pixels of a circle must be detected\r\n steps = 100 # number of samples from each circle\r\n\r\n points = []\r\n for r in range(min_radius, max_radius + 1):\r\n for t in range(steps):\r\n alpha = 2 * pi * t / steps\r\n x = int(r * cos(alpha))\r\n y = int(r * sin(alpha))\r\n points.append((x, y, r))\r\n\r\n temp_circles = {} # dict{circle center, radius: counter}\r\n for x, y in edges: # iterate the pixels of the edges:\r\n for dx, dy, r in points:\r\n b = x - dx\r\n a = y - dy\r\n count = temp_circles.get((a, b, r))\r\n if count is None:\r\n count = 0\r\n temp_circles[(a, b, r)] = count + 1\r\n\r\n # now add the appropriate circles to the ans list:\r\n circles = []\r\n sorted_temp = sorted(temp_circles.items(), key=lambda i: -i[1])\r\n for circle, counter in sorted_temp:\r\n x, y, r = circle\r\n # once a circle has been selected, we reject all the circles whose center is inside that circle\r\n if counter / steps >= thresh and all((x - xc) ** 2 + (y - yc) ** 2 > rc ** 2 for xc, yc, rc in circles):\r\n circles.append((x, y, r))\r\n\r\n return circles",
"def find_largest_enclosing_circle(img):\n if img.dtype is not np.dtype(np.uint8):\n raise ValueError('The input image data type should be uint8.')\n\n # Calculate histogram.\n hist = cv.calcHist([img], [0], None, [256], [0, 256])\n\n # Find the min and max intensity value on the image.\n min_i, max_i = find_histogram_range(hist)\n\n # Threshold the image at the median intensity.\n _, binary_img = cv.threshold(img, (max_i + min_i) / 2, 255, cv.THRESH_BINARY)\n\n # Find contours.\n contours, _ = cv.findContours(binary_img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_TC89_L1)\n if len(contours) == 0:\n return (0, 0), 0\n\n # Find a minimum enclosing circle for each contour, and find the largest one.\n circles = [cv.minEnclosingCircle(contour) for contour in contours]\n max_circle = max(circles, key=lambda circle: circle[1])\n (center_x, center_y), radius = max_circle\n return (int(center_x), int(center_y)), int(radius)",
"def cropCircleROI(image, additionalCut = 5):\n Rmin = np.min(image.shape[:-1])/3\n Rmin = 1250 / 3040 * image.shape[0]\n Rmax = 1400 / 3040 * image.shape[0]\n\n #downscale image for better performance\n reduceFactor = 5 # squared\n hough_radii = np.arange(Rmin/reduceFactor, Rmax/reduceFactor, dtype = int)\n\n downSampledImage = block_reduce(cv2.cvtColor(image, cv2.COLOR_BGR2GRAY), block_size = (reduceFactor, reduceFactor), func = np.max)\n downSampledEdges = canny(downSampledImage, sigma=3, low_threshold=5, high_threshold=10)\n\n hough_res = hough_circle(downSampledEdges, hough_radii)\n downSampledCircle = np.unravel_index(np.argmax(hough_res, axis=None), hough_res.shape)\n circle = np.array([downSampledCircle[1], downSampledCircle[2], hough_radii[downSampledCircle[0]]])*reduceFactor\n\n circleMask_ = cv2.circle(np.ones(image.shape[:-1],dtype = \"uint8\"), (circle[1], circle[0]), circle[2]-additionalCut, 0, thickness = -1)\n\n return [np.ma.array(image[:,:,i], mask = circleMask_) for i in range (image.shape[2])]",
"def circle_circumference(a):\n return (2*a*math.pi)",
"def __CalculateCircularity(self, contour):\r\n if len(contour) < 2:\r\n return 0\r\n\r\n perimeter = cv2.arcLength(contour, False)\r\n area = self.__CalculateArea(contour)\r\n return (4 * math.pi * area) / (perimeter * perimeter)",
"def circle_area(self):\n return np.pi * self.ring_radius ** 2",
"def __CalculateEllipse(self, contour):\r\n if len(contour) > 5:\r\n return cv2.fitEllipse(contour)\r\n\r\n return cv2.minAreaRect(contour)",
"def center_on_box(img, radius, min_ref, xmin, xmax, ymin, ymax, na_val=-9999):\n x, y = num.meshgrid(num.arange(-radius, radius), num.arange(-radius, radius))\n coords = [(i, j) for i, j in zip(x.flatten(), y.flatten()) if (i ** 2 + j ** 2) ** 0.5 <= radius]\n fit = [num.mean(img[(xmin + i):(xmax + i), (ymin + j):(ymax + j)]) for i, j in coords]\n if num.nanmin(fit) <= min_ref:\n return num.array(coords[num.nanargmin(fit)])\n else:\n return num.array([na_val, na_val])",
"def circle_contractivity_radius(self,acc=1.e-13,rmax=1000):\n from nodepy.utils import bisect\n\n tol=1.e-14\n r=bisect(0,rmax,acc,tol,self.__num__()._is_circle_contractive)\n return r",
"def object_circularity(labelmask, label):\n # Find z slice with most pixels from object.\n z, i, j = np.where(labelmask == label)\n zmax = mode(z)[0][0]\n # Select 2D image representing object's max Z-slice.\n im = np.where(labelmask[zmax] == label, 1, 0)\n # Calculate circularity from object perimeter and area.\n regions = regionprops(im)\n perimeter = regions[0].perimeter\n area = regions[0].area\n if (perimeter == 0):\n perimeter = 0.5\n circularity = 4 * np.pi * area / (perimeter ** 2) \n return circularity",
"def circumcenter(self) -> Point:\n e1, e2, e3 = self.edges\n bisector1 = e1._line.perpendicular(e1.midpoint, plane=self._plane)\n bisector2 = e2._line.perpendicular(e2.midpoint, plane=self._plane)\n return bisector1.meet(bisector2)",
"def houghCircles(img, minDist=20, param1=50, param2=30, minRadius=0, maxRadius=0):\n\ttmp = grayscale(img)\n\ttmp = cv2.medianBlur(tmp, 5)\n\tcimg = cv2.cvtColor(tmp, cv2.COLOR_GRAY2BGR)\n\tcircles = cv2.HoughCircles(tmp, cv2.HOUGH_GRADIENT, 1, minDist, param1=param1, param2=param2, minRadius=minRadius, maxRadius=maxRadius)\n\tif circles is None:\n\t\tprint \"No circles found, please adjust params...\\n\"\n\t\treturn None\n\tcircles = np.uint16(np.around(circles))\n\treturn circles",
"def circumradius(T,binary_mask):\n (x1, y1), (x2, y2), (x3, y3) = T # extracting the points. \n \n D=2*(x1*(y2-y3)+x2*(y3-y1)+x3*(y1-y2)) # Diameter\n if D!=0:\n #Centroid of the cicumcircle\n Ux=(((x1**2+y1**2)*(y2-y3))+((x2**2+y2**2)*(y3-y1))+((x3**2+y3**2)*(y1-y2)))/D\n Uy=(((x1**2+y1**2)*(x3-x2))+((x2**2+y2**2)*(x1-x3))+((x3**2+y3**2)*(x2-x1)))/D\n \n #radius\n r = sqrt((Ux-x2)**2+(Uy-y2)**2)\n r=r+1\n \n #Determining the sign: it is positive if the centroid of the circumcricle is in the foreground\n x=np.floor(Ux).astype(int)\n y=np.floor(Uy).astype(int)\n\n if (x >=binary_mask.shape[0] or y >=binary_mask.shape[1]):\n r=-r\n elif (x<0 or y<0):\n r=-r\n elif binary_mask[x,y]:\n r=r\n else:\n r=-r\n return r\n else:\n return False",
"def getContourCentroid(x, y, w, h):\n coordXCentroid = (x+x+w)/2\n coordYCentroid = (y+y+h)/2\n objectCentroid = (int(coordXCentroid),int(coordYCentroid))\n return objectCentroid",
"def get_radius(center, rad, speed_limit):\n i = Intersection(center, rad, speed_limit)\n return i.get_radius()",
"def find_inner_circle_parameters(plane_array, rmin=200, rmax=250):\n\n xdim, ydim = plane_array.shape\n\n edges = find_edges_sobel(plane_array)\n\n hough_radii = np.arange(rmin, rmax, 3)\n hough_res = hough_circle(edges, hough_radii)\n\n # Find the two clearest circles\n c1, c2 = find_n_best_hough_circles(hough_radii, hough_res, 2)\n\n # Work out which is the inner circle\n r1 = c1[2]\n r2 = c2[2]\n if r1 > r2:\n inner_circle_radius = r2\n cx, cy, r = c2\n else:\n inner_circle_radius = r1\n cx, cy, r = c1\n\n return cx, cy, r",
"def objects_radius(self, centre, radius):",
"def fit_circle(x,y,center_estimate=(0,0)):\r\n def calc_R(center):\r\n \"\"\"\r\n Calculate the distance of each 2D point from the center (xc, yc) \r\n \"\"\"\r\n xc = center[0]\r\n yc = center[1]\r\n return np.sqrt((x-xc)**2 + (y-yc)**2)\r\n \r\n def f_2(center):\r\n \"\"\"\r\n Calculate the algebraic distance between the data points and the mean\r\n circle centered at (xc, yc)\r\n \"\"\"\r\n Ri = calc_R(center)\r\n return Ri - Ri.mean()\r\n\r\n center, ier = optimize.leastsq(f_2,center_estimate)\r\n \r\n Ri = calc_R(center)\r\n R = np.mean(Ri)\r\n residue = sum((Ri - R)**2)\r\n return R, center, residue",
"def circle_radius(a, b, c):\n # the sides cannot be negative\n if a < 0 or b < 0 or c < 0:\n return None\n else:\n # semi-perimeter of the circle\n p = (a + b + c) / 2\n\n # area of the traingle\n area = sqrt(p * (p - a) *\n (p - b) * (p - c))\n # Radius of the incircle\n radius = area / p\n # Return the radius\n return radius",
"def area_of_circle(radius):\n return radius",
"def incircle(self, a, b, c):\n m11, m12 = a.x - self.x, a.y - self.y\n m13 = m11 * m11 + m12 * m12\n m21, m22 = b.x - self.x, b.y - self.y\n m23 = m21 * m21 + m22 * m22\n m31, m32 = c.x - self.x, c.y - self.y\n m33 = m31 * m31 + m32 * m32\n det1 = m11 * (m22 * m33 - m23 * m32)\n det2 = m12 * (m21 * m33 - m23 * m31)\n det3 = m13 * (m21 * m32 - m22 * m31)\n return near(det1 - det2 + det3, 0)",
"def find_center_mass(contour):\n M = cv2.moments(contour)\n if M[\"m00\"] == 0:\n (x, y), _ = cv2.minEnclosingCircle(contour)\n cR = int(y)\n cC = int(x)\n # raise ValueError(\"Contour too small to find a new center.\")\n else:\n cR = int(M[\"m01\"] / M[\"m00\"])\n cC = int(M[\"m10\"] / M[\"m00\"])\n return (cR, cC)",
"def circumference(self):\n return math.pi * self.radius * 2",
"def circle_area(circle):\n return pi * circle.radius * circle.radius",
"def circumference(self):\n return self.width + self.height"
] | [
"0.74524856",
"0.72214043",
"0.6968209",
"0.655094",
"0.637335",
"0.62411404",
"0.61780167",
"0.61020416",
"0.6038088",
"0.6018948",
"0.6009186",
"0.59972024",
"0.5996103",
"0.5971567",
"0.5965567",
"0.5927634",
"0.5920173",
"0.5918442",
"0.59156424",
"0.5889828",
"0.58796084",
"0.5877014",
"0.5871086",
"0.586304",
"0.5827431",
"0.58237016",
"0.5807667",
"0.5807441",
"0.5785175",
"0.5781718"
] | 0.80844194 | 0 |
Calculate the contour extent: the ratio of contour area to bounding box area. | def __CalculateExtend(self, contour):
area = self.__CalculateArea(contour)
boundingBox = self.__CalculateBoundingBox(contour)
return area / (boundingBox[2] * boundingBox[3]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def life_insurance_to_recive_total(self):\n pass",
"def extendability(self):\n self._extendability = 0.50 * self.ANA - 0.50 * self.DCC + 0.50 * self.MFA + 0.50 * self.NOP\n return round(self._extendability, 5)",
"def calculate(self):",
"def calculate(self):\n pass",
"def update_points(self):\n #Calculate Upper Section\n total = 0\n for box in self._upper_section:\n total += box.points\n self._upperSum = total\n\n if total >= 63:\n self._bonus = 35\n total += 35\n self._upperTotal = total\n\n # Calculate Lower Section\n total = 0\n for box in self._lower_section:\n total += box.points\n\n if self.get_box(\"Yahtzee\").points > 0:\n total = total + (self._yahtzee_count - 1) * 100 # Yahtzee Bonus\n\n self._lowerTotal = total\n\n #Total Points\n self._grandTotal = self._upperTotal + self._lowerTotal",
"def estimate_incumbent(self, startpoints):\n\n pass",
"def calculate(self):\r\n pass",
"def calibration(self) -> int:",
"def calculate(self):\r\n\r\n pass",
"def avg_extend_time(self):\r\n if self.total_extended:\r\n return self.total_extend_time/self.total_extended\r\n else: return 0",
"def extras_total(self):\n total = self.wides + self.no_balls + self.byes + self.leg_byes\n return total",
"def calc_calories(gpx_track, wt = 175, activity='Run'):",
"def calc_intertie_offset_generation (self):\n self.generation = \\\n self.forecast.get_generation(self.start_year,self.end_year)\n dist = self.comp_specs['distance to community']\n self.annual_transmission_loss = \\\n 1 - (\n (1- (self.comp_specs['transmission loss per mile']/ 100.0))\n ** dist)\n self.intertie_offset_generation = \\\n self.generation * (1 + self.annual_transmission_loss)\n\n gen_eff = self.intertie_generation_efficiency\n self.intertie_offset_generation_fuel_used = \\\n self.intertie_offset_generation / gen_eff\n #~ print 'self.proposed_generation',self.proposed_generation\n #~ print con",
"def overall_reduction(self):\n return 84",
"def private_pension_total(self):\n pass",
"def total_steps(self) -> global___Expression:",
"def energyplus_its(self):\n if self._energyplus_its is None:\n self._energyplus_its = 0\n return self._energyplus_its",
"def calc_base_eff_and_infl(level):\n return 2 + (level - 1)",
"def extend (self) :\n return (self.x_min, self.x_max, self.y_min, self.y_max)",
"def calculate_activities(self):\n # Sleep\n sleep = self.sleep_hours * 0.95\n\n # Work\n if self.work_intensity == self.INTENSITY_LOW:\n work_factor = 1.5\n elif self.work_intensity == self.INTENSITY_MEDIUM:\n work_factor = 1.8\n else:\n work_factor = 2.2\n work = self.work_hours * work_factor\n\n # Sport (entered in hours/week, so we must divide)\n if self.sport_intensity == self.INTENSITY_LOW:\n sport_factor = 4\n elif self.sport_intensity == self.INTENSITY_MEDIUM:\n sport_factor = 6\n else:\n sport_factor = 10\n sport = (self.sport_hours / 7.0) * sport_factor\n\n # Free time\n if self.freetime_intensity == self.INTENSITY_LOW:\n freetime_factor = 1.3\n elif self.freetime_intensity == self.INTENSITY_MEDIUM:\n freetime_factor = 1.9\n else:\n freetime_factor = 2.4\n freetime = self.freetime_hours * freetime_factor\n\n # Total\n total = (sleep + work + sport + freetime) / 24.0\n return decimal.Decimal(str(total)).quantize(TWOPLACES)",
"def _compute_cuantia_subtotal(self):\n for line in self:\n line.gasto = line.unidades * line.pvp",
"def intensity(self) -> int:",
"def get_fuel_total_saved (self):\n #~ print self.lost_heat_recovery\n #~ print self.intertie_offset_generation_fuel_used\n #~ print self.pre_intertie_generation_fuel_used\n #~ gen_eff = self.cd[\"diesel generation efficiency\"]\n #~ fuel_used = self.intertie_offset_generation / gen_eff\n\n generation_diesel_reduction = \\\n np.array(self.pre_intertie_generation_fuel_used\\\n [:self.actual_project_life])\n return - np.array(self.lost_heat_recovery[:self.actual_project_life]) +\\\n generation_diesel_reduction",
"def calculate_output(self):",
"def _get_coeffs(self):\n # lift (Clmax) and parasitic drag (Cd0max)\n self.cl = 0.0\n self.cd = 0.0\n kpp = 0.0\n\n for sail in self.sails:\n\n self.cl += sail.cl(self.awa) * sail.area * sail.bk\n self.cd += sail.cd(self.awa) * sail.area * sail.bk\n kpp += sail.cl(self.awa) ** 2 * sail.area * sail.bk * sail.kp\n\n self.cl /= self.area\n self.cd /= self.area\n\n # viscous quadratic parasitic drag and induced drag\n devisor_1 = self.area * self.cl ** 2\n devisor_2 = np.pi * self._heff(self.awa) ** 2\n self.CE = (kpp / devisor_1 if devisor_1 else 0.0) + (self.area / devisor_2 if devisor_2 else 0.0)\n\n # fraction of parasitic drag due to jib\n self.fcdj = 0.0\n for sail in self.sails:\n if sail.type == \"jib\":\n self.fcdj = (\n sail.bk * sail.cd(self.awa) * sail.area / (self.cd * self.area)\n )\n\n # final lift and drag\n self.cd = self.cd * (\n self.flat * self.fcdmult(self.flat) * self.fcdj + (1 - self.fcdj)\n ) + self.CE * self.cl ** 2 * self.flat ** 2 * self.fcdmult(self.flat)\n self.cl = self.flat * self.cl",
"def _area(self):\n self.area = 0.0\n for sail in self.sails:\n self.area += sail.area",
"def patrimony_total(self):\n pass",
"def _compute_ingreso_subtotal(self):\n for sub in self:\n sub.recurring_total = sum(\n line.ingreso for line in sub.animales_ids)",
"def total_length(self):\n # YOUR CODE HERE\n return abs(self.radius*self.angle)",
"def calc_points_office(self):\n if 'cong' in args.exp:\n if self.cnt_office >= 1:\n be = [0] * 8\n be += [1 if x == 'O' else 0 for x in self.b[ 0: 5]]\n be += [0] * 2\n be += [1 if x == 'O' else 0 for x in self.b[ 5:10]]\n be += [0] * 2\n be += [1 if x == 'O' else 0 for x in self.b[10:15]]\n be += [0] * 2\n be += [1 if x == 'O' else 0 for x in self.b[15:20]]\n be += [0] * 8\n max_points = 0\n vptab_office = (\n (0, 0, 0, 0, 0, 0),\n (0, 0, 1, 3, 6, 10),\n (0, 1, 3, 6, 10, 15),\n (0, 2, 5, 9, 14, 20),\n (0, 3, 7, 12, 18, 25),\n (0, 4, 9, 15, 22, 30)\n )\n for bi in range(20):\n if self.b[bi] == 'U':\n be[(bi // 5 + 1) * 7 + bi % 5 + 1] = 1\n total_visited = set()\n points = 0\n for i in range(8, 34):\n if be[i] and i not in total_visited:\n visited = floodfill(be, i)\n total_visited |= visited\n adj = min(len(visited), 5)\n for vi in visited:\n points += vptab_office[adj][self.f[(vi // 7 - 1) * 5 + vi % 7 - 1]]\n if points > max_points:\n max_points = points\n be[(bi // 5 + 1) * 7 + bi % 5 + 1] = 0\n return max_points\n else:\n if self.cnt_office >= 2:\n be = [0] * 8\n be += [1 if x == 'O' else 0 for x in self.b[ 0: 5]]\n be += [0] * 2\n be += [1 if x == 'O' else 0 for x in self.b[ 5:10]]\n be += [0] * 2\n be += [1 if x == 'O' else 0 for x in self.b[10:15]]\n be += [0] * 2\n be += [1 if x == 'O' else 0 for x in self.b[15:20]]\n be += [0] * 8\n points = 0\n total_visited = set()\n vptab_office = (\n (0, 0, 0, 0, 0, 0),\n (0, 0, 1, 3, 6, 10),\n (0, 1, 3, 6, 10, 15),\n (0, 2, 5, 9, 14, 20),\n (0, 3, 7, 12, 18, 25),\n (0, 4, 9, 15, 22, 30)\n )\n for i in range(8, 34):\n if be[i] and i not in total_visited:\n visited = floodfill(be, i)\n total_visited |= visited\n adj = min(len(visited), 5)\n for vi in visited:\n points += vptab_office[adj][self.f[(vi // 7 - 1) * 5 + vi % 7 - 1]]\n return points\n return 0"
] | [
"0.56703246",
"0.5525739",
"0.5475888",
"0.5367172",
"0.5338628",
"0.53284657",
"0.53263986",
"0.53008175",
"0.529843",
"0.5273235",
"0.52313775",
"0.5210509",
"0.5181706",
"0.51630366",
"0.5138338",
"0.51348615",
"0.50815606",
"0.50513184",
"0.504204",
"0.50351846",
"0.50212395",
"0.49979383",
"0.49934825",
"0.49804574",
"0.49557364",
"0.49522033",
"0.49494788",
"0.49321595",
"0.49305394",
"0.49235284"
] | 0.6228183 | 0 |
Check if a curve is convex or not. | def __IsConvex(self, contour):
return cv2.isContourConvex(contour) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def convex(self):\n # Convex has positive curvature (2nd derivative)\n # f\"(x) = 2a, so a > 0 corresponds to convex\n return (self.a > 0)",
"def _check_curve(layer: ogr.Layer) -> None:\n # Check if the feature geometry is polygonal:\n feature_defn = layer.GetLayerDefn()\n layer.ResetReading()\n feature = layer.GetNextFeature()\n while feature is not None:\n geom = feature.GetGeometryRef()\n name_wkt = geom.ExportToWkt()\n\n # Approximate a curvature by a polygon geometry:\n if 'curv' in name_wkt.lower():\n linear_geom = geom.GetLinearGeometry()\n new_feature = ogr.Feature(feature_defn)\n new_feature.SetGeometryDirectly(linear_geom)\n layer.CreateFeature(new_feature)\n layer.DeleteFeature(feature.GetFID())\n\n feature = layer.GetNextFeature()",
"def convex(self, *args, **kwargs) -> Any:\n pass",
"def convex(self):\n x, y = self.center\n angles = []\n l = len(self.points)\n for i in range(l - 1):\n A = self.points[(i + l - 1) % l]\n B = self.points[i % l]\n C = self.points[(i + 1) % l]\n u = Vector.createFromTwoPoints(A, B)\n v = Vector.createFromTwoPoints(C, B)\n angle = v ^ u\n if angle > pi:\n return True\n return False",
"def isSetCurve(self):\n return _libsbml.GeneralGlyph_isSetCurve(self)",
"def is_on_curve(self):\n if self.infinity:\n return True\n left = self.y * self.y\n right = self.x * self.x * self.x + self.ec.a * self.x + self.ec.b\n\n return left == right",
"def isConvex(data, boundaryPointsDict, triangleDict, approximation ,demo = True):\n\n # This loop the boundary points:\n for bdrPntIdx in list(boundaryPointsDict.keys()):\n point = np.array(data[bdrPntIdx]['Coordinate'])\n #The flag showing whether a point is on at least of the triangles after looping all the triangles:\n PntonConvexFlag = False\n # print('PntonConvexFlag ghable tri ha', PntonConvexFlag)\n for tri in list(origIdxtriangleDict.keys()):\n # print('triidx', tri)\n triangle = np.zeros([3,3])\n for corner in range(0,3):\n triangle[corner, :] = data[origIdxtriangleDict[tri][corner]]['Coordinate']\n dis, ptp = distFromPtToTri(point, triangle)\n # isIn = ptInTriangle(ptp, triangle, approximation_treshold)\n\n # if we find a triangle for the selected point such that their distance is zero, we dont need to check the\n # distance of that particular point to the rest of triangles, so we continue by selecting the next point\n if dis <= epsilon:\n PntonConvexFlag = True\n break\n # If at the end of the loop still the flag is Flag is false means that the particular point is not on none of the\n # triangles, so we can immediately decide the shape is non convex, and there is no need to check other points\n\n if not PntonConvexFlag:\n if demo:\n plotDemo(data, point, bdrPntIdx)\n return False\n\n # at the end of checking all the points, if there is no false as return we conclude that all the points are on the\n # convex hall and the shape is convex\n plotDemoIfConvex()\n return True",
"def isSetCurve(self):\n return _libsbml.ReferenceGlyph_isSetCurve(self)",
"def is_point_on_curve(self, P):\n x, y, = P[0], P[1]\n left = y * y\n right = (x * x * x) + (self.a * x) + self.b\n return (left - right) % self.p == 0",
"def isConvexApproximate(data, boundaryPointsDict, triangleDict, approximation, tolerance):\n outliersAllowed = int(np.floor(tolerance * len(list(boundaryPointsDict.keys()))))\n\n outliersCount = 0\n # This loop the boundary points:\n for bdrPntIdx in list(boundaryPointsDict.keys()):\n point = np.array(data[bdrPntIdx]['Coordinate'])\n #The flag showing whether a point is on at least of the triangles after looping all the triangles:\n PntonConvexFlag = False\n # print('PntonConvexFlag ghable tri ha', PntonConvexFlag)\n for tri in list(origIdxtriangleDict.keys()):\n # print('triidx', tri)\n triangle = np.zeros([3,3])\n for corner in range(0,3):\n triangle[corner, :] = data[origIdxtriangleDict[tri][corner]]['Coordinate']\n dis, ptp = distFromPtToTri(point, triangle)\n # isIn = ptInTriangle(ptp, triangle, approximation_treshold)\n\n # if we find a triangle for the selected point such that their distance is zero, we dont need to check the\n # distance of that particular point to the rest of triangles, so we continue by selecting the next point\n if dis <= approximation:\n PntonConvexFlag = True\n break\n # If at the end of the loop still the flag is Flag is false means that the particular point is not on none of the\n # triangles, so we can immediately decide the shape is non convex, and there is no need to check other points\n\n if not PntonConvexFlag:\n outliersCount += 1\n if outliersCount > outliersAllowed:\n return False\n\n # at the end of checking all the points, if there is no false as return we conclude that all the points are on the\n # convex hall and the shape is convex\n plotDemoIfConvex()\n return True",
"def isSetCurve(self):\n return _libsbml.ReactionGlyph_isSetCurve(self)",
"def assert_continuous(*curves: CubicBezierCurve) -> bool:\n if not curves:\n raise ValueError(\"CurveChecker.assert_continuous() cannot be called on an empty list\")\n\n previous_curve = curves[0]\n for curve in curves[1:]:\n if previous_curve.p1 != curve.p0:\n return False\n previous_curve = curve\n return True",
"def isSetCurve(self):\n return _libsbml.SpeciesReferenceGlyph_isSetCurve(self)",
"def is_polygon_convex(polygon):\n c = center_of_mass_polygon(polygon)\n for i in range(-1, len(polygon) - 1):\n p0 = polygon[i]\n p1 = polygon[i - 1]\n p2 = polygon[i + 1]\n v0 = subtract_vectors(c, p0)\n v1 = subtract_vectors(p1, p0)\n v2 = subtract_vectors(p2, p0)\n a1 = angle_smallest_vectors(v1, v0)\n a2 = angle_smallest_vectors(v0, v2)\n if a1 + a2 > pi:\n return False\n return True",
"def is_curve(geo):\n geo = geo.strip().upper()\n\n for a_geo_type_in_curve_geo_types_list in CURVE_TYPES:\n if geo.startswith(a_geo_type_in_curve_geo_types_list):\n return True\n\n continue\n\n return False",
"def ispoint(x):\n if isvect(x) and x[3] > 0.0:\n return True\n return False",
"def is_point_on_curve(a, b, p, x, y):\n assert isinstance(a, Bn)\n assert isinstance(b, Bn)\n assert isinstance(p, Bn) and p > 0\n assert (isinstance(x, Bn) and isinstance(y, Bn)) \\\n or (x == None and y == None)\n\n if x == None and y == None:\n return True\n\n lhs = (y * y) % p\n rhs = (x*x*x + a*x + b) % p\n on_curve = (lhs == rhs)\n\n return on_curve",
"def ok(self, point):\n [x1, x2, x3, x4, x5, x6] = point.decisions\n if x1 + x2 -2 < 0:\n return False\n if 6 - x1 - x2 < 0:\n return False\n if 2 - x2 + x1 < 0:\n return False\n if 2 - x1 + 3*x2 < 0:\n return False\n if 4 - (x3 - 3)**2 - x4 < 0:\n return False\n if (x5 - 3)**3 + x6 - 4 < 0:\n return False\n for i, d in enumerate(point.decisions):\n if d < self.decisions[i].low or d > self.decisions[i].high:\n print i, d, self.decisions[i].low, self.decisions[i].high\n return False\n return True",
"def in_ellipse(x,y,a,b):\n return ellipse(x,y,a,b) <= 1",
"def is_valid(self):\n if len(self.exterior) < 3:\n return False\n return self.to_shapely_polygon().is_valid",
"def is_convex(reg, abs_tol=ABS_TOL):\n if not is_fulldim(reg):\n return True\n if len(reg) == 0:\n return True\n outer = envelope(reg)\n if is_empty(outer):\n # Probably because input polytopes were so small and ugly..\n return False, None\n Pl, Pu = reg.bounding_box\n Ol, Ou = outer.bounding_box\n bboxP = np.hstack([Pl, Pu])\n bboxO = np.hstack([Ol, Ou])\n if (\n sum(abs(bboxP[:, 0] - bboxO[:, 0]) > abs_tol) > 0 or\n sum(abs(bboxP[:, 1] - bboxO[:, 1]) > abs_tol) > 0):\n return False, None\n if is_fulldim(outer.diff(reg)):\n return False, None\n else:\n return True, outer",
"def hasCollinearPoints(listOfPoints):\r\n for points in listOfPoints:\r\n if isCollinear(points[0], points[1], points[2]): #If any of the points are collinear\r\n return True\r\n else:\r\n pass\r\n return False #If none of the points are collinear\r",
"def point_in_poly(x_point: float, y_point: float) -> bool:\n\n # Semi-F47 extended states all devices should be able to ride out a sag of up to 1 cycle.\n if x_point <= 1:\n return False\n\n point = shapely.geometry.Point(x_point, y_point)\n return POLYGON.contains(point) or POLYGON.intersects(point)",
"def ispolygonXY(a):\n return ispolygon(a) and isXYPlanar(a)",
"def checkconvexity(self): # 3\n res = self.__obj.checkconvexity()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)",
"def check_convexity(hull, used_pivots):\n for instance in used_pivots:\n if not check_inside_hull(hull, instance):\n return False\n return True",
"def interior_contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_positive( self.eval(Vobj) ) \n except AttributeError:\n pass\n \n if Vobj.is_line(): \n return self.polyhedron()._is_zero( self.eval(Vobj) )\n elif Vobj.is_vertex(): \n return self.polyhedron()._is_positive( self.eval(Vobj) ) \n else: # Vobj.is_ray()\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )",
"def below_curve(cls, curve):\n def _shape(site):\n x, y = site.pos\n return y < curve(x)\n return Shape(_shape)",
"def interior_contains(self, Vobj):\n try:\n if Vobj.is_vector(): # assume we were passed a point\n return self.polyhedron()._is_positive( self.eval(Vobj) )\n except AttributeError:\n pass\n\n if Vobj.is_line():\n return self.polyhedron()._is_zero( self.eval(Vobj) )\n elif Vobj.is_vertex():\n return self.polyhedron()._is_positive( self.eval(Vobj) )\n else: # Vobj.is_ray()\n return self.polyhedron()._is_nonneg( self.eval(Vobj) )",
"def convexify(domain):\n\n if isinstance(domain, isl.BasicSet):\n return domain\n\n dom_bsets = domain.get_basic_sets()\n if len(dom_bsets) == 1:\n domain, = dom_bsets\n return domain\n\n hull_domain = domain.simple_hull()\n if isl.Set.from_basic_set(hull_domain) <= domain:\n return hull_domain\n\n domain = domain.coalesce()\n\n dom_bsets = domain.get_basic_sets()\n if len(domain.get_basic_sets()) == 1:\n domain, = dom_bsets\n return domain\n\n hull_domain = domain.simple_hull()\n if isl.Set.from_basic_set(hull_domain) <= domain:\n return hull_domain\n\n dom_bsets = domain.get_basic_sets()\n assert len(dom_bsets) > 1\n\n print(\"PIECES:\")\n for dbs in dom_bsets:\n print(\" %s\" % (isl.Set.from_basic_set(dbs).gist(domain)))\n raise NotImplementedError(\"Could not find convex representation of set\")"
] | [
"0.7643623",
"0.6757536",
"0.67146444",
"0.65801567",
"0.6464188",
"0.6454179",
"0.635574",
"0.62782335",
"0.61885536",
"0.61603767",
"0.6148129",
"0.6033672",
"0.60245943",
"0.590163",
"0.580078",
"0.5784255",
"0.57838607",
"0.5783847",
"0.5773544",
"0.57301515",
"0.5673152",
"0.565746",
"0.56263477",
"0.56033194",
"0.55996966",
"0.5594187",
"0.5550376",
"0.5549927",
"0.54887104",
"0.5479333"
] | 0.73451805 | 1 |
Calculate the contour moments, which help compute features such as the center of mass and the area of the object. | def __CalculateMoments(self, contour):
return cv2.moments(contour) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def moments(cnt):\n\treturn cv2.moments(cnt)",
"def moments(cnt):\n\treturn cv2.moments(cnt)",
"def moments(self):",
"def moments(data):\n# =============================================================================\n# total = data.sum()\n# X, Y = np.indices(data.shape)\n# x = (X*data).sum()/total\n# y = (Y*data).sum()/total\n# col = data[:, int(y)]\n# \n# width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n# \n# row = data[int(x), :]\n# width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n# height = data.max()\n# height1 = height\n# =============================================================================\n return(1, 15, 14, 3, 3, 1, 14, 16, 3, 2)",
"def measure_image_moments(image):\n data = image.quantity\n\n coords = image.geom.get_coord().skycoord\n x, y = coords.data.lon.wrap_at(\"180d\"), coords.data.lat\n\n A = data[np.isfinite(data)].sum()\n\n # Center of mass\n x_cms = (x * data)[np.isfinite(data)].sum() / A\n y_cms = (y * data)[np.isfinite(data)].sum() / A\n\n # Second moments\n x_var = ((x - x_cms) ** 2 * data)[np.isfinite(data)].sum() / A\n y_var = ((y - y_cms) ** 2 * data)[np.isfinite(data)].sum() / A\n x_sigma = np.sqrt(x_var)\n y_sigma = np.sqrt(y_var)\n\n return A, x_cms, y_cms, x_sigma, y_sigma, np.sqrt(x_sigma * y_sigma)",
"def get_image_moments(image=None, contour=None, threshold=3):\n\tif contour is None and image is not None:\n\t\tcontour = get_contour(image, threshold)\n\treturn cv2.moments(contour)",
"def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = int((X*data).sum()/total)\n y = int((Y*data).sum()/total)\n col = data[:, int(y)]\n \n width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n \n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return(height, x, y, width_x, width_y, 0.0)\n #return(1, 15, 15, 2, 2, 0.0)",
"def center_of_contour(contorno):\n M = cv2.moments(contorno)\n # Usando a expressão do centróide definida em: https://en.wikipedia.org/wiki/Image_moment\n if M[\"m00\"]!=0:\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n return (int(cX), int(cY))\n else:\n return (200,150)",
"def find_center( contours ):\r\n ret = []\r\n\r\n for x in contours:\r\n M = cv2.moments( x )\r\n pt = Point()\r\n pt.x = int( M['m10']/M['m00'] )\r\n pt.y = int( M['m01']/M['m00'] )\r\n\r\n ret.append( pt )\r\n\r\n return( ret );",
"def moments(self, data):\n total = data.sum()\n X, Y = indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = sqrt(abs((arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = sqrt(abs((arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return x, y, width_x, width_y, height",
"def moments(data):\n height = data.max()\n background = data.min()\n data = data - np.min(data)\n total = data.sum()\n x, y = np.indices(data.shape)\n x = (x * data).sum() / total\n y = (y * data).sum() / total\n col = data[:, int(y)]\n width_x = np.sqrt(abs((np.arange(col.size) - y) ** 2 * col).sum() / col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(abs((np.arange(row.size) - x) ** 2 * row).sum() / row.sum())\n width_x /= gaussian_sigma_to_fwhm\n width_y /= gaussian_sigma_to_fwhm\n return {\n \"amplitude\": height,\n \"x\": x,\n \"y\": y,\n \"sigma_x\": width_x,\n \"sigma_y\": width_y,\n \"background\": background,\n \"theta\": 0.0,\n }",
"def __CalculateCentroid(self, contour):\r\n moments = cv2.moments(contour)\r\n\r\n centroid = (-1, -1)\r\n if moments[\"m00\"] != 0:\r\n centroid = (int(round(moments[\"m10\"] / moments[\"m00\"])),\r\n int(round(moments[\"m01\"] / moments[\"m00\"])))\r\n\r\n return centroid",
"def find2D_higher_moments(image, centroid, halfwidths, c_sum):\n \n # Unpack centroid to seperate values\n xcen, ycen = np.floor(centroid)\n xhw, yhw = halfwidths\n \n xmoment2 = 0\n xmoment3 = 0\n ymoment2 = 0\n ymoment3 = 0\n \n # Set up x and y centroid scanning ranges\n x_range = np.array((np.floor(xcen - xhw) - 1, np.ceil(xcen + xhw) - 1))\n y_range = np.array((np.floor(ycen - yhw) - 1, np.ceil(ycen + yhw) - 1))\n \n \n for ii in xrange(np.int(x_range[0]), np.int(x_range[1])):\n for jj in xrange(np.int(y_range[0]), np.int(y_range[1])):\n \n xloc = ii - np.floor(xcen)\n yloc = jj - np.floor(ycen)\n \n xweight = 0\n yweight = 0\n \n xoff = np.abs(ii - xcen)\n yoff = np.abs(jj - ycen)\n \n if xoff <= xhw:\n xweight = 1\n elif xhw < xoff < (xhw + 1):\n xweight = xhw + 1 - xoff\n \n if yoff <= yhw:\n yweight = 1\n elif yhw < yoff < (yhw + 1):\n yweight = yhw + 1 - yoff\n \n weight = xweight * yweight\n\n xmoment2 += xloc ** 2 * image[jj, ii] * weight\n xmoment3 += xloc ** 3 * image[jj, ii] * weight\n ymoment2 += yloc ** 2 * image[jj, ii] * weight\n ymoment3 += yloc ** 3 * image[jj, ii] * weight\n \n xmoment2 = xmoment2 / c_sum\n xmoment3 = xmoment3 / c_sum\n ymoment2 = ymoment2 / c_sum\n ymoment3 = ymoment3 / c_sum\n\n # Pack the x and y moments to return to main program\n x_moment = np.array((xmoment2, xmoment3))\n y_moment = np.array((ymoment2, ymoment3))\n \n return x_moment, y_moment",
"def find1D_higher_moments(image, xcen, xhw, c_sum):\n \n # Collapse input image unto x axis\n vector = np.sum(image, axis=0)\n \n xmoment2 = 0.0\n xmoment3 = 0.0\n \n # Set up x and y centroid scanning ranges\n x_range = np.array((np.floor(xcen - xhw) - 1, np.ceil(xcen + xhw) - 1))\n\n for ii in xrange(np.int(x_range[0]), np.int(x_range[1])):\n xloc = (ii + 1) - np.floor(xcen)\n \n xweight = 0\n xoff = np.abs(ii - xcen)\n \n if xoff <= xhw:\n xweight = 0\n elif xhw < xoff < xhw + 1:\n xweight = xhw + 1 - xoff\n\n xmoment2 += xloc ** 2 * vector[ii] * xweight\n xmoment3 += xloc ** 3 * vector[ii] * xweight\n \n xmoment2 = xmoment2 / c_sum\n xmoment3 = xmoment3 / c_sum\n \n # Pack moments for return to main program\n x_mom = np.array((xmoment2, xmoment3))\n \n return x_mom",
"def fun_contours(self, params):\n shape_coeffs = params[:self.num_shape_params]\n blendshape_end = self.num_shape_params + self.numObservations * self.num_blendshape_params\n blendshape_coeffs = params[self.num_shape_params:blendshape_end].reshape((self.numObservations, self.num_blendshape_params))\n trans_mats = params[blendshape_end:].reshape((self.numObservations, 7))\n\n vertices3d = self.vertices3d\n vertices3d_from_mesh = np.zeros_like(vertices3d)\n vertices3d_inner, vertices3d_right, vertices3d_left = self.transform_meshes(shape_coeffs, blendshape_coeffs, trans_mats)\n\n inner_idx = 0\n for idx in range(vertices3d.shape[0]):\n lm_idx = idx % 66\n obs_num = int(np.floor(idx/66))\n\n if lm_idx in self.contour_lms_list[0]:\n vertices3d_from_mesh[idx] = self.find_closest_vertex3D(vertices3d[idx],\n vertices3d_right[obs_num])\n elif lm_idx in self.contour_lms_list[1]:\n vertices3d_from_mesh[idx] = self.find_closest_vertex3D(vertices3d[idx],\n vertices3d_left[obs_num])\n else:\n vertices3d_from_mesh[idx] = vertices3d_inner[obs_num][inner_idx]\n inner_idx += 1\n if inner_idx == 50:\n inner_idx = 0\n\n return (vertices3d_from_mesh - vertices3d).ravel()",
"def centroidFloat(cnt):\n M = cv2.moments(cnt)\n cx = M['m10']/M['m00']\n\tcy = M['m01']/M['m00']\n\treturn (cx, cy)",
"def contours(info, color, line, mean_marker):\n\teigenval, eigenvec = np.linalg.eigh(info['covar'])\n\n\taxis11, axis12 = find_ellipse_info(info['mean'].flatten(), eigenval, eigenvec, 1)\n\taxis21, axis22 = find_ellipse_info(info['mean'].flatten(), eigenval, eigenvec, 2)\n\taxis31, axis32 = find_ellipse_info(info['mean'].flatten(), eigenval, eigenvec, 3)\n\tangle = axis12['xangle']\t\n\tangle = angle * 180 / math.pi\n\n\tellipse1 = Ellipse(xy=info['mean'], width=axis11['length'], height=axis12['length'], angle=angle, visible=True, facecolor='none', edgecolor=color, linestyle=line, linewidth=2)\t\n\tellipse2 = Ellipse(xy=info['mean'], width=axis21['length'], height=axis22['length'], angle=angle, visible=True, facecolor='none', edgecolor=color, linestyle=line, linewidth=2)\t\n\tellipse3 = Ellipse(xy=info['mean'], width=axis31['length'], height=axis32['length'], angle=angle, visible=True, facecolor='none', edgecolor=color, linestyle=line, linewidth=2)\t\n\n\tax = plt.gca()\n\tax.add_patch(ellipse3)\n\tax.add_patch(ellipse2)\n\tax.add_patch(ellipse1)\n\tax.set_xlim(-0.4, 0.4)\n\tax.set_ylim(0.5, 2.0)\n\tplt.plot(info['mean'][0], info['mean'][1], marker=mean_marker, mfc='none', mec=color, markersize=8, mew=2)\n\tsigma1 = {'ax1':axis11['length'], 'ax2':axis12['length'], 'xangle1':axis11['xangle'], 'xangle2':axis12['xangle']}\n\tsigma2= {'ax1':axis21['length'], 'ax2':axis22['length'], 'xangle1':axis21['xangle'], 'xangle2':axis22['xangle']}\n\tsigma3 = {'ax1':axis31['length'], 'ax2':axis32['length'], 'xangle1':axis31['xangle'], 'xangle2':axis32['xangle']}\n\n\treturn sigma1, sigma2, sigma3",
"def extractFeatures(bwimage):\n \n \n # circularity\n img = bwimage.copy()\n img1, contours, hierarchy = cv2.findContours(img, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE)\n \n if len(contours)==0:\n return []\n B = contours[0]\n C = B[:,0,0]\n l = C.size\n \n \n if abs(B[0,0,0] - B[l-1,0,0]) + abs(B[0,0,1] - B[l-1,0,1]) == 2:\n P8 = math.sqrt(2)\n else:\n P8 = 1 \n for j in range(0,l-1): \n if abs((B[j+1,0,0] - B[j,0,0])) + abs(B[j+1,0,1] - B[j,0,1]) == 2:\n P8 = P8 + math.sqrt(2)\n else:\n P8 = P8 + 1\n \n n = np.count_nonzero(bwimage)\n \n circularity = P8*P8/n\n \n \n # elongation\n idx = np.nonzero(bwimage);\n c = idx[1]\n r = idx[0]\n meanx = np.mean(c)\n meany = np.mean(r)\n \n \n pows = 2*np.ones(n)\n \n sigxx = np.sum(np.power((c-meanx),pows))/n\n sigyy = np.sum(np.power((r-meany),pows))/n\n sigxy = np.sum(np.multiply((r-meany),(c-meanx)))/n\n \n covMat = np.array([[sigxx, sigxy], [sigxy, sigyy]])\n val, vects = np.linalg.eig(covMat);\n \n maxEigenValue = np.amax(val) \n minEigenValue = np.amin(val.ravel()[np.flatnonzero(val)])\n \n \n elongation = math.sqrt(maxEigenValue/minEigenValue);\n \n \n # principal axis\n maxidx = np.argmax(val)\n principalAxisVector = vects[maxidx]\n \n \n return [circularity, elongation, principalAxisVector]",
"def contour():\n # 'mayavi' is always defined on the interpreter.\n # Create a new scene.\n mayavi.new_scene()\n\n # Read a VTK (old style) data file.\n r = VTKFileReader()\n #filename = join(mayavi2.get_data_dir(dirname(abspath(__file__))),\n #'heart.vtk')\n filename = 'heart.vtk'\n r.initialize(filename)\n mayavi.add_source(r)\n\n # Create an outline for the data.\n o = Outline()\n mayavi.add_module(o)\n\n # Create three simple grid plane modules.\n # First normal to 'x' axis.\n gp = GridPlane()\n mayavi.add_module(gp)\n # Second normal to 'y' axis.\n gp = GridPlane()\n mayavi.add_module(gp)\n gp.grid_plane.axis = 'y'\n # Third normal to 'z' axis.\n gp = GridPlane()\n mayavi.add_module(gp)\n gp.grid_plane.axis = 'z'\n\n # Create one ContourGridPlane normal to the 'x' axis.\n cgp = ContourGridPlane()\n mayavi.add_module(cgp)\n # Set the position to the middle of the data.\n cgp.grid_plane.position = 15\n\n # Another with filled contours normal to 'y' axis.\n cgp = ContourGridPlane()\n mayavi.add_module(cgp)\n # Set the axis and position to the middle of the data.\n cgp.grid_plane.axis = 'y'\n cgp.grid_plane.position = 15\n cgp.contour.filled_contours = True\n\n # An isosurface module.\n iso = IsoSurface(compute_normals=True)\n mayavi.add_module(iso)\n iso.contour.contours = [220.0]\n\n # An interactive scalar cut plane.\n cp = ScalarCutPlane()\n mayavi.add_module(cp)\n cp.implicit_plane.normal = 0,0,1",
"def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(np.abs((np.arange(col.size)-x)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-y)**2*row).sum()/row.sum())\n height = data.max()\n return height, x, y, width_x, width_y",
"def moments(self,connected=False,dimensionless=False):\n\n\t\t#First check that the instance has the gradient and hessian attributes; if not, compute them\n\t\tif not (hasattr(self,\"gradient_x\") and hasattr(self,\"gradient_y\")):\n\t\t\tself.gradient()\n\n\t\tif not (hasattr(self,\"hessian_xx\") and hasattr(self,\"hessian_yy\") and hasattr(self,\"hessian_xy\")):\n\t\t\tself.hessian()\n\n\t\t#Decide if using the full map or only the unmasked region\n\t\tif self._masked:\n\n\t\t\tif not hasattr(self,\"_full_mask\"):\n\t\t\t\tself.maskBoundaries()\n\t\t\t\n\t\t\tdata = self.data[self._full_mask]\n\t\t\tgradient_x = self.gradient_x[self._full_mask]\n\t\t\tgradient_y = self.gradient_y[self._full_mask]\n\t\t\thessian_xx = self.hessian_xx[self._full_mask]\n\t\t\thessian_yy = self.hessian_yy[self._full_mask]\n\t\t\thessian_xy = self.hessian_xy[self._full_mask]\n\n\t\telse:\n\n\t\t\tdata = self.data\n\t\t\tgradient_x = self.gradient_x\n\t\t\tgradient_y = self.gradient_y\n\t\t\thessian_xx = self.hessian_xx\n\t\t\thessian_yy = self.hessian_yy\n\t\t\thessian_xy = self.hessian_xy\n\n\t\t\n\t\t#Quadratic moments\n\t\tsigma0 = data.std()\n\t\tsigma1 = np.sqrt((gradient_x**2 + gradient_y**2).mean())\n\n\t\t#Cubic moments\n\t\tS0 = (data**3).mean()\n\t\tS1 = ((data**2)*(hessian_xx + hessian_yy)).mean()\n\t\tS2 = ((gradient_x**2 + gradient_y**2)*(hessian_xx + hessian_yy)).mean()\n\n\t\t#Quartic moments\n\t\tK0 = (data**4).mean()\n\t\tK1 = ((data**3) * (hessian_xx + hessian_yy)).mean()\n\t\tK2 = ((data) * (gradient_x**2 + gradient_y**2) * (hessian_xx + hessian_yy)).mean()\n\t\tK3 = ((gradient_x**2 + gradient_y**2)**2).mean()\n\n\t\t#Compute connected moments (only quartic affected)\n\t\tif connected:\n\t\t\tK0 -= 3 * sigma0**4\n\t\t\tK1 += 3 * sigma0**2 * sigma1**2\n\t\t\tK2 += sigma1**4\n\t\t\tK3 -= 2 * sigma1**4\n\n\t\t\n\t\t#Normalize moments to make them dimensionless\n\t\tif dimensionless:\n\t\t\tS0 /= sigma0**3\n\t\t\tS1 /= (sigma0 * sigma1**2)\n\t\t\tS2 *= (sigma0 / sigma1**4)\n\n\t\t\tK0 /= sigma0**4\n\t\t\tK1 /= (sigma0**2 * sigma1**2)\n\t\t\tK2 /= sigma1**4\n\t\t\tK3 /= sigma1**4\n\n\t\t\tsigma0 /= sigma0\n\t\t\tsigma1 /= sigma1\n\n\t\t#Return the array\n\t\treturn np.array([sigma0,sigma1,S0,S1,S2,K0,K1,K2,K3])",
"def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return height, x, y, width_x, width_y",
"def moments(data):\n total = data.sum()\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(np.abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(np.abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return height, x, y, width_x, width_y",
"def moments(data):\n total = data.sum()\n X, Y = indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = sqrt(abs((arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = sqrt(abs((arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n return height, x, y, width_x, width_y",
"def moments(data):\n total = data.sum()\n if total != 0.:\n X, Y = np.indices(data.shape)\n x = (X*data).sum()/total\n y = (Y*data).sum()/total\n col = data[:, int(y)]\n width_x = np.sqrt(abs((np.arange(col.size)-y)**2*col).sum()/col.sum())\n row = data[int(x), :]\n width_y = np.sqrt(abs((np.arange(row.size)-x)**2*row).sum()/row.sum())\n height = data.max()\n else:\n height=0\n x=0\n y=0\n width_x=0\n width_y=0\n return height,np.sqrt(width_x**2 + width_y**2)",
"def visualizeObs():\n fcontourf(fObs, [-2, 2], [-1, 1], [0, 10])",
"def calculateCentroid(self,image):\n\t\tim=cv2.imread(image,0) #reads it in greyscale\n\t\tret,thresh = cv2.threshold(img_copy,128,255,cv2.THRESH_OTSU)\n\t\tim2,contours,hierarchy = cv2.findContours(thresh, 1, 2)\n\t\tcnt = contours[0]\n\t\tM = cv2.moments(cnt)\n\t\tcx = int(M['m10']/M['m00'])\n\t\tcy = int(M['m01']/M['m00'])\n\t\tcentroid=(cx,cy)\n\t\treturn centroid",
"def calc_moments(field, lats, lons, xypoints, hemisphere='NH', field_type='GPH', \\\n edge=3.02e4, resolution='full'):\n print('Calculating for resolution: '+resolution)\n field_cart, x, y = sph_to_car(field,lons,lats,xypoints,resolution)\n field_vtx = isolate_vortex(field_cart, edge, field_type)\n \n aspect_ratio, latcent = moment_integrate(field_vtx, x, y,edge)\n \n return {'aspect_ratio':aspect_ratio, 'centroid_latitude':latcent}",
"def get_contour_features(mask,selectcell=\"centered\"):\r\n \r\n #binarize image (everything above 0 becomes 1)\r\n mask = np.clip(mask,a_min=0,a_max=1)\r\n\r\n #for contours, dont use RETR_TREE, but RETR_EXTERNAL as we are not interested in internal objects\r\n contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\r\n contours = list(contours)\r\n \r\n #in case there is no contour found, add a dummy contour\r\n if len(contours)==0:\r\n contours = [np.array([[[0, 0]],[[0, 1]],[[1, 1]],[[1, 0]]])] #generate a dummy contour\r\n\r\n #Sort contours, longest first\r\n contours.sort(key=len,reverse=True)\r\n contours = [c for c in contours if len(c)>4] #proper contour should have at least 5 points\r\n hulls = [cv2.convexHull(contour,returnPoints=True) for contour in contours]\r\n\r\n mu_origs = [cv2.moments(contour) for contour in contours]\r\n mu_hulls = [cv2.moments(hull) for hull in hulls]\r\n\r\n area_origs = [mu_orig[\"m00\"] for mu_orig in mu_origs]\r\n area_hulls = [mu_hull[\"m00\"] for mu_hull in mu_hulls]\r\n\r\n #drop events where area is zero\r\n hulls = [hulls[i] for i in range(len(hulls)) if area_origs[i]>0] \r\n contours = [contours[i] for i in range(len(contours)) if area_origs[i]>0]\r\n mu_origs = [mu_origs[i] for i in range(len(mu_origs)) if area_origs[i]>0]\r\n mu_hulls = [mu_hulls[i] for i in range(len(mu_hulls)) if area_origs[i]>0]\r\n area_hulls = [area_hulls[i] for i in range(len(area_hulls)) if area_origs[i]>0]\r\n area_origs = [area_origs[i] for i in range(len(area_origs)) if area_origs[i]>0]\r\n \r\n \r\n pos_x = [int(mu_orig['m10']/mu_orig['m00']) for mu_orig in mu_origs]\r\n pos_y = [int(mu_orig['m01']/mu_orig['m00']) for mu_orig in mu_origs]\r\n\r\n \r\n if selectcell == \"smooth\":\r\n #compute the area ratio (roughness of contour)\r\n area_ratio = np.array(area_hulls)/np.array(area_origs)\r\n #get the contour with minimum roughness (smooth contour)\r\n sorter = np.argsort(area_ratio) #smallest first\r\n\r\n if selectcell == \"centered\":\r\n #select contour that is closest to the center of the image. 
\r\n #In iPAC, cells are usually in the center.\r\n mid_x,mid_y = mask.shape[0]/2,mask.shape[1]/2 #middle of the image\r\n BB = [cv2.boundingRect(c) for c in contours] #get a bounding box around the object\r\n distances = [np.sqrt((mid_x-bb[0])**2 + (mid_y-bb[1])**2) for bb in BB]\r\n sorter = np.argsort(distances) #smallest first\r\n \r\n #sort values with respect to chosen metric (area_ratio or distance)\r\n contours = [contours[s] for s in sorter]\r\n hulls = [hulls[s] for s in sorter]\r\n pos_x = [pos_x[s] for s in sorter]\r\n pos_y = [pos_y[s] for s in sorter]\r\n mu_origs = [mu_origs[s] for s in sorter]\r\n area_origs = [area_origs[s] for s in sorter]\r\n area_hulls = [area_hulls[s] for s in sorter]\r\n \r\n # draw mask of the chosen contour\r\n mask = np.zeros_like(mask)\r\n cv2.drawContours(mask,contours,0,1,cv2.FILLED)# produce a contour that is filled inside\r\n\r\n hull = hulls[0]#[0:n_contours]\r\n pos_x = pos_x[0]\r\n pos_y = pos_y[0] \r\n mu_orig = mu_origs[0]#[0:n_contours]\r\n area_orig = area_origs[0]#[0:n_contours]\r\n area_hull = area_hulls[0]#[0:n_contours]\r\n \r\n if area_orig>0:\r\n area_ratio = area_hull/area_orig\r\n else:\r\n area_ratio = np.nan\r\n\r\n arc = cv2.arcLength(hull, True) \r\n circularity = 2.0 * np.sqrt(np.pi * mu_orig[\"m00\"]) / arc\r\n\r\n\r\n dic = {\"mask\":mask,\"pos_x\":pos_x,\"pos_y\":pos_y,\"area_orig\":area_orig,\"area_hull\":area_hull,\\\r\n \"area_ratio\":area_ratio,\"circularity\":circularity}\r\n return dic",
"def get_centroid(image, method=\"propio\"):\n # ---------Método Propio (directo de la definición)----------\n if method == \"propio\":\n # Dimensiones\n height, width = image.shape[:2]\n # Masa total\n total_mass = image.sum()\n\n # Si la masa total es cero, entonces el centro de masa \n # no existe\n if total_mass == 0:\n r = np.array([-1, -1])\n return r, None\n\n # Primera componente (suma por filas)\n row_sum = image.sum(axis=1)\n row_weight = np.arange(1, height+1)\n r_i = np.dot(row_sum, row_weight)\n r_i /= total_mass\n r_i = int(r_i)\n \n # Segunda componente (suma por columnas)\n column_sum = image.sum(axis=0)\n column_weight = np.arange(1, width+1)\n r_j = np.dot(column_sum, column_weight)\n r_j /= total_mass\n r_j = int(r_j)\n\n # Retorna el centroide en coordenadas de imagen\n r = np.array([r_j, r_i])\n return r, None\n\n # ---------Método con contornos-----------------\n else:\n # Obtener contorno imagen binaria (máscara)\n cnts = get_contours(image)\n \n # Para cada contorno, obtener el centroide y añadirlo a lista\n r = []\n for c in cnts:\n M = cv2.moments(c)\n cX = int(M[\"m10\"] / M[\"m00\"])\n cY = int(M[\"m01\"] / M[\"m00\"])\n r.append(np.array([cX, cY]))\n\n # Ahora se retorna una lista con centroides (según la \n # cantidad de contornos que se hayan encontrado)\n if len(r) == 0:\n r.append(np.array([-1, -1]))\n return r, cnts\n else:\n return r, cnts"
] | [
"0.6727947",
"0.6727947",
"0.66949683",
"0.65000355",
"0.6481608",
"0.63708127",
"0.6356746",
"0.6351715",
"0.6243697",
"0.61840034",
"0.6168322",
"0.6164202",
"0.6142851",
"0.6103647",
"0.60443234",
"0.6014186",
"0.6013817",
"0.5995825",
"0.59587353",
"0.59543926",
"0.59508264",
"0.5949832",
"0.5949832",
"0.5935131",
"0.5892589",
"0.5855636",
"0.58379424",
"0.58336055",
"0.5813654",
"0.57924664"
] | 0.77274 | 0 |
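Editorial aside (not part of any dataset row): the negatives directly above all revolve around OpenCV image moments and centroid extraction. A minimal sketch of that technique follows, assuming OpenCV 4's two-value findContours signature; the function name and input are illustrative only.

```python
import cv2

def centroid_from_mask(mask):
    """Return the (x, y) centroid of the largest contour in a binary mask, or None."""
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)
    if not contours:
        return None
    largest = max(contours, key=cv2.contourArea)
    m = cv2.moments(largest)
    if m["m00"] == 0:  # zero area: centroid is undefined
        return None
    return int(m["m10"] / m["m00"]), int(m["m01"] / m["m00"])
```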
Calculates a contour perimeter or a curve length. | def __CalculatePerimeter(self, curve):
return cv2.arcLength(curve, True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def perimeter(cnt):\n\treturn cv2.arcLength(cnt, True)",
"def perimeter(self):\n return self.sidelength1 + self.sidelength2 + self.baselength1 + self.baselength2",
"def perimeter(self):\n return sum([s.length for s in self.segments])",
"def perimeter(self):",
"def __CalculateLength(self, curve):\r\n return cv2.arcLength(curve, True)",
"def calculateperimeter(self):\r\n return (self.width * 2) + (self.height * 2)",
"def perimeter(self):\r\n\r\n return 2*math.pi*self.__radius",
"def perimeter(self):\n\t\treturn 2 * (self.width + self.height)",
"def perimeter(self):\n\t\treturn self.height * 4",
"def getPerimeter(self):\n return 2 * math.pi * self.__radius",
"def __CalculateCircularity(self, contour):\r\n if len(contour) < 2:\r\n return 0\r\n\r\n perimeter = cv2.arcLength(contour, False)\r\n area = self.__CalculateArea(contour)\r\n return (4 * math.pi * area) / (perimeter * perimeter)",
"def perimeter(self):\n return (\n self.side_1_length +\n self.side_2_length +\n self.side_3_length +\n self.side_4_length\n )",
"def get_perimeter_formula(cls):\n pass",
"def perimeter(a:float, b:float, c:float):\n return a + b + c",
"def perimeter(self):\n return sum(seg.length for seg in self.segments) + \\\n sum([p.perimeter for p in self.subs])",
"def perimeter(self):\n perimeter = (2 * self.__length) + (2 * self.__width)\n\n return perimeter",
"def perimeter(self):\r\n return (2*self.width) + (2*self.height)",
"def perimeter(points):\n return sum(get_distances(points))",
"def perimeter(self):\n return 2 * (self.height + self.width)",
"def perimeter(self):\n return sum(self._lengths)",
"def perimeter(self):\n return sum(self._lengths)",
"def edge_perimeter_length(c, stencil=nn_stencil):\n\n return np.sum(np.logical_not(c) * coordination(c, stencil=stencil))",
"def PolyPerimeter(Coords):\n peri = 0.0\n for i in range(np.shape(Coords)[0]-1):\n # next point coord - current point coord\n peri = peri + ( (Coords[i+1,0] - Coords[i,0])**2 + (Coords[i+1,1] - Coords[i,1])**2 )**0.5\n\n return peri",
"def perimeter_distance(self, p1, p2):\n\n p1_projection = self.outline.project(shgeo.Point(p1))\n p2_projection = self.outline.project(shgeo.Point(p2))\n\n distance = p2_projection - p1_projection\n\n if abs(distance) > self.outline_length / 2.0:\n # if we'd have to go more than halfway around, it's faster to go\n # the other way\n if distance < 0:\n return distance + self.outline_length\n elif distance > 0:\n return distance - self.outline_length\n else:\n # this ought not happen, but just for completeness, return 0 if\n # p1 and p0 are the same point\n return 0\n else:\n return distance",
"def regular_polygon_area(perimeter, apothem):\n return (perimeter * apothem) / 2",
"def get_rect_perimeter(length, width):\n length = (str)(length)\n width = (str)(width)\n if((length.isnumeric()) and (length.isnumeric())):\n length = (float)(length)\n width = (float)(width)\n perimeter = 2 * (length + width)\n else:\n perimeter = \"Invalid input, length and width must be numeric value\"\n return perimeter",
"def get_corrected_arclength(pts,closed=False):\r\n \r\n l = len(pts)\r\n ptsDown2 = np.concatenate((pts[2:l],pts[0:2]))\r\n ptsDown1 = np.concatenate((pts[1:l],np.array([(pts[0][0],pts[0][1])])))\r\n ptsUp1 = np.concatenate((np.array([(pts[l-1][0],pts[l-1][1])]),pts[0:l-1]))\r\n ptsUp2 = np.concatenate((pts[l-2:l],pts[0:l-2]))\r\n summedPts = ptsDown2 + ptsDown1 + pts + pts + ptsUp1 + ptsUp2\r\n avePts = summedPts/5.0\r\n zoomAvePts = np.round(avePts)\r\n arcLength = cv2.arcLength(zoomAvePts.astype(int),closed)\r\n \r\n return arcLength",
"def __CalculateApproximation(self, contour):\r\n epsilon = 0.1 * cv2.arcLength(contour, True)\r\n return cv2.approxPolyDP(contour, epsilon, True)",
"def shape_contour(contour):\n width = max(contour[1][0]-contour[0][0], contour[3][0]-contour[2][0])\n height = max(contour[3][1]-contour[0][1],contour[2][1]-contour[1][1])\n return height,width",
"def cone_area(radius: number, height: number) -> number:\n return pi*radius*(radius + sqrt(radius**2 + height**2))"
] | [
"0.721825",
"0.69289505",
"0.6759035",
"0.6589213",
"0.6564522",
"0.65498984",
"0.6483492",
"0.6478432",
"0.6431225",
"0.6417622",
"0.6385283",
"0.6373984",
"0.63636816",
"0.636077",
"0.6348714",
"0.6287157",
"0.62746215",
"0.6269028",
"0.6236131",
"0.6204229",
"0.6204229",
"0.5961299",
"0.59612936",
"0.5935809",
"0.58287364",
"0.5769499",
"0.5767257",
"0.5755544",
"0.5729385",
"0.57146126"
] | 0.7875777 | 0 |
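Editorial aside on the row above: the positive document is a thin wrapper around cv2.arcLength, whose second argument flags whether the curve is closed. A hedged usage sketch follows; the image path and threshold value are assumptions, not taken from the dataset.

```python
import cv2

img = cv2.imread("shape.png", cv2.IMREAD_GRAYSCALE)          # hypothetical input image
_, thresh = cv2.threshold(img, 128, 255, cv2.THRESH_BINARY)  # binarize before contour search
contours, _ = cv2.findContours(thresh, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

for cnt in contours:
    perimeter = cv2.arcLength(cnt, True)   # closed contour -> perimeter
    length = cv2.arcLength(cnt, False)     # open polyline -> curve length
    print(perimeter, length)
```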
Update the ElasticSearch index every hour. | def update_es_index():
for job in scheduler.get_jobs():
if 'task_type' in job.meta and job.meta['task_type'] == "update_index":
scheduler.cancel(job)
scheduler.schedule(
scheduled_time=datetime.now(),
func='haystack.management.commands.update_index.Command().handle()',
interval=60 * 60,
repeat=None,
)
for job in scheduler.get_jobs():
index_job = job
if index_job.func_name == 'haystack.management.commands.update_index.Command().handle()':
break
index_job.meta['task_type'] = "update_index"
index_job.save() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def index_later(self):\n return",
"def update_time(cls, key):\n key.put()",
"def every_hour(self, time, function, args=None, kwargs=None, name=None):\n if args is None:\n args = list()\n if kwargs is None:\n kwargs = dict()\n if name is None:\n name = function.__name__+(f'_{len(self.config)+1}' if function.__name__ in self.config else '')\n self.config[name] = {'mode':'every_hour', 'time':time, 'function':function, 'args':args, \n 'kwargs':kwargs, 'execute_num':0, 'runner':(function, args, kwargs, name),\n 'time_init':datetime.datetime.now()}\n self.params.tracker_dict[name] = dict()",
"def update(self):\n if self._refreshed_at is None or (\n self._refreshed_at + self._refresh_rate <= datetime.datetime.now()):\n\n self.run()",
"def periodicUpdate(self):\n try:\n logging.info(f'{self.cn} periodicUpdate = Start')\n isHaz = JsonSettings.parseJson('settings.json','isHazelcast')\n if self.db.isDb():\n self.insertStats()\n self.insertPorts()\n if isHaz:\n self.insertHaz() \n else:\n self.db.initDb()\n self.insertSys()\n self.insertStats()\n self.insertPorts()\n if isHaz:\n self.insertHaz() \n except Exception as e:\n logging.critical(f'{self.cn} Exception: {e}')\n logging.critical(f'{self.cn} StackTrace: \\n', exc_info=1)\n finally:\n logging.info(f'{self.cn} periodicUpdate = End')",
"def do_update(url,indexHeaders,update_file):\n updateUrl=url.replace(\"buckets\",\"riak\")\n indexHeaders['content-type'] = 'application/json'\n r=requests.post(url, data=json.dumps(update_file), headers=indexHeaders)",
"def step010():\n logger.logMessage('Begin: Getting candidate documents from elasticsearch')\n\n def limitHour(d):\n thish = d.start_time.tz_localize(tz='UTC')\n nexth = thish + dt.timedelta(hours=1)\n return { 'range': { 'time': {'gte':thish, 'lt':nexth } } }\n \n conn = sql.create_engine(pgurl)\n client = es.Elasticsearch(hostlist)\n dupesDF = pd.read_sql_table('weather_dupes',conn).set_index('time')\n hours =dupesDF.to_period('H').reset_index()['time'].unique()\n ranges = [ limitHour(h) for h in hours ]\n query = { \n '_source': [ 'tsa','time' ],\n 'query': { \n 'bool': { 'should': ranges } \n } \n }\n #logger.logMessage(level='DEBUG',message='Query body: {0}'.format(query))\n hits = eshelp.scan(client=client,index=indexName,doc_type='doc',query=query)\n numRecs = 0\n with open(candidatesFile,'w') as f:\n for h in hits:\n src = h['_source']\n tsa = int(src['tsa'])\n time = src['time']\n docid = h['_id']\n idx = h['_index']\n f.write(f'{tsa:014d};{time:25s};{docid:32s};{idx:32s}\\n') \n numRecs += 1\n if numRecs % 1000 == 0:\n logger.logMessage(level='DEBUG',message=\"{0:9d} records written\".format(numRecs))\n logger.logMessage(message=\"{0:9d} total records written\".format(numRecs))\n logger.logMessage('End: Getting candidate documents from elasticsearch')",
"def cron_refresh_spacetrack_cache():\n s = SpaceTrackApi()\n updated_tles_str = s.get_all_tles()\n storage.save_tle_cache(updated_tles_str)\n last_updated[0] = int(time.time())\n metadata = {\n 'last_updated': last_updated[0],\n }\n storage.save_metadata(metadata)",
"def reindex(self):",
"def reindex(self):",
"def update(self, time):\n raise NotImplementedError",
"def update(self, time):\n raise NotImplementedError",
"def refresh(self):\n\n self._refreshed_on = time.time() * 1000",
"def _update_on_refresh():\n cities = City.query.all()\n\n #Iterates over all cities in the database and updates their value\n for city in cities:\n metric_resp, imperial_resp = _get_open_weather_requests(city.name)\n\n metric_json = metric_resp.json()\n imperial_json = imperial_resp.json()\n\n city.temp_celsius = int(metric_json[MAIN][TEMPERATURE])\n city.temp_fahrenheit = int(imperial_json[MAIN][TEMPERATURE])\n db.session.commit()",
"def store_elasticsearch(self, item):\n self.datastore.create(\n index=\"dminer-alphabay-{date}\".format(\n date=datetime.datetime.strptime(item[\"timestamp\"], \"%Y:%m:%d %H:%M:%S\").date().strftime(\"%Y-%m-%d\")\n ),\n doc_type= \"alphabay_listing\",\n body=item\n )",
"def update(self, dt):\n pass",
"async def afterHoursAutoPurge(self, ctx: Context):",
"def store_elasticsearch(self, item):\n self.datastore.create(\n index=\"dminer-dreammarket-{date}\".format(\n date=datetime.datetime.strptime(item[\"timestamp\"], \"%Y:%m:%d %H:%M:%S\").date().strftime(\"%Y-%m-%d\")\n ),\n doc_type= \"dreammarket_listing\",\n body=item\n )",
"async def _timein_refresh(self):\n\t\t\n\t\tawait self.refresh_cache()",
"def _update(self, host):\n pass",
"def update_news_intime(minutes):\n while True:\n db_update.update()\n time.sleep(60 * minutes)",
"def __setitem__(self, url, reslut):\n record = {'result': reslut, 'timestamp': datetime.datetime.utcnow()}\n try:\n self.es.index(index=self.index, doc_type=self.doc_type, id=url, body=record)\n except Exception as e:\n print e\n print url, 'failed'",
"def refresh():\r\n DB.drop_all()\r\n DB.create_all()\r\n # TODO Get data from OpenAQ, make Record objects with it, and add to db\r\n for i in time_x_values():\r\n DB.session.add(Record(datetime=i[0], value=i[1]))\r\n DB.session.commit()\r\n return 'Data refreshed!'",
"def refresh_index(self):\n synchronize()\n # TODO: add logger call here\n self._compute_embeddings()",
"def refresh():\r\n db.drop_all()\r\n db.create_all()\r\n for time_value in get_datetime_values('Los Angeles', 'pm25'):\r\n record = Record(datetime=str(time_value[0]), value=time_value[1])\r\n db.session.add(record)\r\n db.session.commit()\r\n return render_template('refresh.html')",
"def step070() -> None:\n logger.logMessage('Begin: elasticsearch bulk update')\n client = es.Elasticsearch(hostlist)\n\n def generate():\n with open(renumFile,'r') as f:\n line = f.readline().rstrip()\n while line != '':\n fields = line.split(';')\n oper = { '_index': fields[3], \n '_op_type': 'update',\n '_id': fields[2].rstrip(),\n '_type': 'doc',\n '_source:': {'doc': {'tsa': fields[0]}}}\n \n yield oper\n line = f.readline().rstrip()\n result = eshelp.bulk(client,generate())\n logger.logMessage('Bulk result: {0}'.format(result))\n logger.logMessage('End : elasticsearch bulk update')",
"def update(self, dt):",
"def update(self, dt):",
"def reindex(self):\n raise NotImplementedError()",
"def addDayHourToURange(self, dayHour, index):\n ur_hist_len = len(self.__data['updateRange'][index]['updateHistory']) - Predictor.rangeHistorySize\n if (ur_hist_len > 0):\n for i in range(ur_hist_len):\n self.__data['updateRange'][index]['updateHistory'].pop(0)\n self.__data['updateRange'][index]['updateHistory'].append(dayHour)"
] | [
"0.59411937",
"0.57995015",
"0.56473935",
"0.5602451",
"0.558092",
"0.551876",
"0.5453459",
"0.5422525",
"0.54206413",
"0.54206413",
"0.5394929",
"0.5394929",
"0.53662395",
"0.53635",
"0.53479695",
"0.53402996",
"0.532217",
"0.53142494",
"0.5309737",
"0.5285711",
"0.52673167",
"0.525103",
"0.5217854",
"0.5209786",
"0.5201052",
"0.5197442",
"0.5189252",
"0.5189252",
"0.5187253",
"0.51755565"
] | 0.6486948 | 0 |
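Editorial aside on the row above: the positive document schedules a django-haystack update_index command once per hour through an rq scheduler. As a library-free approximation of the same idea (not the mechanism used in the row), a plain daemon-thread loop can rerun any callable on a fixed interval; the task named in the comment is hypothetical.

```python
import threading
import time

def schedule_hourly(task, interval_seconds=3600):
    """Run `task` immediately, then again every `interval_seconds`, in a daemon thread."""
    def runner():
        while True:
            task()
            time.sleep(interval_seconds)
    thread = threading.Thread(target=runner, daemon=True)
    thread.start()
    return thread

# Hypothetical usage with a search-index rebuild:
# schedule_hourly(lambda: rebuild_search_index())
```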
JavaProcess.__init__(self, class_loc, args=[]) Initializes an external Java process. | def __init__(self, config, class_loc, args=[]):
JavaProcess.config = JavaProcessConfig.configFrom_dict(config)
self._cp = self._construct_classpath_str()
self.class_loc = class_loc
self.args = args
self._process = None
self._stdout = None
self._stderr = None
LOG.debug("JavaProcess constructed for %s", self.class_loc)
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, paths):\n Process.__init__(self)\n self.paths = paths",
"def __init__(self, host=\"\", port=8432):\n Process.__init__(self)\n self.host, self.port = host, port\n self._Handler.annotator = self",
"def __init__(self):\n self._recording = None\n self._java_call = get_config_str(\"Java\", \"java_call\")\n result = subprocess.call([self._java_call, '-version'])\n if result != 0:\n raise ConfigurationException(\n f\" {self._java_call} -version failed. \"\n \"Please set [Java] java_call to the absolute path \"\n \"to start java. (in config file)\")\n\n self._find_java_jar()\n\n self._machine_json_path = None\n self._placement_json = None\n self._monitor_cores = None\n self._gatherer_iptags = None\n self._gatherer_cores = None\n self._java_properties = get_config_str(\"Java\", \"java_properties\")\n self._chipxy_by_ethernet = None\n if self._java_properties is not None:\n self._java_properties = self._java_properties.split()\n # pylint: disable=not-an-iterable\n for _property in self._java_properties:\n if _property[:2] != \"-D\":\n raise ConfigurationException(\n \"Java Properties must start with -D \"\n f\"found at {_property}\")",
"def __init__(self, process=None, parent=None, **kwargs):\n super(ProcessIO, self).__init__(**kwargs)\n self.process = process\n self.parent = parent\n self.default_output = process.default_output",
"def _run_java(self, *args):\n if self._java_properties is None:\n params = [self._java_call, '-jar', self._jar_file]\n else:\n params = [self._java_call] + self._java_properties \\\n + ['-jar', self._jar_file]\n params.extend(args)\n return subprocess.call(params)",
"def __init__(self):\n self._process = None\n self._nm = PortScanner()",
"def __init__(self, binPath, numProc, wd, platform):\n self.binPath = binPath\n self.numProc = numProc\n self.wd = wd\n self.platform = platform",
"def _runner(self, classpath, main, jvm_options, args):",
"def __init__(self, task_queue, result_queue):\n multiprocessing.Process.__init__(self)\n self.task_queue = task_queue\n self.result_queue = result_queue",
"def __init__(self, args, shell, userns):\n super(BasicMgr, self).__init__(args, shell, userns)\n self.cmd = self._wlbin + args\n\n # Build Popen instance\n try:\n self.p = Popen(self.cmd, stdout=PIPE, stderr=PIPE, stdin=PIPE,)\n except OSError as e:\n if e.errno == errno.ENOENT:\n print(\"Couldn't find program: %r\" % self.cmd[0])\n return\n else:\n raise e",
"def __init__(self, stub_class, cmd, port=None):\n self._process_lock = threading.RLock()\n self._process = None\n self._stub_class = stub_class\n self._cmd = [str(arg) for arg in cmd]\n self._port = port",
"def __init__(self, *args, **kwargs):\n super(PythonTaskWrapper, self).__init__(*args, **kwargs)\n\n self.setOption(\n 'executableName',\n self.__pythonExecutable\n )",
"def __init__(self, program, args):\n self.__program = program\n self.__args = args",
"def _launch(self):\n annotators = ['tokenize', 'ssplit']\n if 'ner' in self.annotators:\n annotators.extend(['pos', 'lemma', 'ner'])\n elif 'lemma' in self.annotators:\n annotators.extend(['pos', 'lemma'])\n elif 'pos' in self.annotators:\n annotators.extend(['pos'])\n annotators = ','.join(annotators)\n options = ','.join(['untokenizable=noneDelete',\n 'invertible=true'])\n # if you work on English, use this this command\n cmd = ['java', '-mx' + self.mem, '-cp', '\"%s\"' % self.classpath,\n 'edu.stanford.nlp.pipeline.StanfordCoreNLP', '-annotators',\n annotators, '-tokenize.options', options,\n '-outputFormat', 'json', '-prettyPrint', 'false']\n \n # if you work on arabic, use this this command\n \n # cmd = ['java', '-mx' + self.mem, '-cp', '\"%s\"' % self.classpath,\n # # 'edu.stanford.nlp.pipeline.StanfordCoreNLP','-annotators',\n # 'edu.stanford.nlp.pipeline.StanfordCoreNLP', '-props', 'StanfordCoreNLP-arabic.properties','-annotators',\n # annotators, '-tokenize.options', options, #'-tokenize.whitespace', 'true',\n # '-outputFormat', 'json', '-prettyPrint', 'false']\n print(' '.join(cmd))\n\n # We use pexpect to keep the subprocess alive and feed it commands.\n # Because we don't want to get hit by the max terminal buffer size,\n # we turn off canonical input processing to have unlimited bytes.\n self.corenlp = pexpect.spawn('/bin/bash', maxread=100000, timeout=60)\n self.corenlp.setecho(False)\n self.corenlp.sendline('stty -icanon')\n self.corenlp.sendline(' '.join(cmd))\n self.corenlp.delaybeforesend = 0\n self.corenlp.delayafterread = 0\n self.corenlp.expect_exact('NLP>', searchwindowsize=100)",
"def __init__(self, *args, **kwargs):\n mp.Process.__init__(self)\n self._args = args\n self._kwargs = kwargs\n self._host_conn, self._proc_conn = mp.Pipe()\n self.daemon = True\n self.start()\n reply = self._host_conn.recv()\n if isinstance(reply, Exception):\n raise reply",
"def Start(self):\n\n\n\n assert not self._process, 'Start() can only be called once'\n self._process = subprocess.Popen(self._args)",
"def _from_java(cls, java_obj):\n # Create a new instance of this stage.\n py_obj = cls()\n py_obj._java_obj = java_obj\n if java_obj is None and java_obj.parentPipeline().isDefined():\n py_parent = MLPipeline()\n py_parent._java_obj = java_obj.parentPipeline().get()\n py_obj._parent = py_parent\n return py_obj",
"def __init__(self, target=None, *args, **kwargs):\n super(PyonThread, self).__init__()\n\n if target is not None or not hasattr(self, 'target'): # Allow setting target at class level\n self.target = target\n self.spawn_args = args\n self.spawn_kwargs = kwargs\n\n # The instance of Greenlet or subprocess or similar\n self.proc = None\n self.supervisor = None\n\n self.ev_exit = Event()",
"def __init__(self, proc_args: Optional[List[str]]):\n if proc_args:\n self.proc = subprocess.Popen(\n proc_args,\n universal_newlines=True,\n stdin=subprocess.PIPE, # pipe STDIN and STDOUT to send and receive messages\n stdout=subprocess.PIPE\n )\n self.outward_comm_stream = self.proc.stdin\n self.inward_comm_stream = self.proc.stdout\n else:\n self.proc = None\n self.outward_comm_stream = sys.stdout\n self.inward_comm_stream = sys.stdin",
"def __init__(self, args, env=None):\n self.args = args\n if env:\n self.env = env\n else:\n self.env = os.environ\n self.stdout = None\n self.stderr = None\n self._process = None",
"def __init__(self, program):\r\n self._program = program",
"def __init__(self):\n super(MultiProcessEngine, self).__init__()\n self._debug_output = False\n self._name = 'Main'\n self._last_worker_number = 0\n self._log_filename = None\n self._pid = os.getpid()\n self._process_information = process_info.ProcessInfo(self._pid)\n self._process_information_per_pid = {}\n self._processes_per_pid = {}\n self._quiet_mode = False\n self._rpc_clients_per_pid = {}\n self._rpc_errors_per_pid = {}\n self._status_update_active = False\n self._status_update_thread = None\n self._storage_writer = None\n self._worker_memory_limit = definitions.DEFAULT_WORKER_MEMORY_LIMIT",
"def __init__(self, com: AbsCommunicationProcess):\n super().__init__()\n self.__com = com\n self.__is_started = False",
"def __init__(self, pid, binary_path, host_name, node_name, telemetry):\n self.pid = pid\n self.binary_path = binary_path\n self.host_name = host_name\n self.node_name = node_name\n self.telemetry = telemetry",
"def __init__(self, args, shell, userns):\n super(SSHMgr, self).__init__(args, shell, userns)\n parser = MagicArgumentParser()\n parser.add_argument('--host', type=str, default='localhost',\n help='Machine to reach (default = localhost)')\n parser.add_argument('--pid', type=str,\n help='Variable to store SSH process pid')\n _args, cmd = parser.parse_known_args(args)\n self.cmd = self._wlbin + [_args.host, ] + cmd\n # SSH Cannot fork into background without a command to execute.\n # Popen instance is created in submit",
"def __init__(self, argv):\n self._argv = argv",
"def __init__(self, readhandle):\n # Name our self\n threading.Thread.__init__(self, name=\"ParentProcessChecker\")\n\n # Store the handle\n self.readhandle = readhandle",
"def start(self) -> None:\n JavaGate().exec_process_instance(\n self._user,\n self._project,\n self.name,\n \"\",\n self.worker_group,\n self.warning_type,\n self.warning_group_id,\n 24 * 3600,\n )",
"def spawn(self, classpath, main, jvm_options=None, args=None, **subprocess_args):\r\n cmd = self._create_command(*self._scrub_args(classpath, main, jvm_options, args))\r\n return self._spawn(cmd, **subprocess_args)",
"def __init__(self):\n self.buienradar_rpc = {\"rain_at\": self.rain_at,\n \"rain_max\": self.rain_max\n }\n\n multiprocessing.Process.__init__(self)\n self.name = 'buienradar'\n self.shutdown = False\n self._sched = None\n self._rain = []"
] | [
"0.6975941",
"0.6958235",
"0.67394876",
"0.6490618",
"0.6321262",
"0.6161817",
"0.612476",
"0.6118696",
"0.6096931",
"0.6078772",
"0.60693926",
"0.60651165",
"0.60423166",
"0.60343724",
"0.6023563",
"0.600156",
"0.5997967",
"0.5996555",
"0.5996109",
"0.5992397",
"0.59232205",
"0.5911356",
"0.5887123",
"0.58784014",
"0.58750504",
"0.5862586",
"0.5821235",
"0.58133376",
"0.580679",
"0.5776895"
] | 0.8258955 | 0 |
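Editorial aside on the row above: the positive document only stores the classpath, target class, and arguments; the subprocess itself is launched elsewhere. A generic sketch of that launch step is given below, assuming a POSIX classpath separator; the entry names are illustrative.

```python
import subprocess

def launch_java(classpath_entries, main_class, args=()):
    """Start `main_class` on the given classpath and capture its stdout/stderr pipes."""
    classpath = ":".join(classpath_entries)  # use ";" as the separator on Windows
    cmd = ["java", "-cp", classpath, main_class, *args]
    return subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)

# proc = launch_java(["lib/app.jar", "build/classes"], "com.example.Main", ["--port", "8080"])
```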
slick solution in python ONLY zeros = [0 for i in range(zeros_and_ones.count(0))] ones = [1 for j in range(zeros_and_ones.count(1))] return zeros + ones | def zeros_before_ones(zeros_and_ones):
index_i = 0
last_index = len(zeros_and_ones) - 1
while index_i < last_index:
if zeros_and_ones[index_i] == 1 and zeros_and_ones[last_index] == 0:
zeros_and_ones[index_i], zeros_and_ones[last_index] = zeros_and_ones[last_index], zeros_and_ones[index_i]
index_i += 1
last_index -= 1
# print(zeros_and_ones)
# TODO: NEEDS IMPROVEMENTS! zeros_and_ones | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def solution2(n):\n ones = 0\n while n > 0:\n if n & 1:\n ones += 1\n n = n >> 1\n\n return 0 if ones % 2 == 0 else 1",
"def c(ixs):\n return sum(range(1, sum((i > 0 for i in ixs)) + 1))",
"def _iter_restrict(self, zeros, ones):\n inputs = list(self.inputs)\n unmapped = {}\n for i, v in enumerate(self.inputs):\n if v in zeros:\n inputs[i] = 0\n elif v in ones:\n inputs[i] = 1\n else:\n unmapped[v] = i\n vs = sorted(unmapped.keys())\n for num in range(1 << len(vs)):\n for v, val in boolfunc.num2point(num, vs).items():\n inputs[unmapped[v]] = val\n yield sum((val << i) for i, val in enumerate(inputs))",
"def addOnes(x,m):\n n = x.size/m\n one = np.ones((m,1))\n x = x.reshape((m,n))\n judge = np.sum(x[:,0] == one.flatten())\n if judge != m:\n x = np.hstack((one,x))\n return x",
"def zero_to_ones(L):\n return [-1 if val == 0 else 1 for val in L]",
"def matOnes(shape):\n return [[1 for y in range(shape[1])] \\\n for x in range(shape[0])]",
"def count_ones(n):\n s = 0\n mask = 1\n for i in xrange(16):\n if (mask << i) & n:\n s += 1\n return s",
"def rzeros(nums):\n total = len(nums)\n zeros = 0\n nozeros = []\n for x in nums:\n if x != 0:\n nozeros.append(x) \n else:\n zeros = zeros + 1\n \n return (nozeros, total - zeros, zeros)",
"def fn(i, j, mask):\n if j == n: return 1 \n if i == m: return fn(0, j+1, mask)\n ans = 0 \n for x in 1<<2*i, 1<<2*i+1, 0b11<<2*i: \n mask0 = mask ^ x\n if mask0 & 0b11<<2*i and (i == 0 or (mask0 >> 2*i) & 0b11 != (mask0 >> 2*i-2) & 0b11): \n ans += fn(i+1, j, mask0)\n return ans % 1_000_000_007",
"def _one_pass(nums):\n pattern = [0, 1, 0, -1]\n return [\n int(str(sum(\n v * pattern[(i // n) % len(pattern)]\n for i, v in enumerate(nums, start=1)\n ))[-1])\n for n in range(1, len(nums) + 1)\n ]",
"def zeros(s, zero=0):\n\treturn [zeros(s[1:] ) for i in range(s[0] ) ] if not len(s) else zero",
"def zeros(n):\n return [0 for i in range(n)]",
"def single_number(nums: List[int]) -> int:\n ones = 0\n twos = 0\n\n for num in nums:\n # Record number that appears twice.\n twos |= (ones & num)\n\n # Record number that appears once.\n ones ^= num\n\n # Remove number that is on ones and twos.\n common_bit_mask = ~(ones & twos)\n ones &= common_bit_mask\n twos &= common_bit_mask\n return ones",
"def ones(dice):\n return sum([x for x in dice if x == 1])",
"def solution2(array):\n if not array:\n return\n\n n_rows = len(array)\n n_cols = len(array[0])\n\n # Since we are using first row and column as space to mark rows and columns to be zeroed,\n # first check whether they have any zeros or not\n first_row_zero = False\n first_column_zero = False\n \n for c in range(n_cols):\n if not array[0][c]:\n first_row_zero=True\n \n for r in range(n_rows):\n if not array[r][0]:\n first_column_zero = True\n\n # Now start from 1,1 and check for 0. If 0 is found in certain row and column, mark first row\n # and column\n for r in range(1, n_rows):\n for c in range(1, n_cols):\n if not array[r][c]:\n array[0][c] = 0\n array[r][0] = 0\n\n # Now iterate through entire matrix again starting from 1,1 and set element to zero if \n # first row or column for that element is zero\n for r in range(1, n_rows):\n for c in range(1, n_cols):\n if array[0][c]==0 or array[r][0]==0:\n array[r][c] = 0\n\n # Go back to the first row and column.\n if first_row_zero:\n for c in range(n_cols):\n array[0][c] = 0\n if first_column_zero:\n for r in range(n_rows):\n array[r][0] = 0\n\n return array",
"def solution(A):\n \n cars = 0\n ones = 0\n\n for i in range(len(A), 0, -1):\n\n if A[i-1] == 1:\n ones += 1\n else:\n cars += ones\n\n return (-1 if cars > 1000000000 else cars)",
"def _make_zero(p):\n\n return [pi == 0 for pi in p]",
"def solve_ok(number: int) -> int:\n return no_ones(number) % 2",
"def solution(num):\n \n res = [0] * (num + 1)\n for i in range(1,len(res)):\n res[i] = res[i>>1] + (i%2)\n \n return res",
"def duplicateZeros(self, arr: List[int]) -> None:\n zero = 0\n i = 0\n while i + zero < len(arr):\n if arr[i] == 0:\n zero += 1\n i += 1\n \n if i + zero > len(arr):\n arr[-1] = 0\n i -= 1\n zero -= 1\n \n i -= 1\n j = i + zero\n while j >= 0:\n if arr[i]:\n arr[j] = arr[i]\n else:\n arr[j] = 0\n j -= 1\n arr[j] = 0\n j -= 1\n i -= 1",
"def ones(cls):\n return super().ones(4, 4)",
"def test_count_binary_decisions(self):\n abs_centered_quantized_data_0 = numpy.array([0.75, 0.05, 0.1, 0.2, 0.2, 0.15], dtype=numpy.float32)\n bin_width_test_0 = 0.05\n abs_centered_quantized_data_1 = numpy.array([210., 6., 9., 6.], dtype=numpy.float32)\n bin_width_test_1 = 3.\n truncated_unary_prefix = 7\n \n (cumulated_zeros_0, cumulated_ones_0) = \\\n lossless.stats.count_binary_decisions(abs_centered_quantized_data_0,\n bin_width_test_0,\n truncated_unary_prefix)\n (cumulated_zeros_1, cumulated_ones_1) = \\\n lossless.stats.count_binary_decisions(abs_centered_quantized_data_1,\n bin_width_test_1,\n truncated_unary_prefix)\n print('1st experiment:')\n print('Number of occurrences of 0 for each binary decision computed by the function:')\n print(cumulated_zeros_0)\n print('Number of occurrences of 0 for each binary decision computed by hand:')\n print(numpy.array([0, 1, 1, 1, 2, 0, 0]))\n print('Number of occurrences of 1 for each binary decision computed by the function:')\n print(cumulated_ones_0)\n print('Number of occurrences of 1 for each binary decision computed by hand:')\n print(numpy.array([6, 5, 4, 3, 1, 1, 1]))\n print('\\n2nd experiment:')\n print('Number of occurrences of 0 for each binary decision computed by the function:')\n print(cumulated_zeros_1)\n print('Number of occurrences of 0 for each binary decision computed by hand:')\n print(numpy.array([0, 0, 2, 1, 0, 0, 0]))\n print('Number of occurrences of 1 for each binary decision computed by the function:')\n print(cumulated_ones_1)\n print('Number of occurrences of 1 for each binary decision computed by hand:')\n print(numpy.array([4, 4, 2, 1, 1, 1, 1]))",
"def fn(mask, k):\n if not mask: return 0 \n ans = inf \n for i in range(n): \n if mask & (1<<i): \n ans = min(ans, (nums1[i]^nums2[k]) + fn(mask^(1<<i), k+1))\n return ans",
"def majority_logical(*bit_arrays):\n\n if (len(bit_arrays) == 0):\n raise TypeError(\"len(bit_arrays) must be > 0.\")\n\n MINIMUM_MAJORITY = (len(bit_arrays) // 2) + 1\n\n answer = itertools.combinations(bit_arrays, MINIMUM_MAJORITY)\n answer = map(all, answer)\n answer = any(answer)\n return answer",
"def bool_both_zero_compute(juduged_min, juduged_max):\n dtype = juduged_min.dtype\n tensor_zero = topi.full(juduged_min.shape, dtype, dc.zero_const(dtype))\n min_abs = topi.abs(juduged_min)\n max_abs = topi.abs(juduged_max)\n min_max_replace = topi.add(min_abs, max_abs)\n # just check wether min and max are all zero, if true return 0\n bool_min_max_product_less_zero = less_compare_float32(min_max_replace, tensor_zero)\n bool_min_max_product_more_zero = less_compare_float32(tensor_zero, min_max_replace)\n bool_both_zero = topi.add(bool_min_max_product_less_zero, bool_min_max_product_more_zero)\n\n return bool_both_zero",
"def onequbit_modes(statemat):\n nqubit = int(np.log2(statemat.shape[0]))\n rep = np.array(list(itertools.product((0, 1), repeat=nqubit)))\n inds = [i for i, x in enumerate(np.sum(rep, 1)) if x==1]\n \n instates = np.around(statemat[:, inds], 3)\n\n outstates = np.zeros((len(inds), len(inds)), dtype=complex)\n #print(inds)\n for ii in range(len(inds)):\n shortstate = np.around(instates[sum(instates[:,ii].nonzero()), ii], 3).todense()\n outstates[:, ii] = np.squeeze( np.array( shortstate ) )\n \n return outstates",
"def duplicateZeros(self, arr: List[int]) -> None:\n i = 0\n j = 0\n n = len(arr)\n while i < n:\n if arr[i] == 0:\n j += 1\n i += 1\n j += 1\n i -= 1\n j -= 1\n while i >= 0:\n if j < n:\n arr[j] = arr[i]\n if arr[i] == 0:\n j -= 1\n if j < n:\n arr[j] = 0\n i -= 1\n j -= 1",
"def moveZeroes(self, nums: List[int]) -> None:\n count = 0\n ans = []\n for num in nums:\n if num != 0:\n ans.append(num)\n else:\n count += 1\n for zero in range(count):\n ans.append(0)\n return ans",
"def map_zero_one(x, a, b):\n assert b > a\n s = 1./(b - a)\n t = a/(a-b)\n y = s*x + t\n y[y>1] = 1\n y[y<0] = 0\n return y",
"def ones(cls):\n return super().ones(3, 3)"
] | [
"0.6483679",
"0.624784",
"0.61705774",
"0.610907",
"0.6090902",
"0.6031265",
"0.59823155",
"0.59540856",
"0.59308344",
"0.59170455",
"0.5842495",
"0.5830201",
"0.5826083",
"0.5814207",
"0.578468",
"0.57763344",
"0.5771295",
"0.57675946",
"0.5759372",
"0.5745411",
"0.5724746",
"0.5723461",
"0.5713244",
"0.5685539",
"0.5685301",
"0.5676476",
"0.5657328",
"0.56461656",
"0.5642397",
"0.5629498"
] | 0.6798596 | 0 |
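Editorial aside on the row above: the query sketches a counting solution while the positive document uses an in-place two-pointer swap and carries a TODO. Both variants are written out below as a sketch, assuming the input contains only 0s and 1s.

```python
def zeros_before_ones_counting(bits):
    """Rebuild the list from the zero count: O(n) time, O(n) extra space."""
    zeros = bits.count(0)
    return [0] * zeros + [1] * (len(bits) - zeros)

def zeros_before_ones_in_place(bits):
    """Two-pointer variant: swap misplaced 1s at the front with 0s at the back."""
    lo, hi = 0, len(bits) - 1
    while lo < hi:
        if bits[lo] == 0:
            lo += 1
        elif bits[hi] == 1:
            hi -= 1
        else:
            bits[lo], bits[hi] = bits[hi], bits[lo]
    return bits
```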
Registers all the JRPC overloaders in the jrpc server | def register_overloaders(jrpc_server: JRPCServer, receiver) -> None:
jrpc_server.register_overloader(
'Application.GetProperties', lambda server: GetPropertiesOverloader(server, receiver))
jrpc_server.register_overloader(
'Application.SetMute', lambda server: SetMuteOverloader(receiver))
jrpc_server.register_overloader(
'Application.SetVolume', lambda server: SetVolumeOverloader(receiver))
jrpc_server.register_overloader(
'Application.Quit', lambda server: ApplicationQuitOverloader(receiver))
jrpc_server.register_overloader(
'System.Hibernate', lambda server: ApplicationQuitOverloader(receiver))
jrpc_server.register_overloader(
'System.Shutdown', lambda server: ApplicationQuitOverloader(receiver))
jrpc_server.register_overloader(
'System.Suspend', lambda server: ApplicationQuitOverloader(receiver))
jrpc_server.register_overloader(
'System.GetProperties', lambda server: SystemPropertiesOverloader()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def register_rpc_proxies(self):\n for rpc_name in self.rpc_proxy_list:\n logger.debug('Registering RPC to Proxy: {}'.format(rpc_name))\n\n class RPCProxy:\n\n def __init__(self, local_session, rpc_name):\n self._local_session = local_session\n self._rpc_name = rpc_name\n\n async def __call__(self, *args, **kwargs):\n logger.debug('Proxying RPC {}, with args {}, kwargs {}'.format(self._rpc_name, args, kwargs))\n return await self._local_session.call(self._rpc_name, *args, **kwargs)\n\n await self.remote_session.register(RPCProxy(self.local_session, rpc_name), rpc_name)",
"def _registerOnServer(self, daemon, nameserver,vclock):\n uri = daemon.register(self)\n nameserver.register(self._name, uri)\n self.updateVectorClock(vclock)\n print(\"Gateway registered. Name {} and uri {} \".format(self._name,uri))",
"def _setup_rpc(self):\n pass",
"def xmlrpc_methods():",
"def register_peer(self):\n try:\n self.get_file_list()\n num_files = len(self.file_list)\n total_ops = self.key_end - self.key_start\n run_ops = total_ops/num_files\n print \"Staring Benchmark Register Peer with Server...\"\n t1 = time.time()\n for i in range(run_ops):\n for file in self.file_list:\n self.service.put(file, self.peer_id)\n t2 = time.time()\n total = run_ops * num_files\n print \"%s Register operations = %s sec\" % (total,t2-t1)\n print \"per Register operation = %s sec\" % ((t2-t1)/total)\n print \"per Register operation = %s msec\" % (((t2-t1)/total)*1000)\n except Exception as e:\n print \"Registering Peer Error, %s\" % e\n sys.exit(1)",
"def registerRPC(self, call, args = None):\n\n rpc = RemoteProcedureCall(self, len(self.rpc), args)\n self.rpc.append(call)\n return rpc",
"def register_extensions(app):\n grpc_channel = grpc.insecure_channel(\n f\"{app.config['GRPC_SERVICE']}:{app.config['GRPC_PORT']}\",\n )\n grpc_client = GrpcClient(grpc_channel)\n grpc_client.init_app(app)",
"def make_json_handler(rpc):\n\n class JSONRPCHandler(BaseHTTPRequestHandler):\n \"\"\"\n A request handler for http.server that speaks JSON-RPC.\n \"\"\"\n def _validate_http_request(self):\n \"\"\"\n Ensures that we understand the HTTP portion of the request.\n \"\"\"\n if self.path != '/':\n print('Invalid request path:', self.path)\n self.send_error(HTTPStatus.NOT_FOUND, 'Request Must Have Path Of /')\n raise ValueError\n\n content_type = self.headers.get('Content-Type', None)\n if content_type != 'application/json':\n print('Invalid request Content-Type:', self.path)\n self.send_error(HTTPStatus.BAD_REQUEST, 'Content-Type Must Be application/json')\n raise ValueError\n\n def _validate_rpc_request(self, request):\n \"\"\"\n Ensures that we understand the JSON-RPC portion of the request.\n \"\"\"\n if request.get('jsonrpc', None) != '2.0':\n raise ValueError('Invalid jsonrpc: must be \"2.0\"')\n\n id = request.get('id', None)\n if not (id is None or isinstance(id, (str, int, float))):\n raise ValueError('Invalid id: must be null, string or number')\n\n method = request.get('method', None)\n if not isinstance(method, str):\n raise ValueError('Invalid method: must be string')\n\n params = request.get('params', [])\n if not isinstance(params, (dict, list)):\n raise ValueError('Invalid params: must be array or object')\n\n def _build_rpc_error(self, id, error, exception, keep_null_id=False):\n \"\"\"\n Returns an error response that can be encoded to JSON.\n\n By default this respects the ID of the request, and returns None if the\n ID is also None. To override this behavior, set keep_null_id=True.\n \"\"\"\n if id is None and not keep_null_id:\n return None\n\n message = RPC_ERROR_MESSAGES.get(error, str(exception))\n\n return {\n 'jsonrpc': '2.0',\n 'id': id,\n 'error': {\n 'code': error.value,\n 'message': message,\n 'data': {\n 'stacktrace': str(exception) + '\\n' + '\\n'.join(traceback.format_tb(exception.__traceback__))\n }\n }\n }\n\n def _build_rpc_result(self, id, result):\n \"\"\"\n Returns a result response that can be encoded to JSON.\n \"\"\"\n if id is None:\n return None\n\n return {\n 'jsonrpc': '2.0',\n 'id': id,\n 'result': result\n }\n\n def _process_request(self, request):\n \"\"\"\n Calls a single RPC function and returns the result.\n \"\"\"\n try:\n self._validate_rpc_request(request)\n except ValueError as err:\n return self._build_rpc_error(None, RpcErrors.INVALID_REQUEST, err, keep_null_id=True)\n\n id = request.get('id', None)\n\n try:\n method = getattr(rpc, request['method'])\n except AttributeError as err:\n return self._build_rpc_error(id, RpcErrors.METHOD_NOT_FOUND, err)\n\n try:\n params = request.get('params', None)\n if params is None:\n result = method()\n elif isinstance(params, list):\n result = method(*params)\n elif isinstance(params, dict):\n result = method(**params)\n\n return self._build_rpc_result(id, result)\n\n except TypeError as err:\n return self._build_rpc_error(id, RpcErrors.INVALID_PARAMS, err)\n except Exception as err:\n return self._build_rpc_error(id, RpcErrors.INTERNAL_ERROR, err)\n\n def _send_json(self, value):\n \"\"\"\n Dumps the value to a JSON string, and sets the appropriate headers to\n return it\n \"\"\"\n raw_value = json.dumps(value).encode('utf-8')\n\n self.send_response(200, 'OK')\n for header, value in CORS_HEADERS.items():\n self.send_header(header, value)\n\n self.send_header('Content-Type', 'application/json')\n self.send_header('Content-Length', str(len(raw_value)))\n self.end_headers()\n\n 
self.wfile.write(raw_value)\n\n def do_POST(self):\n \"\"\"\n Parses and processes a single or batch JSON-RPC request.\n \"\"\"\n try:\n self._validate_http_request()\n except ValueError:\n return\n\n content_length = int(self.headers.get('Content-Length', '0'))\n request_bytes = self.rfile.read(content_length)\n while len(request_bytes) < content_length:\n request_bytes += self.rfile.read(content_length - len(request_bytes))\n\n request_raw = request_bytes.decode('utf-8')\n try:\n request = json.loads(request_raw)\n except ValueError as err:\n error = self._build_rpc_error(None, RpcErrors.PARSE_ERROR, err, keep_null_id=True)\n self._send_json(error)\n return\n\n if isinstance(request, list):\n responses = [self._process_request(single) for single in request]\n response = [r for r in responses if r is not None]\n elif isinstance(request, dict):\n response = self._process_request(request)\n else:\n try:\n raise ValueError\n except ValueError as err:\n error = self._build_rpc_error(None, RpcErrors.INVALID_REQUEST, err)\n self._send_json(error)\n return\n\n if response is not None:\n self._send_json(response)\n else:\n self.send_response(200, 'OK')\n self.end_headers()\n\n def do_OPTIONS(self):\n \"\"\"\n Sends back the headers necessary to support CORS\n \"\"\"\n print('Processing CORS OPTIONS request')\n self.send_response(200, 'OK')\n for header, value in CORS_HEADERS.items():\n self.send_header(header, value)\n\n self.end_headers()\n\n return JSONRPCHandler",
"def register_resources(self):\n raise NotImplementedError",
"def register_options(options):\n return (\n options\n .register('jsonFilterFile',\n type_=str,\n default=None,\n description=\"Path to JSON file containing certified runs and luminosity blocks.\")\n .register('useHLTFilter',\n type_=bool,\n default=False,\n description=\"If True, only events triggered by one of the skimmed paths will be \"\n \"written out.\")\n .register('jetCollections',\n type_=str,\n default=[],\n multiplicity='list',\n description=\"The names of the jet collections to use (e.g. 'AK4PFCHS').\")\n .register('jecVersion',\n type_=str,\n default=None,\n description=\"Tag of JEC version to use for e.g. JEC uncertainties.\")\n .register('jecFromGlobalTag',\n type_=bool,\n default=False,\n description=\"If True, the JECs will be looked up in the conditions database \"\n \"(CondDB/Frontier) under the current global tag. If False, the \"\n \"text files for `jecVersion` will be used.\")\n .register('jerVersion',\n type_=str,\n default=None,\n description=\"Tag of JER version to use for e.g. jet smearing.\")\n .register('jerMethod',\n type_=str,\n default='stochastic',\n description=\"Method to use for JER smearing. One of: 'stochastic', 'hybrid'\")\n .register('jerGenMatchPtSigma',\n type_=float,\n default=3.0,\n description=\"Size of Gaussian core for 'hybrid' JER smearing.\")\n .register('jetIDSpec',\n type_=str,\n default=None,\n description=\"Version of Jet ID to use (e.g. '2016').\")\n .register('jetIDWorkingPoint',\n type_=str,\n default=None,\n description=\"Working point of Jet ID to use (e.g. 'TightLepVeto').\")\n .register('prefiringWeightFilePath',\n type_=str,\n default=\"\",\n description=\"Path to ROOT file containing prefiring weights.\")\n .register('prefiringWeightHistName',\n type_=str,\n default=\"\",\n description=\"Name of histogram inside prefiring weights file (e.g. 'L1prefiring_jetpt_2016BCD').\")\n .register('useObjectBasedJetID',\n type_=bool,\n default=False,\n description=\"If True, only jets passing the ID specified via 'jetIDSpec' and `jetIDWorkingPoint` will be considered valid.\")\n .register('checkForCompleteness',\n type_=bool,\n default=False,\n description=(\"(for testing) If True, will run some checks on the \"\n \"Ntuple output to ensure all branches are written out \"\n \"and no branch is omitted.\"))\n .register('stitchingWeight',\n type_=float,\n default=1.0,\n description=(\"(deprecated) The output branch 'stitchingWeight' \"\n \"will contain this value for each event. Can then be \"\n \"used when stitching together different samples.\"))\n .register('doJECUncertaintySources',\n type_=bool,\n default=False,\n description=\"Fill ntuple branch with JEC correction factors for individual JEC uncertainty sources.\")\n .register('doPrescales',\n type_=bool,\n default=False,\n description=\"Write out trigger prescales to Ntuple.\")\n .register('edmOut',\n type_=bool,\n default=False,\n description=\"(for testing only) Write out EDM file.\")\n )",
"def _register_services(self, pipeline):\n\n pipeline.register_service(self._aprs_service)",
"def register_server(self, ppclassname, ppclass):\n\n global rpc_pp_class\n\n def accept(cmd, data, eof, sock, address):\n \"\"\" This is called by the rpc.py's tcp listener when a remote\n client connects \"\"\"\n\n ppclass = rpc_pp_class.get(cmd)\n if ppclass == None:\n warning('No PP class found: %s\\n' %(cmd))\n return RPC_CLOSE\n\n # Instantiate the given server class\n ppclass(address=address, sock=sock, data=data)\n return RPC_RELEASE\n\n if register_rpchandler(ppclassname, accept):\n rpc_pp_class[ppclassname] = ppclass",
"def register_router(self, router):\n for prefix, viewset, basename in router.registry:\n self.register(prefix, viewset, base_name=basename)",
"def register_rpc_backend(backend_name, init_rpc_backend_handler):\n rpc_backend_registry = _get_rpc_backend_registry()\n if backend_name in rpc_backend_registry:\n raise RuntimeError(\"Rpc backend {}: already registered\".format(backend_name))\n rpc_backend_registry[backend_name] = init_rpc_backend_handler",
"def __init__(self):\n super(LoopbackTransport, self).__init__([_JSON_RPC_SERVER_PATH])",
"def configure_rpc(cls, scheme=None):\r\n scheme = scheme or cls._meta.scheme\r\n\r\n if not scheme:\r\n return\r\n\r\n if isinstance(scheme, basestring):\r\n scheme = importlib.import_module(scheme)\r\n\r\n cls.scheme_name = scheme.__name__\r\n\r\n methods = getattr(scheme, '__all__', None) \\\r\n or [m for m in dir(scheme) if not m.startswith('_')]\r\n\r\n for mname in methods:\r\n method = getattr(scheme, mname)\r\n if hasattr(method, '__call__'):\r\n cls.methods[\"{0}.{1}\".format(\r\n cls.scheme_name, method.__name__)] = method",
"def remotes():",
"def registerServer(srv):\n srv.setListenAddress(hostname)\n srv.setMachine(getMBean('/Machines/'+machineName))",
"def register(self):\n\n RPCObjectsRegistry.add(self)",
"def server_plugin():",
"async def test_multiple_rpc_transports(loop, server, redis_server_b, consume_rpcs):\n registry.add(ApiA())\n registry.add(ApiB())\n\n manually_set_plugins(plugins={})\n\n redis_server_a = server\n\n port_a = redis_server_a.tcp_address.port\n port_b = redis_server_b.tcp_address.port\n\n logging.warning(f'Server A port: {port_a}')\n logging.warning(f'Server B port: {port_b}')\n\n config = Config.load_dict({\n 'bus': {\n 'schema': {\n 'transport': {'redis': {'url': f'redis://localhost:{port_a}'}},\n }\n },\n 'apis': {\n 'default': {\n 'rpc_transport': {'redis': {'url': f'redis://localhost:{port_a}'}},\n 'result_transport': {'redis': {'url': f'redis://localhost:{port_a}'}},\n },\n 'api_b': {\n 'rpc_transport': {'redis': {'url': f'redis://localhost:{port_b}'}},\n 'result_transport': {'redis': {'url': f'redis://localhost:{port_b}'}},\n },\n }\n })\n\n bus = BusNode(name='', parent=None, bus_client=lightbus.BusClient(config=config, loop=loop))\n asyncio.ensure_future(consume_rpcs(bus))\n await asyncio.sleep(0.1)\n\n await bus.api_a.rpc_a.call_async()\n await bus.api_b.rpc_b.call_async()",
"def register_specs(self, *args):\n self._corespecs_queue.extend(*args)",
"def _initiate_registry_from_torchlib(\n self, torchlib_registry: registration.Registry\n ):\n for aten_name, aten_overloads_func in torchlib_registry.items():\n for func in aten_overloads_func.overloads:\n self.register(\n aten_name,\n self._opset_version,\n func,\n custom=False,\n )",
"def _serve(self) -> None:\n for instrument in self._config[\"instruments\"]:\n uri = self._daemon.register(instrument, objectId=str(instrument))\n self._services[instrument.id] = str(uri)\n logger.success(f\"Registered {instrument} at {uri}\")\n self.uri = self._daemon.register(self, objectId=self.servername)\n logger.success(f\"Registered self at {self.uri}\")",
"def __init__(self,server_list):\n self.workers=[]\n self.worker_by_name={}\n worker_id = 1\n for host,port in server_list:\n # Add the uid here can help with port conflicts, but only works\n # on Unix clusters. We really need to work out a daemon service\n # model that makes the port mess transparent.\n port = port #+ os.getuid()\n new_worker = sync_cluster.standard_sync_client(host,port,worker_id)\n self.workers.append(new_worker)\n self.worker_by_name[host] = new_worker\n worker_id = worker_id + 1",
"def extend(self, router):\n self.registry.extend(router.registry)",
"def register_classes():\n DiffuseCompChain.register_class()\n CatalogCompChain.register_class()\n DiffuseAnalysisChain.register_class()",
"def ListWorkers(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def server_plugin_list(ctx):\n data = ctx.obj.get_all_plugins()\n output_json_data(data)",
"def api_all():\n update_pvserver_instances(pvserver_instances)\n return jsonify(pvserver_instances)"
] | [
"0.5887183",
"0.53439647",
"0.5303127",
"0.5229928",
"0.5181709",
"0.51379925",
"0.5042705",
"0.5018712",
"0.50179935",
"0.5007511",
"0.50063205",
"0.50051254",
"0.49503332",
"0.4928616",
"0.4922325",
"0.4869519",
"0.4867418",
"0.48619875",
"0.4855552",
"0.4855092",
"0.48424605",
"0.48338652",
"0.48154017",
"0.4812718",
"0.47886303",
"0.4785449",
"0.47462106",
"0.47255152",
"0.47188923",
"0.47185788"
] | 0.78924644 | 0 |
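Editorial aside on the row above: the positive document maps JSON-RPC method names to factory lambdas that build an overloader around a receiver. The registry class below is a generic, library-agnostic illustration of that registration pattern; the class and method names here are invented for illustration and are not the actual jrpc API.

```python
class MethodOverloaderRegistry:
    """Map JSON-RPC method names to factories that build the matching overloader."""

    def __init__(self):
        self._factories = {}

    def register_overloader(self, method, factory):
        self._factories[method] = factory

    def build(self, method, server):
        factory = self._factories.get(method)
        if factory is None:
            raise KeyError(f"no overloader registered for {method!r}")
        return factory(server)

# registry = MethodOverloaderRegistry()
# registry.register_overloader("Application.SetMute", lambda server: SetMuteOverloader(receiver))
```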
Accumulate observed stars on the same dates. | def accumulate_dates(dates, stars):
start = min(dates)
stop = max(dates)
t_range = (stop - start).days
a_dates = [start + timedelta(days = n) for n in range(t_range + 1)]
a_stars = [0 for n in range(t_range + 1)]
for i in range(len(dates)):
idx = (dates[i] - start).days
a_stars[idx] = a_stars[idx] + stars[i]
return a_dates, a_stars | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _update_rating_history(self, rating: float, date: Union[str, float]):\n self.rating_history.append((date, rating))",
"def _increment_num_user_stars(user_id, match, now):\n\tassert match.is_streamed\n\n\tmissing = session.query(CalendarEntry)\\\n\t\t\t.filter(\n\t\t\t\tCalendarEntry.user_id == user_id,\n\t\t\t\tCalendarEntry.match_id == match.id)\\\n\t\t\t.count() == 0\n\tif missing:\n\t\t# No existing CalendarEntry; create a new one.\n\t\tentry = _get_calendar_entry(user_id, match)\n\t\tsession.add(entry)\n\telse:\n\t\t# Increment the count of stars for an existing CalendarEntry.\n\t\tsession.execute(CalendarEntries.update()\n\t\t\t\t.where(sa.and_(\n\t\t\t\t\tCalendarEntry.user_id == user_id,\n\t\t\t\t\tCalendarEntry.match_id == match.id))\n\t\t\t\t.values({CalendarEntry.num_user_stars: CalendarEntry.num_user_stars + 1}))",
"def average_review_stars():\n # get all un-counted reviews\n reviews = Review.query.filter_by(marked=False).join(Restaurant)\\\n .with_entities(Review, Restaurant).all()\n logging.info(f\"Averaging review stars of {len(reviews)} retrieved reviews..\")\n for review, restaurant in reviews:\n # compute running mean of reviews\n restaurant.num_reviews += 1\n restaurant.avg_stars = 1/restaurant.num_reviews * \\\n (restaurant.avg_stars * (restaurant.num_reviews-1) + review.stars)\n review.marked = True\n # update rows \n db.session.commit()",
"def running_total(date_list):\n return sum(d.price for d in date_list)",
"def SumSpectra(A, Rates, Times, offset=0.0):\n\n #print '*** in SumSpectra: ***'\n result = np.zeros( Times.shape )\n for i in range(len(Rates)):\n #print '***', Rates[i]\n result += A[i]*np.exp( -1.0*Rates[i]*Times) \n return result",
"def update(self, delta_time):\n for b in self.star_list:\n b.update()",
"def daily_motion(cls, date):\n mean_motion = 360 / cls.SIDEREAL_YEAR\n anomaly = cls.mean_position(date, cls.ANOMALISTIC_YEAR)\n epicycle = 14/360 - abs(cls.sine(anomaly)) / 1080\n entry = quotient(float(anomaly), angle(0, 225, 0))\n sine_table_step = cls.sine_table(entry + 1) - cls.sine_table(entry)\n factor = -3438/225 * sine_table_step * epicycle\n return mean_motion * (factor + 1)",
"def new_entry_update(cls, summary):\n totaltimes = [x.totaltime for x in summary.entries]\n total = sum(totaltimes, timedelta())\n average = total / len(totaltimes)\n summary.total_time = total\n summary.daily_average = average",
"def _accumulate_rewards(self) -> None:\n for agent, reward in self.rewards.items():\n self._cumulative_rewards[agent] += reward",
"def apply(self, obs, fcst):\n D = obs.shape[0]\n LT = obs.shape[1]\n L = obs.shape[2]\n efcst = copy.deepcopy(fcst)\n for lt in range(LT):\n day = int(np.floor(lt / 24.0)) + 1\n for d in range(day, D):\n recent = obs[d, 0, :] - fcst[d, 0, :]\n yesterday = obs[d - day, lt, :] - fcst[d - day, lt, :]\n efcst[d, lt, :] = efcst[d, lt, :] + self.weight_recent[lt] * recent\n + self.weight_yesterday[lt] * yesterday\n\n return efcst",
"def visitCalculated(self, date):\n raise NotImplementedError()",
"def add_star(array, star_data, disk_star_ratio=0.001):\n left_bound = np.shape(star_data)[1]//2 - np.shape(array)[1]//2\n right_bound = np.shape(star_data)[1]//2 + np.shape(array)[1]//2\n\n # Cutting star data into the shape of the model\n star_data = star_data[:, left_bound:right_bound, left_bound:right_bound]\n star_data /= np.amax(star_data)\n\n star_addition = array * (disk_star_ratio) + star_data * (1-disk_star_ratio)\n\n return star_addition",
"def rate(self, rating, series, is_gs=False, counts=False):\n k = self.calculate_k(rating, counts)*1.1 if is_gs else self.calculate_k(rating, counts)\n rating.value = float(rating.value) + k * self.adjust(rating, series)\n rating.times += 1\n return rating",
"def __simulate_one_day__(self):\n self.compute()\n self.days.append(next(self.index))",
"def average(self, returns):\r\n return returns.mean() * self.day",
"def rate(self, rating, series, is_gs=False, counts=False):\n k = self.calculate_k(rating,counts)*1.1 if is_gs else self.calculate_k(rating,counts)\n rating.value = float(rating.value) + k * self.adjust(rating, series)\n rating.times += 1\n return rating",
"def refresh_accumulated_point(self, school_year=str(timezone.now().year - 1) + '-' + str(timezone.now().year), school_semester=1):\n total = 0\n for activity in self.activities.all():\n if activity.school_year == school_year and activity.semester == school_semester:\n total += activity.point\n total = total if total <= 30 else 30\n self.accumulated_point += total",
"def add_star_team(client_id, team_id, now=None):\n\tnow = _get_now(now)\n\n\ttry:\n\t\t# Get the indexed name of the team.\n\t\tteam_indexed_name = session.query(Team.indexed_name)\\\n\t\t\t\t.filter(Team.id == team_id)\\\n\t\t\t\t.one()\\\n\t\t\t\t.indexed_name\n\t\t# Add the client's star for the team.\n\t\tstarred_team = StarredTeam(user_id=client_id,\n\t\t\t\tteam_id=team_id,\n\t\t\t\tindexed_name=team_indexed_name,\n\t\t\t\tadded=now)\n\t\tsession.add(starred_team)\n\t\tsession.flush()\n\texcept sa_orm.exc.NoResultFound:\n\t\tsession.rollback()\n\t\traise common_db.DbException._chain()\n\texcept sa.exc.IntegrityError:\n\t\t# The flush failed because the client has already starred this team.\n\t\tsession.rollback()\n\t\traise common_db.DbException._chain()\n\n\t# Increment the count of stars for the team.\n\tsession.execute(Teams.update()\n\t\t\t.where(Team.id == team_id)\n\t\t\t.values({Team.num_stars: Team.num_stars + 1}))\n\n\t# If needed, add a CalendarEntry for each streamed match.\n\tmatches_cursor = session.query(MatchOpponent.match_id, Match)\\\n\t\t\t.join(Match, MatchOpponent.match_id == Match.id)\\\n\t\t\t.filter(MatchOpponent.team_id == team_id, MatchOpponent.is_streamed == True)\n\tfor match_id, match in matches_cursor:\n\t\t_increment_num_user_stars(client_id, match, now)\n\t\n\tsession.commit()",
"def _discounted_cumsum(self, rewards, rate=None):\n # HINT1: note that each entry of the output should now be unique,\n # because the summation happens over [t, T] instead of [0, T]\n # HINT2: it is possible to write a vectorized solution, but a solution\n # using a for loop is also fine\n rate = self.gamma if rate is None else rate\n\n rewards = np.array(rewards)\n disounted_return = list(\n accumulate(rewards[::-1], lambda ret, rew: rate * ret + rew))\n disounted_return = np.array(disounted_return)[::-1]\n return disounted_return",
"def accumulate(self,tod,weights,pixels):\n binFuncs.binValues(self.sigwei, pixels, weights=tod*weights)\n binFuncs.binValues(self.wei , pixels, weights=weights )\n if self.storehits:\n binFuncs.binValues(self.hits, pixels,mask=weights)",
"def update_mean_movie_rating(self):\n self.mean_movie_rating = self.ratings.groupby(['movie_id'])['rating'].mean().reset_index()",
"def avg_ttm_2y(df):\n return 0.5 * (df + df.shift(4))",
"def _update_current_ratings(self, pid, obs_time, rating, variance):\n\n\t\tself.current_player_ratings[pid]['rating'] = rating\n\t\tself.current_player_ratings[pid]['variance'] = variance\n\t\tself.current_player_ratings[pid]['last_obs'] = obs_time",
"def accumulate(self, days: int, dt: float, plot=True):\r\n self.floatCheck([days, dt])\r\n self.negValCheck([days, dt])\r\n t = np.linspace(0, days, int(days / dt) + 1)\r\n S, E, I, R = self._simulate(days, dt)\r\n # create a numpy array that will hold all of the values\r\n cases = np.zeros(len(I))\r\n # add up the total infected and removed at given time to account for everyone with the virus\r\n for i in range(len(I)):\r\n cases[i] = I[i] + R[i]\r\n # create a dictionary that holds the data for easy conversion to dataframe\r\n data1 = {\r\n \"Days\": t,\r\n \"Susceptible\": S,\r\n \"Exposed\": E,\r\n \"Infected\": I,\r\n \"Removed\": R,\r\n \"Total Cases\": cases,\r\n }\r\n # create the column labels\r\n labels = [\r\n \"Days\",\r\n \"Susceptible\",\r\n \"Exposed\",\r\n \"Infected\",\r\n \"Removed\",\r\n \"Total Cases\",\r\n ]\r\n # convert to dataframe\r\n df = pd.DataFrame(data=data1, columns=labels)\r\n if plot:\r\n # do some plotting\r\n df.plot(x=\"Days\", y=[\"Total Cases\"])\r\n plt.xlabel(\"Days\")\r\n plt.ylabel(\"Total Cases\")\r\n plt.show()\r\n # return dataframe\r\n return df",
"def _increment_date_data(klass, series, date_data):\n\n # delta is the timedelta in between events\n delta = timedelta(days=7 * series.every)\n date_data['start_date'] = date_data['start_date'] + delta\n date_data['end_date'] = date_data['end_date'] + delta",
"def get_track_rating_from_history(user_track_timestamp_MSD):\n time_format = \"%Y-%m-%dT%H:%M:%SZ\"\n user_rate_dict = dict()\n for user in user_track_timestamp_MSD:\n user_rate_dict[user] = dict()\n for key in user_track_timestamp_MSD[user]:\n length = len(user_track_timestamp_MSD[user][key])\n if length == 1:\n user_rate_dict[user][key] = 3\n continue\n\n # if a track played more than 10 times, 5 star rating\n if length > 10:\n user_rate_dict[user][key] = 5\n continue\n\n if length > 1:\n user_rate_dict[user][key] = 4\n\n # if a track played more than once in a single day, 5 star rating\n for i in range(0, length-1):\n diff_time = abs(time.mktime(time.strptime(user_track_timestamp_MSD[user][key][i], time_format)) \\\n - time.mktime(time.strptime(user_track_timestamp_MSD[user][key][i+1], time_format))) /3600\n if diff_time < 24:\n user_rate_dict[user][key] = 5\n break\n if user_rate_dict[user][key] == 5:\n continue\n\n # if a track played more than 4 times per month, 5 star rating\n if length > 4:\n for i in range(0, length-4):\n diff_time = abs(time.mktime(time.strptime(user_track_timestamp_MSD[user][key][i], time_format)) \\\n - time.mktime(time.strptime(user_track_timestamp_MSD[user][key][i+3], time_format))) /3600/24\n if diff_time < 30:\n user_rate_dict[user][key] = 5\n break\n if user_rate_dict[user][key] == 5:\n continue\n\n return user_rate_dict",
"def update_mean_user_rating(self):\n self.mean_user_rating = self.ratings.groupby(['user_id'])['rating'].mean().reset_index()",
"def accumulate_privacy_spending(self, sigma=1, num_examples=5040):\n q = tf.cast(num_examples, tf.float64) * 1.0 / self._total_examples\n\n moments_accum_ops = []\n for i in range(len(self._log_moments)):\n moment = self._compute_log_moment()\n moments_accum_ops.append(tf.compat.v1.assign_add(self._log_moments[i], moment[i]))\n #print(moments_accum_ops)\n return tf.group(*moments_accum_ops)",
"def sum(self):\n\n return time_stat(self, stat=\"sum\")",
"def updateAllShifts(shiftList):\n \n for shift in shiftList.measurements:\n averageShiftValue(shift)"
] | [
"0.5819865",
"0.562832",
"0.5501563",
"0.5379064",
"0.52238375",
"0.5169488",
"0.5166707",
"0.5135347",
"0.50705355",
"0.50331134",
"0.50316",
"0.50230944",
"0.5007228",
"0.4953354",
"0.49355468",
"0.4928901",
"0.49267507",
"0.49242172",
"0.49139744",
"0.49059406",
"0.48995715",
"0.48837632",
"0.48814723",
"0.48799047",
"0.48688054",
"0.48593923",
"0.48348004",
"0.48159477",
"0.48131004",
"0.47940797"
] | 0.7347785 | 0 |
Calculate the estimated number of stars observed during a VLASS observation. Assume 4.2 sec per pointing, as estimated by Paul. | def vlass_stars(duration, n_beams):
n_pointings = duration//4.2
n_observed = n_pointings*n_beams
return n_observed | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ventilation_rate(self):\n # TODO: calculate based on MERV ratings/efficiency/power/etc.\n return (\n sum(v.calculate_ach(self.volume) for v in self.air_quality_measures)\n + self.outdoor_air_ventilation\n )",
"def getStarRating(waveHeight, windDir, avgWind, tideHeight):\n\n starRating = 0\n\n # wave height\n if waveHeight > 2:\n starRating += 4\n elif waveHeight > 1.6:\n starRating += 3\n elif waveHeight > 1.4:\n starRating += 2\n elif waveHeight > 1.2:\n starRating += 1\n\n # wind direction\n if windDir >= 270 or windDir <= 30:\n starRating += 1\n\n # wind strength\n if avgWind < 15:\n starRating += 1\n\n # tide\n if tideHeight < 1.2:\n starRating += 1\n elif tideHeight > 2.2:\n starRating = 1\n\n # check upper bound of 5 stars\n if starRating > 5:\n starRating = 5\n elif waveHeight < 1:\n starRating = 0\n\n return starRating",
"def supernovae_rate(self, time, timestep, metallicity):\n # get the mass limits of the timesteps the user passed in. The\n # lower time corresponds to the higher stellar mass\n m_low = self.lifetimes.turnoff_mass(time + timestep, metallicity)\n m_high = self.lifetimes.turnoff_mass(time, metallicity)\n\n # check the bounds, since supernovae can only happen for certain\n # mass stars\n min_mass = self.sn_ii_model.sn.mass_boundary_low\n max_mass = self.sn_ii_model.sn.mass_boundary_high\n m_low = max(m_low, min_mass)\n m_high = min(m_high, max_mass)\n if m_low > max_mass or m_high < min_mass:\n return 0\n\n # Here we just integrate over the IMF to count the stars in this mass\n # range that die\n number = self._integrate_mass_smart(self.imf.normalized_dn_dm,\n m_low, m_high, source=\"massive\")\n return number / timestep",
"def ventilation_rate_per_second(self):\n return self.volume * self.outdoor_air_ventilation * 1000 / 3600",
"def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios",
"def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios",
"def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios",
"def __len__(self):\n return self.params['nbins_sfh']+2 # z, mass, met, + logsfr_ratios",
"def getavgvel(self):\n if self.total_time:\n return (6.28)/(self.total_time)",
"def ndpm(self):\n\n merged = pd.merge(left=self.test, right=self.predict, on=['user', 'item'], how='inner')[\n ['user', 'rating_x', 'rating_y']]\n ndpms = []\n for user in merged.user.unique():\n frame = merged[merged.user == user]\n if frame.shape[0] <= 1:\n continue\n C_plus = self.num_of_ordered_positive(frame, 'rating_x', 'rating_y')\n C_minus = self.num_of_ordered_negative(frame, 'rating_x', 'rating_y')\n C_u = self.num_of_ordered(frame, 'rating_x')\n if C_u == 0:\n continue\n C_s = self.num_of_ordered(frame, 'rating_y')\n C_u0 = C_u - (C_plus + C_minus)\n ndpms.append(1 - (C_minus + 0.5 * C_u0) / C_u)\n\n return sum(ndpms) / len(ndpms)",
"def P(lag):\n N = len(SP)\n ratios = SP[lag:N]/SP[0:N-lag]\n P = 100.*(ratios-1.)\n return P",
"def estimate_arpu(x):\n arpu = 0\n if x['mean_luminosity_km2'] > 5:\n # #10 year time horizon\n # for i in range(0, 10):\n # #discounted_arpu = (arpu*months) / (1 + discount_rate) ** year\n # arpu += (\n # (20*12) / (1 + 0.03) ** i\n # )\n return 20 * 12 * 10#arpu\n elif x['mean_luminosity_km2'] > 1:\n # for i in range(0, 10):\n # #discounted_arpu = (arpu*months) / (1 + discount_rate) ** year\n # arpu += (\n # (5*12) / (1 + 0.03) ** i\n # )\n return 5 * 12 * 10#arpu\n else:\n # for i in range(0, 10):\n # #discounted_arpu = (arpu*months) / (1 + discount_rate) ** year\n # arpu += (\n # (2*12) / (1 + 0.03) ** i\n # )\n return 2 * 12 * 10#arpu",
"def get_pvalue_thd(self):\n terminals_values = []\n for terminal in self.feature_tree.get_terminals():\n temp = self.get_mannwitneyu_pvalue(terminal)\n terminals_values.append(temp)\n if temp == 1:\n print('non siginificant')\n while 0 in terminals_values:\n terminals_values.remove(0)\n self.pvalue_thd = min(self.pvalue_thd,np.mean(terminals_values))\n #print('pvalue_thd',self.pvalue_thd)",
"def success_rate(x_tapes):\n return np.sum([is_success(x_tape) for x_tape in x_tapes]) / len(x_tapes)",
"def getRatio(probe_num, position_vector, shot_range, dir, day ='050119r'):\n ratio_x = 0\n ratio_y = 0\n ratio_z = 0\n # helm_B = [0,0,0]\n divideby = 0\n for shot in range(shot_range[0], shot_range[1]+1):\n print( 'On shot ', day+str(shot), ' for probe ',probe_num)\n x,y,z, currmax,helmB_new = probe_calib(day+str(shot), probe_num, position_vector,dir)\n ratio_x = ratio_x + x\n ratio_y = ratio_y + y\n ratio_z = ratio_z + z\n # helm_B = [helm_B[i] + helmB_new[i] for i in len(helmB)]\n divideby = divideby + 1 #averaging over the number of shots\n ratio_Bx = ratio_x/divideby\n ratio_By = ratio_y/divideby\n ratio_Bz = ratio_z/divideby\n # helmB = [helm_B]/divideby\n # print ratio_Bx, ratio_By, ratio_Bz, helmB\n # print(\"ratio_Bx %f, ratio_By %f, ratio_Bz %f, helmB%s\"%(ratio_Bx, ratio_By, ratio_Bz, helmB))\n Bx_sqr =ratio_x**2\n By_sqr =ratio_y**2\n Bz_sqr =ratio_z**2\n B = Bx_sqr + By_sqr+ Bz_sqr\n norm_factor = np.sqrt(B)\n ratio_Bx, ratio_By, ratio_Bz = [ratio_Bx, ratio_By, ratio_Bz]/norm_factor\n\n return (ratio_Bx, ratio_By, ratio_Bz, norm_factor)",
"def rmspe(self) -> float:\n return float(np.sqrt(np.mean(np.square(((self.true - self.predicted) / self.true)), axis=0)))",
"def _estimate(self, env):\n env_variables = env.env_variables\n obs = env_variables[0]\n current_player = env_variables[1]\n\n estimate = 0\n for player in range(len(obs)):\n # multiplier is +1 if player is same as current_player\n # multiplier is -1 if player is different from current_player\n multiplier = 2 * abs(player - current_player) - 1\n\n for row in range(len(obs[0])):\n for col in range(len(obs[0][0])):\n if obs[player, row, col] == 1:\n estimate += multiplier * (10 ** self._num_tokens_left_diagonally(obs, player, row, col))\n estimate += multiplier * (10 ** self._num_tokens_vertically(obs, player, row, col))\n estimate += multiplier * (10 ** self._num_tokens_right_diagonally(obs, player, row, col))\n estimate += multiplier * (10 ** self._num_tokens_horizontally(obs, player, row, col))\n\n return estimate",
"def score(cur_ven, ven):\r\n try:\r\n alpha = 750\r\n numerator = (ven[\"rating\"] * 0.75) + (2.5 * (1- eulers**(-ven[\"ratingSignals\"]/144)))\r\n cur_coord = (cur_ven[\"location\"][\"lat\"], cur_ven[\"location\"][\"lng\"])\r\n ven_coord = (ven[\"location\"][\"lat\"], ven[\"location\"][\"lng\"])\r\n denominator = vincenty(cur_coord, ven_coord).meters + alpha\r\n except Exception as e:\r\n print \"{}, \\n has produced an error from {}\".format(ven[\"name\"], e)\r\n return float(\"-inf\")\r\n return numerator / denominator",
"def get_rating(self):\n self.total = sum(int(review['stars']) for review in self.reviews.values())\n if self.total > 0:\n return round(self.total / self.reviews.count(), 1)\n else:\n return self.total",
"def getActualNumObs(avgNumObs, proportion):\n result = round(avgNumObs*proportion)\n return result",
"def count_star(self, tokens):\n return self.counts[tokens] - self.beta",
"def dishlist_avg_cal(n:list)->float:\r\n all_cal = dishlist_cal(n)\r\n return sum(all_cal)/len(all_cal)",
"def numberOfSamples (self) :\n S = self.mdp.S\n A = self.mdp.A\n gamma = self.mdp.gamma\n\n factor = 1 / (self.epsilon ** 2 * (1 - gamma) ** 4)\n term2 = np.log((S * A) / (self.epsilon * (1 - gamma) ** self.delta))\n return (S + term2) * factor",
"def get_correct_lap_count(self):",
"def nyquist(self):\n return 1 / (2 * np.median(np.diff(self.lc.time)))",
"def calculate_text_stars(word_counts) -> int:\n if word_counts == []:\n return 3\n words_per_slide = sum(word_counts) / len(word_counts)\n stars = 5 - abs(words_per_slide - 35) / 8\n # print(stars)\n return max(0, min(5, int(stars + 0.5)))",
"def NBIAS(self):\n return len(self.STARS[\"dist\"])",
"def starsize(self, hipid):\n #if hipid<0 or len(self.hip_stars)<=hipid: return 0\n s = self.hip_stars[hipid]\n if s==None: return 0\n #return self.zerosize*(.8**(s[1]))\n #return self.zerosize-s[1]-2\n return self.dimmest_mag-s[1]+1",
"def molar_mass_dry_air():\n return 28.9647",
"def avg_num_visits_patient(self):\n pass"
] | [
"0.61229646",
"0.6093639",
"0.59843814",
"0.59413254",
"0.5775997",
"0.5775997",
"0.5775997",
"0.5775997",
"0.57482773",
"0.57340753",
"0.5731418",
"0.57153237",
"0.5698593",
"0.56730896",
"0.5668431",
"0.5663986",
"0.5649949",
"0.5639038",
"0.5635766",
"0.5622494",
"0.561866",
"0.5612916",
"0.5608629",
"0.5587175",
"0.55677193",
"0.55670786",
"0.55634433",
"0.5555399",
"0.5553607",
"0.55502486"
] | 0.73987895 | 0 |
Queries a nearby Weather Underground station for temperature and rain data. | def get_weather_data(weather_station):
now = datetime.datetime.now()
then = now - datetime.timedelta(days=7)
query_date_start = ("%d%02d%02d" % (then.year, then.month, then.day))
query_date_end = ("%d%02d%02d" % (now.year, now.month, now.day))
api_key = '/api/%s' % WUNDERGROUND_KEY
history_key = '/history_%s%s/lang:EN/units:english/bestfct:1/v:2.0' % (query_date_start, query_date_end)
query = '/q/%s.json?showObs=0&ttl=120' % weather_station
weather_url = ("%s%s%s%s" % (WUNDERGROUND_HOST, api_key, history_key, query))
logger.info('Weather URL: %s', weather_url)
response = requests.get(weather_url).text
max_temp_avg = json.loads(response)['history']['summary']['max_temperature_avg']
sum_precip = json.loads(response)['history']['summary']['precip_sum']
return max_temp_avg, sum_precip | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getTodaysWeather(self, keyword, temp):\n\n\t\t# Variables\n\t\tweather = {} \n\t\tfio = self.helper.getFio(keyword, temp) # Getting fio object\n\t\t\n\t\t# Getting todays weather data and populating the dictionary\n\t\tif fio.has_daily() is True and fio.has_hourly() is True:\n\t\t daily = FIODaily.FIODaily(fio)\n\t\t hourly = FIOHourly.FIOHourly(fio)\n\t\t for day in xrange(0, 1):\n\t\t\t\tfor item in daily.get_day(day).keys():\n\t\t\t\t\tif item == \"temperatureMin\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"summary\":\n\t\t\t\t\t\tweather[item] = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"temperatureMax\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"windSpeed\":\n\t\t\t\t\t\twindSpeed = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"windBearing\":\n\t\t\t\t\t\twindBearing = unicode(daily.get_day(day)[item])\n\t\t\t\t\t\twindBearing = self.helper.convertWindBearing(windBearing)\n\t\t\t\t\tif item == \"sunsetTime\":\n\t\t\t\t\t\tweather[item] = self.helper.getDateForWeather(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"sunriseTime\":\n\t\t\t\t\t\tweather[item] = self.helper.getDateForWeather(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"precipProbability\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\tweather[\"wind\"] = windBearing + \" \" + windSpeed + \" mph\"\n\t\t\t\tfor item in hourly.get_hour(day).keys():\n\t\t\t\t\tif item == \"summary\":\n\t\t\t\t\t\tweather[\"current\"] = unicode(hourly.get_hour(0)[item])\n\t\t\t\t\tif item == \"temperature\":\n\t\t\t\t\t\tweather[item] = str(hourly.get_hour(0)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"icon\":\n\t\t\t\t\t\tweather[item] = unicode(hourly.get_hour(0)[item])\n\t\t\t\t\tif item == \"cloudCover\":\n\t\t\t\t\t\tweather[item] = str(hourly.get_hour(0)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\tweather[\"town\"] = self.helper.getCoords(keyword)[2]\n\t\telse:\n\t\t\treturn 'No Todays data'\n\n\t\treturn weather",
"def temperatures():\n\n return station_9281",
"def query(self, lon, lat):\n def distance(lon1, lat1, lon2, lat2):\n return (lon2 - lon1) ** 2 + (lat2 - lat1) ** 2\n\n min_distance = sys.maxint\n weather = {}\n for w in self._weather:\n d = distance(lon, lat, w['lon'], w['lat'])\n if d < min_distance:\n min_distance = d\n weather = w\n\n return dict(temp=weather['temp'],\n humidity=weather['humidity'],\n weather_code=weather['weather_code'])",
"def get_weather_data(lat='40.761440',lng='-73.981806'):\r\n key ='********************************'\r\n x = pd.DataFrame()\r\n unix_now = int((dt.datetime.now()- dt.datetime(1970,1,1)).total_seconds())\r\n for time in range(unix_now-86400, unix_now+604800, 86400):\r\n rsp = rq.get('https://api.darksky.net/forecast/{}/{},{},{}'.format(key, lat, lng, time))\r\n rsp_json = json.loads(rsp.text)\r\n row = json_normalize(rsp_json[\"daily\"]['data'])\r\n x = x.append(row)\r\n \r\n x = x[['icon','apparentTemperatureHigh','apparentTemperatureLow','cloudCover','humidity','precipProbability',\r\n 'pressure','visibility','windBearing','windGust','windSpeed']].reset_index(drop=True)\r\n return x",
"def getDailyWeather(self, keyword, temp):\n\n\t\t# Variables\n\t\tdaily_weather = []\n\t\tweather = {}\n\t\tfio = self.helper.getFio(keyword, temp) # Getting fio object\n\n\t\t# Getting 4-day forecast, storing each day's data in a dictionary and\n\t\t# storing each dictionary in an array\n\t\tif fio.has_daily() is True:\n\t\t\tdaily = FIODaily.FIODaily(fio)\n\t\t\tfor day in xrange(0, 4):\n\t\t\t\tfor item in daily.get_day(day).keys():\n\t\t\t\t\tif item == \"summary\":\n\t\t\t\t\t\tweather[item] = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"icon\":\n\t\t\t\t\t\tweather[item] = unicode(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"temperatureMax\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\t\n\t\t\t\t\tif item == \"temperatureMin\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item]).split(\".\")[0]\n\t\t\t\t\tif item == \"precipProbability\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\t\tif item == \"time\":\n\t\t\t\t\t\tweather[item] = self.helper.getDateForWeather(daily.get_day(day)[item])\n\t\t\t\t\tif item == \"cloudCover\":\n\t\t\t\t\t\tweather[item] = str(daily.get_day(day)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\tdaily_weather.append(weather)\n\t\t\t\tweather = {}\n\t\telse:\n\t\t\treturn 'No Daily data'\n\t\treturn daily_weather",
"def getHourlyWeather(self, keyword, temp, last_hour):\n\n\t\t# Variables\n\t\tconditions = []\n\t\tweather = {}\n\n\t\tfio = self.helper.getFio(keyword, temp) # Getting fio object\n\n\t\tif fio.has_hourly() is True:\n\t\t\thourly = FIOHourly.FIOHourly(fio)\n\n\t\t\t# Getting weather forecast for next 12 hours\n\t\t\tfor hour in xrange(1, last_hour):\n\t\t\t\tfor item in hourly.get_hour(hour).keys():\n\t\t\t\t\t# Parsing data from hourly fio object and adding it to weather dictionary\n\t\t\t\t\tif item == \"icon\":\n\t\t\t\t\t\tweather[item] = unicode(hourly.get_hour(hour)[item])\n\t\t\t\t\tif item == \"summary\":\n\t\t\t\t\t\tweather[item] = unicode(hourly.get_hour(hour)[item])\n\t\t\t\t\tif item == \"temperature\":\n\t\t\t\t\t\tif temp == \"f\":\n\t\t\t\t\t\t\tweather[item] = str(hourly.get_hour(hour)[item]).split(\".\")[0] + \"° F\"\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tweather[item] = str(hourly.get_hour(hour)[item]).split(\".\")[0] + \"° C\"\n\t\t\t\t\tif item == \"humidity\":\n\t\t\t\t\t\tweather[item] = str(hourly.get_hour(hour)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\t\tif item == \"time\":\n\t\t\t\t\t\tweather[item] = self.helper.getDateForWeather(hourly.get_hour(hour)[item])\n\t\t\t\t\tif item == \"precipProbability\":\n\t\t\t\t\t\tweather[item] = str(hourly.get_hour(hour)[item] * 100).split(\".\")[0] + \"%\"\n\t\t\t\t\tif item == \"windSpeed\":\n\t\t\t\t\t\twindSpeed = unicode(hourly.get_hour(hour)[item])\n\t\t\t\t\tif item == \"windBearing\":\n\t\t\t\t\t\twindBearing = unicode(hourly.get_hour(hour)[item])\n\t\t\t\t\t\twindBearing = self.helper.convertWindBearing(windBearing)\n\t\t\t\t\t\tweather[\"wind\"] = windBearing + \" \" + windSpeed + \" mph\"\n\t\t\t\t\tif item == \"cloudCover\":\n\t\t\t\t\t\tweather[item] = str(hourly.get_hour(hour)[item] * 100).split(\".\")[0] + \"%\"\n\n\t\t\t\t# Populating conditions array with weather dicitonary\n\t\t\t\tconditions.append(weather)\n\t\t\t\tweather = {}\n\t\telse:\n\t\t\treturn 'No hourly data'\n\t\treturn conditions",
"def read_weather_data():\n # Check if UTC to gmt+1 conversion is being handled correctly\n weather = pd.read_csv('//datc//opschaler//weather_data//knmi_10_min_raw_data//output//df_combined_uncleaned.csv',\n delimiter='\\t', comment='#',\n parse_dates=['datetime'])\n weather = weather.set_index(['datetime'])\n return weather",
"def forecast_weather(self):\n pass",
"def update_rain_temp(self, day_of_week, departure_time_seconds):\n\n current_time = t.time()\n today = datetime.today().weekday()\n\n if (departure_time_seconds < (current_time + 3600) \\\n and day_of_week == today):\n\n self.temp = self.current_temperature\n self.rain = self.current_rainfall\n\n elif (day_of_week == today):\n for i in range(24):\n if (departure_time_seconds > self.weather_forecast_json \\\n [\"hourly\"][\"data\"][i][\"time\"] and departure_time_seconds \\\n < self.weather_forecast_json[\"hourly\"][\"data\"][i + 1][\"time\"]):\n\n self.temp = self.weather_forecast_json \\\n ['hourly']['data'][i]['temperature']\n\n self.rain = self.weather_forecast_json['hourly'] \\\n ['data'][i]['precipIntensity']\n break\n else:\n continue\n else:\n day_difference = int((departure_time_seconds - current_time) / 86400)\n\n self.temp = (self.weather_forecast_json['daily']['data'] \\\n [day_difference]['temperatureMax'] + \\\n self.weather_forecast_json['daily']['data'] \\\n [day_difference]['temperatureMin']) / 2\n\n self.rain = self.weather_forecast_json['daily'] \\\n ['data'][day_difference]['precipIntensity']",
"def testWeatherFetch(self):\n\n timeCol = 'timestamp'\n rows = []\n for row in self.aggregator.rawData(dataType = 'weather',\n orderBy = [timeCol],\n timestampCol = timeCol,\n startDate = self.testStart,\n endDate = self.testEnd):\n rows.append(row)\n self.assertIsNotNone(rows, 'Rows are present.')",
"def get_weather(days, hours, db):\n days = format_list_for_db(days)\n hours = format_list_for_db(hours)\n sql = f\"SELECT * FROM weather WHERE day in {days} AND HOUR in {hours}\"\n cursor = db.cursor()\n cursor.execute(sql)\n data = cursor.fetchall()\n cursor.close()\n\n weathers = []\n if len(data) > 0:\n for weather in data:\n weather = {\"hour\": weather[1],\n \"day\": day_absolute_to_relative(weather[2]),\n \"temperature\": weather[3],\n \"apparenttemperature\": weather[4],\n \"precipitationintensity\": weather[5],\n \"precipitationprobability\": weather[6],\n \"humidity\": weather[7],\n \"dewpoint\": weather[8],\n \"windspeed\": weather[9],\n \"windbearing\": weather[10],\n \"windgust\": weather[11],\n \"pressure\": weather[12],\n \"cloudcover\": weather[13],\n \"uvindex\": weather[14],\n \"visibility\": weather[15]}\n weathers.append(weather)\n return weathers",
"def GetWeather(query, api_key):\n try:\n owm = pyowm.OWM(api_key)\n observation = owm.weather_at_place(str(query))\n location = observation.get_location()\n weather = observation.get_weather()\n temp = weather.get_temperature('fahrenheit')\n status = CleanupWeatherStatus(weather.get_detailed_status())\n return 'It is %sF degrees with %s in %s right now.' % (int(temp['temp']),\n status,\n location.get_name())\n except:\n return 'I couldn\\'t find any weather for %s. I am sorry.' % (query)",
"def get_weather_data():\n keys = ['1364038.csv',\n '1364041.csv',\n '1364042.csv',\n '1364043.csv',\n '1364044.csv',\n '1364046.csv',\n '1364047.csv',\n '1364048.csv',\n '1364051.csv',\n '1364052.csv',\n '1364053.csv',\n '1364054.csv',\n '1364055.csv',\n '1364058.csv',\n '1364059.csv',\n '1364060.csv',\n '1364061.csv',\n '1364062.csv',\n '1364063.csv',\n '1364064.csv',\n '1364066.csv']\n df_weather = import_weather(keys)\n df_weather_dist = df_weather[[\n 'LATITUDE', 'LONGITUDE', 'name']].drop_duplicates().reset_index()\n return df_weather, df_weather_dist",
"def combine_weather(weather):\n\n weather1 = weather[weather[\"Station\"] == 1]\n weather2 = weather[weather[\"Station\"] == 2]\n\n\n pass",
"def get_weather(phenny, input):\n import wunderground\n \n report_type = 'conditions'\n\n unicode_input = unicode(input)\n if unicode_input[1:8] == 'weather':\n location_str = unicode_input[9:]\n elif unicode_input[1:3] == 'w ':\n location_str = unicode_input[3:]\n try:\n json_data = wunderground.format_json(location_str, input.weather_API, report_type)\n output_results(phenny, json_data)\n except Exception, e:\n print e\n phenny.say('Could not find results for \"%s\", please reword the search and try again.' % location_str)",
"def get_weather_data(lat, lon):\n\n # Get weather\n filedata = pvtoolslib.get_s3_filename_df()\n filedata_closest = nsrdbtools.find_closest_datafiles(float(lat), float(lon),\n filedata)\n\n filename = filedata_closest['filename'].iloc[0]\n\n if filename == '124250_37.93_-122.3.npz':\n weather, info = nsrdbtools.get_local_weather_data(filename)\n else:\n weather, info = pvtoolslib.get_s3_weather_data(filename)\n\n return weather, info",
"def rainfall_series(self):\n\n # assign local temporal variables\n datatype = 'strds'\n increment = str(self.rain_interval)+\" minutes\"\n raster = 'raster'\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n #iterations = sum(1 for row in precip)\n\n # create a raster space time dataset\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(\n elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # open txt file with precipitation data\n with open(evol.precipitation) as csvfile:\n\n # check for header\n has_header = csv.Sniffer().has_header(csvfile.read(1024))\n\n # rewind\n csvfile.seek(0)\n\n # skip header\n if has_header:\n next(csvfile)\n\n # parse time and precipitation\n precip = csv.reader(csvfile, delimiter=',', skipinitialspace=True)\n\n # initial run\n initial = next(precip)\n evol.start = initial[0]\n evol.rain_intensity = 'rain_intensity'\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=evol.rain_intensity,\n rain_observation=float(initial[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif 
self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model for each rainfall record\n for row in precip:\n\n # update the elevation\n evol.elevation=evolved_elevation\n\n # update time\n evol.start=row[0]\n\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=rain_intensity,\n rain_observation=float(row[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n 
overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"= {evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)",
"def getHourlyWind(self, keyword):\n\n\t\tweather_data = self.getHourlyWeatherFromCSV(keyword, \"f\", \"wind\")\n\t\twind_values = [] # Array that will contain all the wind data\n\t\twind_data = {} # Dictionary of wind data\n\n\t\t# Getting humidity data\n\t\tfor data in weather_data:\n\t\t\twind_data[\"x\"] = self.helper.getDateInEpoch(data[\"date\"])\n\t\t\twind_data[\"y\"] = float(data[\"wind\"].split(\" \")[1])\n\t\t\twind_values.append(wind_data)\n\t\t\twind_data = {}\n\n\t\treturn wind_values",
"def get_temperature_data(zone):\n\n zone = zone[1:len(zone)-1]\n temp_response = {}\n conn = sqlite3.connect(os.path.abspath('database.db'))\n\n # get temperatures data\n query = \"Select temp_date, temp_max From temperature Left join fire_danger_zone on temperature.temp_station=fire_danger_zone.fdz_station Where fire_danger_zone.fdz_station == '\" + zone + \"' and temperature.temp_date >= date('2010-01-01') Order by temperature.temp_date;\"\n dataframe = pd.read_sql_query(query, conn) \n temperatures = dataframe['temp_max'].values.tolist()\n\n # get dates\n dates = dataframe['temp_date'].values.tolist()\n \n # add data in dictionary \n data_name = 'temp_'+zone\n temp_response[data_name] = temperatures\n temp_response['labels'] = dates\n \n # return data\n response = jsonify(temp_response)\n response.headers.add('Access-Control-Allow-Origin', '*')\n \n # close database connection\n conn.close()\n return response",
"def _do_checkWeather(self, mjd, w, config):\n # Convert mjd to the relevant time units of the weather dates.\n time = (mjd - config['sim_start'] + config['%s_start' %(w)]) * _day2sec\n # And wrap the time, if we need to. \n time = time % self.maxtime[w]\n # Find the observations which are closest in time to our requested time.\n time_order = (abs(self.dates[w] - time)).argsort()\n date1 = self.dates[w][time_order[0]]\n date2 = self.dates[w][time_order[1]]\n weather1 = self.weather[w][time_order[0]]\n weather2 = self.weather[w][time_order[1]]\n # Do interpolation for weather at this particular time.\n weather = (weather2 - weather1) / (date2 - date1) * (time - date1) + weather1\n return weather, weather1",
"def get_weather(html):\n\tcheck_page_type(html)\n\tget_temp(html)\n\tget_table(html)\n\treturn weather_dict",
"def get_fire_weather_stations(session: Session) -> CursorResult:\n return session.query(PlanningWeatherStation, FuelType, PlanningArea, FireCentre)\\\n .join(FuelType, FuelType.id == PlanningWeatherStation.fuel_type_id)\\\n .join(PlanningArea, PlanningArea.id == PlanningWeatherStation.planning_area_id)\\\n .join(FireCentre, FireCentre.id == PlanningArea.fire_centre_id)\\\n .filter(PlanningWeatherStation.is_deleted == False)",
"def get_hourly(location_list):\n location, human_location = location_list\n query = location\n url = \"http://api.wunderground.com/auto/wui/geo/WXCurrentObXML/index.xml?query=%s\" % query\n f = urllib2.urlopen(url)\n xml = f.read()\n root = ET.XML(xml)\n \n current = {'location': location, 'human_location': human_location}\n current['observation_time'] = parser.parse(root.find('observation_time').text.replace('Last Updated on',''))\n current['temperature'] = root.find('temp_f').text\n current['humidity'] = root.find('relative_humidity').text.strip('%') #Remove %\n current['wind_speed'] = root.find('wind_mph').text\n current['wind_direction'] = root.find('wind_dir').text\n current['icon'] = root.find('icon').text\n current['conditions'] = root.find('weather').text\n try:\n f = Forecast(**current)\n f.save()\n except:\n logging.info(\"Hourly Forecast Data missing or no new data available\")",
"def Kweather():\n while True:\n hr = int(datetime.datetime.now().strftime(\"%H\"))\n if hr == 23:\n from weather import Weather, Unit\n weather = Weather(unit=Unit.CELSIUS)\n lookup = weather.lookup_by_location('Taipei')\n condition = lookup.print_obj\n code = condition[\"item\"][\"forecast\"][1][\"text\"]\n hightemp = condition[\"item\"][\"forecast\"][1][\"high\"]\n lowtemp = condition[\"item\"][\"forecast\"][1][\"low\"]\n \n print(hightemp,lowtemp,code)\n #Warning\n msg = \"\"\n if int(hightemp) > 32:\n msg = msg + \"明天溫度: \" + hightemp + \" 早上可能會很熱哦, 敲鼻可以穿少一點 \"\n if int(lowtemp) < 15:\n msg = msg + \"明天溫度: \" + lowtemp + \" 會很冷哦, 敲鼻要記得多穿一點\"\n if \"Rain\" in code or \"Thunder\" in code or \"Showers\" in code:\n msg = msg + \"明天會下雨, 敲鼻記得帶傘\"\n if msg != \"\":\n print(msg)\n SendMsg(msg)\n time.sleep(60*60)",
"def get_rainfall_data(zone):\n zone = zone[1:len(zone)-1]\n rain_response = {}\n conn = sqlite3.connect(os.path.abspath('database.db'))\n\n # get rainfall data\n query = \"Select rain_date, rain_rainfall From rainfall Left join fire_danger_zone on rainfall.rain_station=fire_danger_zone.fdz_station Where fire_danger_zone.fdz_station == '\" + zone + \"' and rainfall.rain_date >= date('2010-01-01') Order by rainfall.rain_date;\"\n dataframe = pd.read_sql_query(query, conn) \n rainfall = dataframe['rain_rainfall'].values.tolist()\n\n # get dates\n dates = dataframe['rain_date'].values.tolist()\n \n # add data in dictionary \n data_name = 'rain_'+zone\n rain_response[data_name] = rainfall\n rain_response['labels'] = dates\n \n # return data\n response = jsonify(rain_response)\n response.headers.add('Access-Control-Allow-Origin', '*')\n \n # close database connection\n conn.close()\n return response",
"def get_weather(self):\n with urllib.request.urlopen(self.url) as response:\n json_data = response.read().decode('utf-8')\n\n data = json.loads(json_data)\n\n weather = {}\n weather['current'] = {\n 'temp': round(data['current']['temp_f']),\n 'humidity': round(data['current']['humidity']),\n 'summary': data['current']['condition']['text']\n }\n today = data['forecast']['forecastday'][0]['day']\n weather['today'] = {\n 'temp': round(today['maxtemp_f']),\n 'summary': today['condition']['text']\n }\n \n return weather",
"def _get_dict_weather_data(self, weather_current):\n\n returned_dict = dict()\n returned_dict[\"weather_status\"] = weather_current.get_detailed_status()\n\n time_format = '%H:%M'\n if self.am_pm_time:\n time_format = '%I:%M %p'\n\n returned_dict[\"sunset\"] = datetime.fromtimestamp(weather_current.get_sunset_time()).strftime(time_format)\n returned_dict[\"sunrise\"] = datetime.fromtimestamp(weather_current.get_sunrise_time()).strftime(time_format)\n\n returned_dict[\"temperature\"] = int(round(weather_current.get_temperature(unit=self.temp_unit)[\"temp\"]))\n returned_dict[\"temperature_min\"] = int(round(weather_current.get_temperature(unit=self.temp_unit)[\"temp_min\"]))\n returned_dict[\"temperature_max\"] = int(round(weather_current.get_temperature(unit=self.temp_unit)[\"temp_max\"]))\n\n returned_dict[\"pressure\"] = weather_current.get_pressure()[\"press\"]\n returned_dict[\"sea_level_pressure\"] = weather_current.get_pressure()[\"sea_level\"]\n\n returned_dict[\"humidity\"] = weather_current.get_humidity()\n\n wind = weather_current.get_wind()\n wind_deg = wind.get(\"deg\", None)\n wind_speed = wind.get(\"speed\", None)\n returned_dict[\"wind_deg\"] = wind_deg\n returned_dict[\"wind_speed\"] = wind_speed\n\n snow_current = weather_current.get_snow()\n snow_current = snow_current.get('all', None)\n rain_current = weather_current.get_rain()\n rain_current = rain_current.get('all', None)\n returned_dict[\"rainfall\"] = rain_current\n returned_dict[\"snow\"] = snow_current\n\n returned_dict[\"clouds_coverage\"] = weather_current.get_clouds()\n\n return returned_dict",
"def populate_weather(connection):\n metadata = load_metadata('weather')\n cursor = connection.cursor()\n water_defs = get_water_definitions()\n\n # Check if tables are already populated.\n cursor.execute('SELECT count(*) FROM weather')\n weather_count = cursor.fetchone()[0]\n\n if weather_count:\n print('Weather tables already populated!')\n return\n\n print('WEATHER:')\n\n # Darksky data\n for dir_name, location in metadata.items():\n print(f'\\tPopulating weather: \"{location[\"name\"]}\".')\n\n # Insert location.\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')\n location_id = cursor.lastrowid\n\n # Set weather locations for watercourses/aquifers.\n for water_body in [d['body'] for d in water_defs.values()]:\n if water_body in location:\n cursor.execute(f'''UPDATE {water_body}s\n SET location_id = {location_id}\n WHERE name IN ('{\"','\".join(location[water_body])}')''')\n break\n\n dir_path = get_data_path('weather', 'raw', dir_name)\n for json_file_name in os.listdir(dir_path):\n json_path = os.path.join(dir_path, json_file_name)\n with open(json_path, 'r', encoding='utf-8') as json_file:\n print(f'\\t\\tPopulating year: {json_file_name[0:-5]}')\n year_forecasts = json.load(json_file)\n for date, date_forecast in year_forecasts.items():\n hourly_forecasts = date_forecast['hourly']\n\n if not hourly_forecasts:\n print(f'\\t\\tNo hourly forecasts for {date}!')\n continue\n\n daily_forecast = {\n 'location_id': location_id,\n 'time': date_forecast['time'],\n 'day_time': date_forecast['sunset_time'] - date_forecast['sunrise_time'],\n 'precipitation': 0,\n 'snow_accumulation': 0\n }\n # List of value names with `avg`, `min` and `max` values\n value_names = {\n 'temperature': 'temperature',\n 'cloud_cover': 'cloudCover',\n 'dew_point': 'dewPoint',\n 'humidity': 'humidity',\n 'pressure': 'pressure',\n 'uv_index': 'uvIndex',\n 'precipitation_probability': 'precipProbability',\n 'precipitation_intensity': 'precipIntensity'\n }\n # Value name counters, which indicate how many times (out of 24)\n # certain value appears in hourly data.\n value_counts = {k: 0 for k in value_names.keys()}\n\n for value_name in value_names.keys():\n daily_forecast[f'{value_name}_avg'] = 0.0\n daily_forecast[f'{value_name}_min'] = float('inf')\n daily_forecast[f'{value_name}_max'] = float('-inf')\n\n # Calculate daily forecast values from hourly forecasts.\n for hourly_forecast in hourly_forecasts:\n for value_name in value_names.keys():\n orig_value_name = value_names[value_name]\n if is_forecast_number(orig_value_name, hourly_forecast):\n daily_forecast[f'{value_name}_avg'] += hourly_forecast[orig_value_name]\n daily_forecast[f'{value_name}_min'] = min(\n hourly_forecast[orig_value_name],\n daily_forecast[f'{value_name}_min']\n )\n daily_forecast[f'{value_name}_max'] = max(\n hourly_forecast[orig_value_name],\n daily_forecast[f'{value_name}_max']\n )\n value_counts[value_name] += 1\n\n if is_forecast_number('precipAccumulation', hourly_forecast) \\\n and hourly_forecast['precipType'] == 'snow':\n daily_forecast['snow_accumulation'] += hourly_forecast['precipAccumulation']\n elif is_forecast_number('precipIntensity', hourly_forecast) \\\n and is_forecast_number('precipProbability', hourly_forecast):\n daily_forecast['precipitation'] += \\\n hourly_forecast['precipIntensity'] * hourly_forecast['precipProbability']\n\n for value_name, value_count in value_counts.items():\n if value_count:\n # Calculate average.\n 
daily_forecast[f'{value_name}_avg'] = daily_forecast[f'{value_name}_avg'] / value_count\n else:\n # If value never appeared\n daily_forecast[f'{value_name}_avg'] = 'NULL'\n daily_forecast[f'{value_name}_min'] = 'NULL'\n daily_forecast[f'{value_name}_max'] = 'NULL'\n\n cursor.execute(f'''INSERT INTO weather({', '.join(daily_forecast.keys())})\n VALUES ({', '.join([str(v) for v in daily_forecast.values()])})''')\n\n # IOT data:\n for location in SETTINGS['weather_locations_iot']:\n print(f'\\tPopulating weather: \"{location[\"name\"]}\".')\n\n # Insert location.\n cursor.execute(f'''INSERT INTO locations(name, lat, lng)\n VALUES ('{location['name']}', {location['lat']}, {location['lng']})''')\n location_id = cursor.lastrowid\n\n # Set weather locations for watercourses/aquifers.\n for water_body in [d['body'] for d in water_defs.values()]:\n if water_body in location:\n cursor.execute(f'''UPDATE {water_body}s\n SET location_id = {location_id}\n WHERE name IN ('{\"', '\".join(location[water_body])}')''')\n\n # Set locations for all stations on given water body to match its location.\n cursor.execute(f'''SELECT id\n FROM {water_body}s\n WHERE location_id = {location_id}''')\n ids = [row[0] for row in cursor.fetchall()]\n if len(ids):\n cursor.execute(f'''UPDATE {water_body}_stations\n SET location_id = {location_id}\n WHERE {water_body}_id IN ({', '.join([str(v) for v in ids])})''')\n\n break \n \n file_name = f'''{location['lat']}-{location['lng']}.json'''\n json_path = get_data_path('weather', 'raw', file_name)\n\n # If data file doesn't exist, download it first.\n if not os.path.isfile(json_path):\n with open(json_path, 'wb', encoding=\"utf-8\") as file:\n file.write(read_from_url(location['url'], decode=False))\n \n with open(json_path, 'r', encoding='utf-8') as json_file:\n row_names = {\n \"Sun_duration\": \"sun_duration\",\n \"CloudCover\": \"cloud_cover_avg\",\n \"Percipitation\": \"precipitation\",\n \"New_snow_blanket\": \"snow_accumulation\",\n \"Snow_blanket\": \"snow_depth\",\n \"TemperatureAvg\": \"temperature_avg\",\n \"TemperatureMin\": \"temperature_min\",\n \"TemperatureMax\": \"temperature_max\"\n }\n forecasts = json.load(json_file)\n for forecast in forecasts:\n f = {row_names[k]: forecast[k] for k in row_names.keys()}\n f['location_id'] = location_id\n f['time'] = round(forecast['LastUpdatedEpoch'] / 1000)\n cursor.execute(f'''INSERT INTO weather({', '.join(f.keys())})\n VALUES ({', '.join([str(v) for v in f.values()])})''')",
"def streaming_weather_data(**kwargs):\n df = weather_data(['San Francisco'])\n df['time'] = [pd.Timestamp.now()]\n return df.set_index('time')",
"def get_weather_data() -> dict:\n # Creating the url for the api call\n api_key = \"96bba64ba34672da132c1a987ad2fee6\"\n lat = 49.24\n long = -123.15\n config = '&units=metric'\n url = f'https://api.openweathermap.org/data/2.5/onecall?lat={lat}&lon={long}&appid={api_key}{config}'\n\n # Querying and JSON parsing\n api_return = requests.get(url)\n weather_data = api_return.json()\n return weather_data"
] | [
"0.6910243",
"0.66520226",
"0.6608227",
"0.6573686",
"0.63866216",
"0.6343781",
"0.631178",
"0.6305189",
"0.6281451",
"0.62793523",
"0.62376016",
"0.6201287",
"0.6198531",
"0.6157297",
"0.61510736",
"0.6086177",
"0.6076465",
"0.6055122",
"0.60240924",
"0.5990383",
"0.598555",
"0.59790176",
"0.59455276",
"0.59411716",
"0.5938862",
"0.59372866",
"0.5936434",
"0.59353644",
"0.59311247",
"0.59307945"
] | 0.71451485 | 0 |
Return all zip streams and their positions in the file. | def zipstreams(filename):
with open(filename, 'rb') as fh:
data = fh.read()
i = 0
while i < len(data):
try:
zo = zlib.decompressobj()
yield i, zo.decompress(data[i:])
i += len(data[i:]) - len(zo.unused_data)
except zlib.error:
i += 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_stream_readers_for_zip(fh, tmp_dir):\n fasta_zip = zipfile.ZipFile(fh, 'r')\n rval = []\n for member in fasta_zip.namelist():\n fasta_zip.extract(member, tmp_dir)\n rval.append(open(os.path.join(tmp_dir, member), 'rb'))\n return rval",
"def _GetStreamNames(self):\n if self._zipfile:\n for stream_name in self._zipfile.namelist():\n yield stream_name",
"def ls(self):\n return self._zip_file.infolist()",
"def _extract_zip(self, zipfile):\n zf = ZipFile(zipfile)\n d = {}\n for n in zf.namelist():\n d[n] = zf.read(n)\n return d",
"def get_list_of_files_in_zip(zip_files):\n files = {k: [] for k in zip_files}\n for zip_file in zip_files:\n print('[ INFO ] Loading: %s' % zip_file)\n try:\n with ZipFile(join(zip_file), 'r') as z:\n files[zip_file] = files[zip_file] + z.namelist()\n except Exception as e:\n print(e)\n return files",
"def get_zip(self):\n self.zip.rewind()\n return self.zip.in_memory_zip",
"def zip(self):\n global pointer\n global error_flag\n global totalFiles\n while pointer < len(self.files) and ((self.t and not error_flag) or not self.t):\n # Se o modo e' t e a error_flag nao for false entao pode avancar\n # Se o modo nao for t pode avancar sem restricoes\n self.sem.acquire()\n iterator = pointer\n pointer += 1\n self.sem.release()\n if iterator < len(self.files): # Iterator e' o ficheiro que deve ser utilizado pela thread\n File = self.files[iterator]\n if os.path.isfile(File): # Ver se o ficheiro existe\n with ZipFile(File + '.zip', 'w') as zipfile:\n zipfile.write(File) # Zip\n self.totalFilesSem.acquire()\n totalFiles += 1\n self.totalFilesSem.release()\n else:\n print \"O ficheiro\", File, \"não existe.\" # Se nao existir, avisa o utilizador\n error_flag = True # Atualiza a sua propria flag",
"def extract(zip_file_path: str) -> List[bytes]:\n\n with ZipFile(zip_file_path, 'r') as zip_file:\n files_extracted = zip_file.namelist()\n try:\n assert len(files_extracted) == 1, f\"{zip_file_path} contains more than one file: \"\n except:\n logging.info(f\"{zip_file_path} contains more than one file: {files_extracted}\")\n with zip_file.open(files_extracted[0], 'r') as f:\n return f.readlines()",
"def zip(self):\n return self.__zip",
"def test_open_each(self):\n zip_paths = zip_scanner(os.getcwd())\n for zip_path in zip_paths:\n with ZipEditor(zip_path) as zed:\n self.assertEqual(zip_path, zed.file)\n zed.open()\n self.assertIsNotNone(zed.tmpdir.name)\n self.assertEqual(zed.tmpdir.name, zed.getdir())\n self.assertIsNone(zed.getdir())",
"def load_zip_files(zip_path):\n zd = zipfile.ZipFile(zip_path)\n paths = sorted(zd.filelist, key=lambda x: x.date_time)\n for path in paths:\n with zd.open(path, 'r') as fd:\n try:\n data = json.load(fd)\n yield File.Ok(fd.name, data)\n except (json.JSONDecodeError, UnicodeDecodeError) as exc:\n fd.seek(0)\n yield File.Err(fd.name, fd.read(), str(exc))",
"def list_zip_files(zip_path):\n with zipfile.ZipFile(zip_path) as zf:\n data = ComixData(Directories=[], Files=[])\n app.logger.info(\"Loaded the zip file: %s\", zip_path)\n dirs = [name for name in zf.namelist() if name.endswith('/')]\n subdirs = set([name.split('/')[0] for name in dirs])\n if subdirs:\n for dirname in subdirs:\n dirname = dirname.decode('euc-kr').encode('utf-8')\n app.logger.debug('list_zip_files: %s, %s', dirname, [to_hex(c) for c in dirname])\n data.Directories.append(dirname)\n data = json.dumps(data._asdict(), ensure_ascii=False)\n response = flask.Response(data, headers=None)\n return reseponse\n ## No folder in zip file\n return get_files_in_zip_path(zip_path, '')",
"def quickScanZip(args, fh):\n # 100 bytes is the smallest .zip possible\n\n fh.seek(0, 2)\n fsize = fh.tell()\n if fsize==0:\n print(\"Empty file\")\n return\n if fsize<100:\n print(\"Zip too small: %d bytes, minimum zip is 100 bytes\" % fsize)\n return\n fh.seek(-100, 2)\n\n eoddata = fh.read()\n iEND = eoddata.find(b'PK\\x05\\x06')\n if iEND==-1:\n # try with larger chunk\n ofs = max(fh.tell()-0x10100, 0)\n fh.seek(ofs, 0)\n eoddata = fh.read()\n iEND = eoddata.find(b'PK\\x05\\x06')\n if iEND==-1:\n print(\"expected PK0506 - probably not a PKZIP file\")\n return\n else:\n ofs = fh.tell()-0x100\n eod = EndOfCentralDir(ofs, eoddata, iEND+4)\n yield eod\n\n dirofs = eod.dirOffset\n for _ in range(eod.thisEntries):\n fh.seek(dirofs)\n dirdata = fh.read(46)\n if dirdata[:4] != b'PK\\x01\\x02':\n print(\"expected PK0102\")\n return\n dirent = CentralDirEntry(dirofs, dirdata, 4)\n\n yield dirent\n dirofs = dirent.endOffset",
"def file_package_iter(self):\n files = list()\n futures = list()\n\n amount = 0\n for file in self.file_iterator:\n if amount + self._estimate_file_size(file) > self.max_size:\n if len(files) == 0: # This file is too large for one archive, special handling\n self.pool.wait(futures)\n self._calculate_hash(file)\n yield self._finish_info_package([file])\n continue\n\n self.pool.wait(futures)\n yield self._finish_info_package(files)\n\n files = list()\n amount = 0\n\n amount += file.size\n files.append(file)\n futures.append(self.pool.add_task(self._calculate_hash, file)) # todo calc small files in-thread?\n\n if len(files) > 0:\n yield self._finish_info_package(files)",
"def _zip_files(files: Iterable[str], root: str) -> Tuple[bytes, str]:\n zip_data = StringIO()\n files = list(files) # create copy of list also converts generator to list\n with ZipFile(zip_data, \"w\", ZIP_DEFLATED) as zip_file:\n for file_name in files:\n zip_file.write(os.path.join(root, file_name), file_name)\n\n # Fix file permissions to avoid any issues - only care whether a file\n # is executable or not, choosing between modes 755 and 644 accordingly.\n for zip_entry in zip_file.filelist:\n perms = (zip_entry.external_attr & ZIP_PERMS_MASK) >> 16\n new_perms = 0o755 if perms & stat.S_IXUSR != 0 else 0o644\n if new_perms != perms:\n LOGGER.debug(\n \"fixing perms: %s: %o => %o\", zip_entry.filename, perms, new_perms\n )\n new_attr = (zip_entry.external_attr & ~ZIP_PERMS_MASK) | (\n new_perms << 16\n )\n zip_entry.external_attr = new_attr\n\n contents = zip_data.getvalue()\n zip_data.close()\n content_hash = _calculate_hash(files, root)\n\n return contents, content_hash",
"def _unzip_files(self) -> None:\n for file in self.input_path.iterdir():\n if is_zipfile(file):\n with ZipFile(file, mode=\"r\") as archive:\n archive.extractall(path=self.temp_path)",
"def add_zip(manager, zipfile, incref=False):\n from .core.cache.buffer_cache import empty_dict_checksum, empty_list_checksum\n result = []\n for checksum in zipfile.namelist():\n if checksum in (empty_dict_checksum, empty_list_checksum):\n continue\n checksum2 = bytes.fromhex(checksum)\n buffer = zipfile.read(checksum)\n checksum3 = calculate_checksum(buffer)\n if checksum3 != checksum2:\n raise ValueError(\"Incorrect checksum for zipped file '{}'\".format(checksum))\n buffer_cache.cache_buffer(checksum2, buffer)\n if incref:\n buffer_cache.incref(checksum2, authoritative=False)\n result.append(checksum)\n return result",
"def Read(self):\n try:\n file_object = self._zip_file.open(self._stream_name, mode='r')\n except KeyError as exception:\n raise IOError(\n 'Unable to open stream with error: {0!s}'.format(exception))\n\n try:\n entry_data = file_object.read(self._TABLE_ENTRY_SIZE)\n while entry_data:\n table_entry = self._TABLE_ENTRY.parse(entry_data)\n\n self._offsets.append(table_entry.offset)\n entry_data = file_object.read(self._TABLE_ENTRY_SIZE)\n\n except construct.FieldError as exception:\n raise IOError(\n 'Unable to read table entry with error: {0!s}'.format(exception))\n\n finally:\n file_object.close()",
"def unzip(self):\n global pointer\n global error_flag\n global totalFiles\n while pointer < len(self.files) and ((self.t and not error_flag) or not self.t):\n # Se o modo nao for t pode avancar sem restricoes\n # Se o modo e' t e a error_flag nao for false entao pode avancar\n self.sem.acquire()\n iterator = pointer\n pointer += 1\n self.sem.release()\n if iterator < len(self.files): # Iterator e' o ficheiro que deve ser utilizado pela thread\n File = self.files[iterator]\n if os.path.isfile(File): # Ver se o ficheiro existe\n with ZipFile(File, 'r') as zipfile:\n zipfile.extractall('.') # Unzip\n self.totalFilesSem.acquire()\n totalFiles += 1\n self.totalFilesSem.release()\n else:\n print \"O ficheiro\", File, \"não existe.\" # Se nao exister, avisa o utilizador\n error_flag = True # Atualiza a sua propria flag",
"def get_zipinfo(self):\n zipinfo = zipfile.ZipInfo()\n zipinfo.filename = self.translated_path()\n zipinfo.date_time = self.get_mod_time()\n zipinfo.file_size = self.get_size()\n return zipinfo",
"def Zip(args):\n parser = argparse.ArgumentParser(description=Zip.__doc__)\n parser.add_argument(\n '-r', dest='recursive', action='store_true',\n default=False,\n help='recurse into directories')\n parser.add_argument(\n '-q', dest='quiet', action='store_true',\n default=False,\n help='quiet operation')\n parser.add_argument('zipfile')\n parser.add_argument('filenames', nargs='+')\n options = parser.parse_args(args)\n\n src_files = []\n for filename in options.filenames:\n globbed_src_args = glob.glob(filename)\n if not globbed_src_args:\n if not options.quiet:\n print('zip warning: name not matched: %s' % filename)\n\n for src_file in globbed_src_args:\n src_file = os.path.normpath(src_file)\n src_files.append(src_file)\n if options.recursive and os.path.isdir(src_file):\n for root, dirs, files in os.walk(src_file):\n for dirname in dirs:\n src_files.append(os.path.join(root, dirname))\n for filename in files:\n src_files.append(os.path.join(root, filename))\n\n # zip_data represents a list of the data to be written or appended to the\n # zip_stream. It is a list of tuples:\n # (OS file path, zip path/zip file info, and file data)\n # In all cases one of the |os path| or the |file data| will be None.\n # |os path| is None when there is no OS file to write to the archive (i.e.\n # the file data already existed in the archive). |file data| is None when the\n # file is new (never existed in the archive) or being updated.\n zip_data = []\n new_files_to_add = [OSMakeZipPath(src_file) for src_file in src_files]\n zip_path_to_os_path_dict = dict((new_files_to_add[i], src_files[i])\n for i in range(len(src_files)))\n write_mode = 'a'\n if os.path.exists(options.zipfile):\n with zipfile.ZipFile(options.zipfile, 'r') as zip_stream:\n try:\n files_to_update = set(new_files_to_add).intersection(\n set(zip_stream.namelist()))\n if files_to_update:\n # As far as I can tell, there is no way to update a zip entry using\n # zipfile; the best you can do is rewrite the archive.\n # Iterate through the zipfile to maintain file order.\n write_mode = 'w'\n for zip_path in zip_stream.namelist():\n if zip_path in files_to_update:\n os_path = zip_path_to_os_path_dict[zip_path]\n zip_data.append((os_path, zip_path, None))\n new_files_to_add.remove(zip_path)\n else:\n file_bytes = zip_stream.read(zip_path)\n file_info = zip_stream.getinfo(zip_path)\n zip_data.append((None, file_info, file_bytes))\n except IOError:\n pass\n\n for zip_path in new_files_to_add:\n zip_data.append((zip_path_to_os_path_dict[zip_path], zip_path, None))\n\n if not zip_data:\n print('zip error: Nothing to do! (%s)' % options.zipfile)\n return 1\n\n with zipfile.ZipFile(options.zipfile, write_mode,\n zipfile.ZIP_DEFLATED) as zip_stream:\n for os_path, file_info_or_zip_path, file_bytes in zip_data:\n if isinstance(file_info_or_zip_path, zipfile.ZipInfo):\n zip_path = file_info_or_zip_path.filename\n else:\n zip_path = file_info_or_zip_path\n\n if os_path:\n st = os.stat(os_path)\n if stat.S_ISDIR(st.st_mode):\n # Python 2.6 on the buildbots doesn't support writing directories to\n # zip files. This was resolved in a later version of Python 2.6.\n # We'll work around it by writing an empty file with the correct\n # path. 
(This is basically what later versions do anyway.)\n zip_info = zipfile.ZipInfo()\n zip_info.filename = zip_path\n zip_info.date_time = time.localtime(st.st_mtime)[0:6]\n zip_info.compress_type = zip_stream.compression\n zip_info.flag_bits = 0x00\n zip_info.external_attr = (st[0] & 0xFFFF) << 16\n zip_info.CRC = 0\n zip_info.compress_size = 0\n zip_info.file_size = 0\n zip_stream.writestr(zip_info, '')\n else:\n zip_stream.write(os_path, zip_path)\n else:\n zip_stream.writestr(file_info_or_zip_path, file_bytes)\n\n if not options.quiet:\n if zip_path in new_files_to_add:\n operation = 'adding'\n else:\n operation = 'updating'\n zip_info = zip_stream.getinfo(zip_path)\n if (zip_info.compress_type == zipfile.ZIP_STORED or\n zip_info.file_size == 0):\n print(' %s: %s (stored 0%%)' % (operation, zip_path))\n elif zip_info.compress_type == zipfile.ZIP_DEFLATED:\n print(' %s: %s (deflated %d%%)' % (operation, zip_path,\n 100 - zip_info.compress_size * 100 / zip_info.file_size))\n\n return 0",
"def iter_zip(\n path: Path,\n *,\n fp: bool = False,\n) -> Generator[ZipfileItem, None, None]:\n with zipfile.ZipFile(path, mode='r') as zip_file:\n for zip_info in zip_file.infolist():\n item = _get_zipfile_item(zip_info)\n if fp and item.type == FileSystemItemType.file:\n with zip_file.open(zip_info) as fp:\n item.fp = fp\n yield item\n else:\n yield item",
"def unzip_oxygen_files(zip_file):\n name_main_content = None\n name_left_menu = None\n list_img_files_to_save = list()\n\n files_unzipped = ZipFile(zip_file)\n for file_unzipped_name in files_unzipped.namelist():\n if not file_unzipped_name.startswith('__MACOSX'):\n if file_unzipped_name.endswith(\".jpeg\"):\n list_img_files_to_save.append(file_unzipped_name)\n elif file_unzipped_name.endswith(\".indexList.html\"):\n name_left_menu = file_unzipped_name\n elif file_unzipped_name.endswith(\"_xsd.html\"):\n name_main_content = file_unzipped_name\n\n return files_unzipped, name_left_menu, name_main_content, list_img_files_to_save",
"def get_lines_from_zipped_file(fname):\n content = []\n fd = gzip.open(fname, 'r')\n try:\n for line in fd:\n content.append(line.strip('\\n'))\n except Exception as err:\n raise Exception(\"Error reading from file %s: %s\" % (fname, err))\n finally:\n fd.close()\n return content",
"def getZipCounts(fname):\n counts = {}\n with open(fname) as f:\n counts = json.load(f)\n return counts",
"def extractall(self, *args, **kwargs):\n self.zipfile.extractall(*args, **kwargs)",
"def files_in_archive(fd: BinaryIO) -> Iterable[int]:\n\n _check_next_bytes(fd, ARCHIVE_MAGIC, 'archive magic number')\n\n while True:\n # In some archives, the first file ends with an additional \\n. If that\n # is present, skip it.\n if fd.read(1) != b'\\n':\n fd.seek(-1, 1)\n\n # Each file in an archive is prefixed with an ASCII header:\n #\n # 16 B - file identifier (text)\n # 12 B - file modification timestamp (decimal)\n # 6 B - owner ID (decimal)\n # 6 B - group ID (decimal)\n # 8 B - file mode (octal)\n # 10 B - file size in bytes (decimal)\n # 2 B - ending characters (`\\n)\n #\n # Skip the unused portions of the file header, then read the size.\n fd.seek(16 + 12 + 6 + 6 + 8, 1)\n size_str = fd.read(10)\n if not size_str:\n return\n\n try:\n size = int(size_str, 10)\n except ValueError as exc:\n raise FileDecodeError(\n 'Archive file sizes must be decimal integers') from exc\n\n _check_next_bytes(fd, b'`\\n', 'archive file header ending')\n offset = fd.tell() # Store offset in case the caller reads the file.\n\n yield size\n\n fd.seek(offset + size)",
"def extractZipFiles(rootDir, zipDir):\n for root, dirs, files in os.walk(zipDir, topdown=False):\n for name in files:\n \n zipFiles = os.path.join(root, name)\n \n #Check file extension here\n if \".zip\" not in zipFiles:\n continue\n \n else:\n zipPath = zipfile.ZipFile(zipFiles, 'r')\n #print(zipPath) \n \n filesInZip = zipPath.namelist()\n i = 0 \n for i in range(len(filesInZip)):\n #print(filesInZip[i])\n #print(zipPath.getinfo(filesInZip[i]))\n \n if \".mp3\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n elif \".m4a\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n elif \".mp4\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n elif \".png\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n elif \".jpg\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n \n elif \".pdf\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n else:\n print(\"No media found in zip file {0}\".format(name))\n \n zipPath.close()",
"def _index_files(path):\n with zipfile.ZipFile(path) as zf:\n names = sorted(zf.namelist())\n names = [nn for nn in names if nn.endswith(\".tif\")]\n names = [nn for nn in names if nn.startswith(\"SID PHA\")]\n phasefiles = []\n for name in names:\n with zf.open(name) as pt:\n fd = io.BytesIO(pt.read())\n if SingleTifPhasics.verify(fd):\n phasefiles.append(name)\n return phasefiles",
"def get_filenames(zip_file: str) -> List:\n file_names = []\n with ZipFile(zip_file, 'r') as zipObj:\n listOfiles = zipObj.namelist()\n for elem in listOfiles:\n if \"wav\" in elem:\n file_names.append(elem)\n return file_names"
] | [
"0.7123306",
"0.654805",
"0.64884543",
"0.6211099",
"0.6143137",
"0.60887825",
"0.60620993",
"0.6023242",
"0.591442",
"0.58101535",
"0.57589924",
"0.5750186",
"0.57359564",
"0.5727873",
"0.57115054",
"0.57092226",
"0.56497854",
"0.5616687",
"0.561402",
"0.5552648",
"0.55434126",
"0.55362296",
"0.551657",
"0.5498416",
"0.5497909",
"0.5496984",
"0.5484534",
"0.54766744",
"0.5465474",
"0.546309"
] | 0.6771445 | 1 |
Returns an enumeration member with a value matching `value`. | def get_member(
cls,
value: str,
):
if not value:
return None
members = [
(member, member.value)
for member in cls.__members__.values()
]
for member, member_value in members:
if member_value == value:
return member
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Enum(enum, value, default=None):\n if value is None:\n return default\n\n for pair in enum:\n if pair.value == value:\n return pair\n\n raise KeyError(\"Value '{}' not contained in enum type\".format(value))",
"def from_value(cls, value: str):\n return cls._value2member_map_[value]",
"def test_get_enum_by_value():\n assert BusinessType.get_enum_by_value('CP') == BusinessType.COOPERATIVE\n assert BusinessType.get_enum_by_value('FM') == BusinessType.PARTNERSHIP_AND_SOLE_PROP\n assert BusinessType.get_enum_by_value('NOT_FOUND') is None",
"def parse(\n cls,\n value: str\n ):\n\n if value is None or len(value) == 0:\n raise ValueError(\"provided value may not be None or empty\")\n\n for item in cls:\n if value == item.value:\n # found a matching value\n return item\n\n # Fallback value in case the API adds an enum that is not supported\n # by an older version of the SDK\n return cls.Unknown",
"def __getitem__(self, key):\n try:\n if utils.is_str(key):\n key = utils.force_name_case(key)\n return next(enum for enum in self if enum.name == key)\n else:\n return self._enums[key]\n except (StopIteration, TypeError, KeyError, IndexError):\n raise KeyError(\"There is no enum with the name/index '%s' in the '%s' bitfield!\" % (key, self.name))",
"def from_value(cls, value):\n value = value if value else 0\n try:\n flags = [flag.name for flag in cls.enum_class if flag.value & value]\n except TypeError:\n flags = [flag.name for flag in cls.enum_class if flag.name == value]\n\n return cls(*flags)",
"def getName(cls, value):\n for v, n in cls.iterate():\n if v == value:\n return n\n\n raise ValueError('Value {0} not found in {1}'.format(value, cls.__name__))",
"def member_status(value):\n for status in models.MEMBER_STATUS:\n if status[0]==value:\n return status[1]\n\n return \"MEMBER STATUS NOT FOUND\"",
"def get_enum_value_row(enum_field, enum_value):\n # Translate plural, if given\n enum_field = ENUM_PLURALS_TRANSLATE[enum_field] if enum_field in ENUM_PLURALS_TRANSLATE else enum_field\n return apps.get_model('ahj_app', enum_field).objects.get(Value=enum_value)",
"def cast_value_to_enum(attribute: Any, widget_value: str):\n enum_class: MyEnum = attribute.__class__\n return (t for i, t in enumerate(enum_class)\n if t.value == widget_value).__next__()",
"def get_by(cls, name, value):\n return cls.query(getattr(cls, name) == value).get()",
"def from_value(value):\r\n result = TokenKind._value_map.get(value, None)\r\n\r\n if result is None:\r\n raise ValueError('Unknown TokenKind: %d' % value)\r\n\r\n return result",
"def check_enum(enumerator, value):\n is_valid = False\n for data in enumerator:\n if data == value:\n is_valid = True\n break\n\n if is_valid:\n return value\n else:\n my_banner(\"Value must be from enum \" + enumerator +\" Value has been set to N/A\")\n return \"na\"",
"def enum_value(cls: Any, e: Any) -> Any:\n if is_enum(e):\n v = e.value\n # Recursively get value of Nested enum.\n if is_enum(v):\n return enum_value(v.__class__, v)\n else:\n return v\n else:\n return cls(e).value",
"def _get_data_type_name_by_value(data_type, value, field_name='data_type'):\n return data_type.DESCRIPTOR.fields_by_name[field_name].enum_type.values_by_number[value].name",
"def from_int(value):\n for item in Sfc5xxxUnit:\n if item.value == value:\n return item\n raise ValueError(\"Invalid unit value: {}!\".format(value))",
"def __getitem__(self, value):\n\n # Select the correct index\n if isinstance(value, six.integer_types):\n idx = self.by_value\n elif isinstance(value, six.string_types):\n idx = self.by_name\n else:\n raise KeyError(value)\n\n # Look up the value in that index\n return idx[value]",
"def FromString(cls, value: str):\n for _, member in cls.__members__.items():\n if member.value == value:\n return member\n raise LookupError('Invalid component: ' + value)",
"def get(cls, value):\n for k in cls:\n if k.value == value:\n return k\n\n raise KeyError(f'Cannot get key by value \"{value}\" of {cls}')",
"def getItem(enum, index):\r\n return enum[list(enum.__members__)[index]]",
"def with_value(self, value):\n return type(self)(self.name, self.type, value, self.metadata or None)",
"def by_label(self, value: str) -> RegistryType:\n return {k: v for k, v in self.items() if k == value}",
"def EnumValueName(self, enum_value, enum_type):\n return '%s_%s' % (self.ToPpapiType(enum_type).upper(),\n enum_value.name.upper())",
"def get_by_field(self, field, value):\n for item in self.items:\n if item.__dict__[field] == value:\n return item\n return None",
"def _enum_getter(enum):\n def getter(name):\n try:\n return enum[name]\n except KeyError:\n return name\n getter.__name__ = enum.__name__\n return getter",
"def from_int(value):\n for item in Sfc5xxxUnitPrefix:\n if item.value == value:\n return item\n raise ValueError(\"Invalid unit prefix value: {}!\".format(value))",
"def get_by(self, field, value):\n return self._client.get_by(field, value)",
"def to_python(self, value):\n if isinstance(value, self.enum_class):\n return value\n value = super(self.__class__, self).to_python(value)\n if isinstance(value, int):\n return self.enum_class(value)\n assert value is None\n return None",
"def enum_lookup(enumtype, name):\n # type: (typing.Type[T], str) -> Optional[T]\n try:\n return enumtype[name]\n except LookupError:\n return None",
"def get_attr(self, value):\n return self.index[value]"
] | [
"0.67772806",
"0.66527843",
"0.64541763",
"0.6362393",
"0.611267",
"0.6082106",
"0.5995382",
"0.59891886",
"0.5980627",
"0.5902926",
"0.5802926",
"0.57012653",
"0.560989",
"0.5605811",
"0.5560168",
"0.55101895",
"0.55054975",
"0.5468104",
"0.5463636",
"0.5452052",
"0.5404608",
"0.53592795",
"0.5358487",
"0.53295296",
"0.5295095",
"0.5256075",
"0.5239886",
"0.51944584",
"0.5184017",
"0.515656"
] | 0.7054582 | 0 |
Decorator that can be used to return the first item of a callable's `list` return. | def return_first_item(func):
# Define the wrapper function.
def wrapper(self, *args, **kwargs):
# Execute the decorated method with the provided arguments.
result = func(self, *args, **kwargs)
# If the function returned a result and that result is a list then
# return the first item on that list.
if result and isinstance(result, list):
result = result[0]
return result
return wrapper | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def first(items):\n return next(iter(items or []), None)",
"def first(items):\r\n return items[0]",
"def first(l):\n return next(iter(l), None)",
"def _get_first(details: CallableDetails) -> CallableArg:\n return details.args[0]",
"def first(sequence, default=Ellipsis):\n if default is Ellipsis:\n return next(iter(sequence))\n else:\n return next(iter(sequence), default)",
"def return_first(fn):\n def wrapped(*args, **kwargs):\n res = fn(*args, **kwargs)\n return res if _HVD.rank() == 0 else None\n return wrapped",
"def first(iterable: t.Iterable[T]) -> T:\n return next(iter(iterable))",
"def first(self, func: Callable[[T], bool], default=None, raise_exception: bool=True) -> Optional[T]:\n if raise_exception:\n return next(iter(filter(func, self.array)))\n return next(iter(filter(func, self.array)), default)",
"def first(xs):\n if not xs:\n return None\n return xs[0]",
"def first(xs):\n if not xs:\n return None\n return xs[0]",
"def decorator(arg):\n return lambda: list(arg)",
"def _sfn(x):\n if len(x) == 1:\n return x[0]\n return fn(*x)",
"def first(collection):\n return next(iter(collection))",
"def first(collection):\n return next(iter(collection))",
"def first(iterable: Iterable[T1], predicate: Callable[[T1], bool]) -> Union[T1, None]:\n for x in iterable:\n if predicate(x):\n return x\n return None",
"def _resolver_first(self, item: Any, *_: Any) -> Any:\n try:\n return next(iter(item))\n except StopIteration:\n assert False # not supposed to happen in current tests",
"def first(seq):\n return next(iter(seq))",
"def first(l: iter, predicate):\n for ele in l:\n if predicate(ele):\n return ele\n raise RuntimeError(\"Found nothing to match predicate\")",
"def _first(self, \n iterable, \n condition=lambda x: True):\n try:\n return next(x for x in iterable if condition(x))\n except:\n return None",
"def first_last_item(input_list: list) -> list:\n\n if len(input_list) > 1:\n return [input_list[0], input_list[-1]]\n else:\n return []",
"def memoize(func):\n result: List[Any] = []\n\n @functools.wraps(func)\n def wrapped_func():\n if not result:\n result.append(func())\n return result[0]\n\n return wrapped_func",
"def getfirst(s):\n return s[0] if isinstance(s, list) else s",
"def hd(lst):\n return lst[0] if lst else None",
"def getFirstFunction(self) -> ghidra.program.model.listing.Function:\n ...",
"def return_first(x):\r\n if x == []:\r\n return ''\r\n else:\r\n return x[0]",
"def first(self):\n if self.is_empty():\n raise Empty('list is empty')\n return self._head._element # front aligned with head of list",
"def first(x):\n try:\n x = x.to_series()\n except AttributeError:\n pass\n return list(x)[0]",
"def first_true(iterable, default=False, pred=None):\n return next(filter(pred, iterable), default)",
"def get_first_item(videos):\n\n return next(iter(videos or []), None)",
"def first(self, callback: Callable = None) -> Any:\n if callback:\n return self.filter(callback).first()\n\n return self[0]"
] | [
"0.65695965",
"0.65369004",
"0.6376428",
"0.6358152",
"0.6354889",
"0.6343154",
"0.6285985",
"0.62713856",
"0.6234703",
"0.6234703",
"0.61748505",
"0.61029243",
"0.6100002",
"0.6100002",
"0.60022485",
"0.5952739",
"0.5899202",
"0.5892642",
"0.58904845",
"0.58807445",
"0.5874004",
"0.5869914",
"0.58322227",
"0.5802878",
"0.5801705",
"0.5779935",
"0.5777",
"0.57767045",
"0.5774048",
"0.5767386"
] | 0.8338826 | 0 |
Decorator that ensures all ``list`` objects in a method's arguments have the same length | def lists_equal_length(func):
# Define the wrapper function.
def wrapper(self, *args, **kwargs):
# Collect all `list` objects from `args`.
lists_args = [arg for arg in args if isinstance(arg, list)]
        # Collect all `list` objects from `kwargs`.
lists_kwargs = [arg for arg in kwargs.values() if isinstance(arg, list)]
# Concatenate the lists of `list` objects.
lists = lists_args + lists_kwargs
# Check whether all the `list` objects have the same length.
do_have_same_length = len(set(map(len, lists))) == 1
# Raise an `InvalidArgumentsError` exception if there's a length
# mismatch.
if not do_have_same_length:
msg_fmt = "The argument lists must have the same length."
raise InvalidArgumentsError(msg_fmt)
# Simply execute the decorated method with the provided arguments
# and return the result.
return func(self, *args, **kwargs)
return wrapper | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list_of_equal_len():\n\n @type_checked\n def _run_test(something:[str, int, bool]):\n assert isinstance(something[0], str)\n assert isinstance(something[1], int)\n assert isinstance(something[2], bool)\n\n _run_test(something=[None, \"12\", 1])",
"def __size_restriction_correct_list_parameter(self):\n\n strTestName = 'List size equal to a parameter (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Ref. parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, let us define a tuple\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizEq('parameter1', 'iRefParameter1')\n\n RxCSObject.iRefParameter1 = 3\n RxCSObject.parameter1 = [11, 12, 13]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def list_check(*args, func=None):\n func = func or inspect.stack()[2][3]\n for var in args:\n if not isinstance(var, (list, collections.UserList, collections.abc.MutableSequence)):\n name = type(var).__name__\n raise ListError(\n 'Function {} expected list, {} got instead.'.format(func, name))",
"def __size_restriction_incorrect_list_parameter(self):\n\n strTestName = 'List size equal to a parameter (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Ref. parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizEq('parameter1', 'iRefParameter1')\n\n RxCSObject.iRefParameter1 = 14\n RxCSObject.parameter1 = [11, 12, 13]\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)",
"def decorator(arg):\n return lambda: list(arg)",
"def __size_restriction_correct_list_list(self):\n\n strTestName = 'List size higher than the size of other list (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('lRefParameter1', 'Ref. parameter')\n RxCSObject.paramType('lRefParameter1', list)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizEq('parameter1', 'lRefParameter1', mul=0.5)\n\n RxCSObject.lRefParameter1 = [21, 22, 23, 24, 25, 26]\n RxCSObject.parameter1 = [11, 12, 13]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def __size_restriction_incorrect_list_list(self):\n\n strTestName = 'List size higher or equal to the size of other list (incorrect)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('lRefParameter1', 'Ref. parameter')\n RxCSObject.paramType('lRefParameter1', list)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('parameter1', 'List 1D parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizHE('parameter1', 'lRefParameter1', mul=0.5)\n\n RxCSObject.lRefParameter1 = [21, 22, 23, 24, 25, 26]\n RxCSObject.parameter1 = [11, 12]\n\n self.__parametersCheck_error(RxCSObject, SizeError, strTestName)",
"def __len__(self, *args, **kwargs):\n return len(self._list(*args, **kwargs))",
"def test_listlist_op_1():\n\n @ops.listlist_op\n def f(x):\n return [4, 5, 6]\n\n result = f([1, 2, 3]) # Passing in a list, as expected\n\n assert(isinstance(result, list)), f\"{result}\"\n assert(result == [4, 5, 6])",
"def needs_arglist(self):\n True",
"def __DimSiz_restriction_correct_list_parameter_pedantic(self):\n\n strTestName = 'The size of a list dimension lower than a parameter [pedantic] (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iParameter1', 'Int parameter')\n RxCSObject.paramType('iParameter1', int)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramDimL('parameter1', 'iParameter1', 0, mul=2, pedantic=1) # Size of dimension 0 must be lower than 2 * 'iParameter1'\n\n RxCSObject.iParameter1 = 2\n RxCSObject.parameter1 = [0, 1, 4]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def __DimSiz_restriction_correct_list_parameter(self):\n\n strTestName = 'The size of a list dimension lower than a parameter (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iParameter1', 'Int parameter')\n RxCSObject.paramType('iParameter1', int)\n\n # Now, let us define a list\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramDimL('parameter1', 'iParameter1', 0) # Size of dimension 0 must be lower than 'iParameter1'\n\n RxCSObject.iParameter1 = 5\n RxCSObject.parameter1 = [0, 1, 2, 4]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def test_arguments(self):\n calls = []\n decorator = self.decorator()\n\n @decorator\n def func(a, b, c):\n calls.append((a, b, c))\n\n func(1, 2, c=3)\n self.assertEqual(calls, [(1, 2, 3)])",
"def doing_nothing(A: list):\n pass",
"def _check_args(self, args):\n if not isinstance(args, list) or not len(args) >= 2:\n raise FunctionArgumentException(\"Argument of attribute getter \"\n \"function '%s' must be a list of \"\n \"indeces; got: '%s'\" % (\n self.name,\n args\n ))\n\n if not is_homogeneous(args, (str, int)):\n raise FunctionArgumentException(\n \"'%s': argument must be a list of strings; got: '%s'\" %\n (self.name, args)\n )",
"def __NDim_restriction_correct_list_parameter(self):\n\n strTestName = 'The number of dimensions in a list lower than a parameter (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('iRefParameter1', 'Int parameter')\n RxCSObject.paramType('iRefParameter1', int)\n\n # Now, let us define a list parameter\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramNDimH('parameter1', 'iRefParameter1')\n\n RxCSObject.iRefParameter1 = 0\n RxCSObject.parameter1 = [4, 2, 11, -1, -4]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def getListSize(*args):",
"def getListSize(*args):",
"def __len__(self):\n return len(self.lst)",
"def listify(arg):\n if isinstance(arg, list):\n return arg\n else:\n return [arg]",
"def __size_restriction_correct_list_number(self):\n\n strTestName = 'List size higher than a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'List parameter')\n RxCSObject.paramType('parameter1', list)\n RxCSObject.paramSizH('parameter1', 3)\n\n RxCSObject.parameter1 = [1, 2, 3, 4, 5, 6]\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)",
"def len_list(self) -> int:\n return 1",
"def test_sizesetterwithlist(self):\n Rectangle.reset_objects()\n with self.assertRaises(TypeError) as e:\n r1 = Square(1, 2, 2, 3)\n self.assertEqual(r1.size, 1)\n r1.size = [1, 2]\n self.assertEqual(str(e.exception), \"width must be an integer\")",
"def size(*args):",
"def list_generalizer(f):\n @functools.wraps(f)\n def wrapped(data, *args, **kwargs):\n if type(data) == list:\n return [f(d, *args, **kwargs) for d in data]\n else:\n return f(data, *args, **kwargs)\n\n return wrapped",
"def test_args_count_equal(args: list, target: int) -> bool:\n\n\treturn (args_count(args) == target)",
"def test_raises_typeerror_if_arg_not_list(self):\n def result():\n return num_islands({})\n\n self.assertRaises(TypeError, result)",
"def _list4_validator(_: object, attrib: 'attrs.Attribute[List[Vec]]', value: object) -> None:\n if not isinstance(value, list):\n raise TypeError(attrib.name + ' should be a list!')\n if len(value) != 4:\n raise ValueError(attrib.name + ' must have 4 values!')",
"def validate_too_many_args(args_list):\n if len(args_list) > 1:\n raise TooManyArgsError()",
"def length(memoryManager, paramsList):\n handleEmpty(paramsList, \"cannot get length of\")\n head = paramsList[0]\n\n if not validateList(head):\n raise Exception('Tried to get length of non-list')\n # if type(head) == float:\n # return [1.0]\n\n return [float(len(head))]"
] | [
"0.6833578",
"0.6415181",
"0.63303596",
"0.61641717",
"0.6103861",
"0.6006148",
"0.59215814",
"0.59204984",
"0.5868942",
"0.57566774",
"0.57490146",
"0.5735287",
"0.572292",
"0.5700607",
"0.56883466",
"0.56657",
"0.5599322",
"0.5599322",
"0.55968153",
"0.5568369",
"0.5561684",
"0.5558008",
"0.5543931",
"0.553128",
"0.553022",
"0.55268925",
"0.55224335",
"0.55217195",
"0.5516648",
"0.55093"
] | 0.8060742 | 0 |
Clear the screen and draw the alien. | def draw():
screen.fill((0, 0, 0))
alien.draw() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear(self):\n pygame.draw.rect(self.screen,BLACK,(0,0,WINDOWWIDTH,\n WINDOWHEIGHT))\n pygame.display.update()",
"def clear(self) -> None:\n\n self.screen.fill(self.bg)",
"def draw(self):\n self.screen.fill(BACKGROUND_COLOR)\n self.cannon.draw(self.screen)\n self.objects.draw(self.screen)",
"def clearScreen(self):\n background = pygame.Surface(self.getSize())\n background = background.convert()\n background.fill((0, 0, 0))\n self.screen.blit(background, (0, 0))",
"def clearScreen():\n dislin.erase()",
"def draw(self):\r\n self.__screen.draw_asteroid(self, self.__x, self.__y)",
"def draw(self):\r\n\r\n self.screen.fill((0,0,0))\r\n self.sprite_group.draw(self.screen)\r\n pygame.display.flip()",
"def draw(self):\r\n self.scr.fill(SCREEN_COLOR)\r\n self.label.draw()\r\n pygame.display.flip()",
"def draw(self):\n\n State.screen.draw()",
"def draw(self):\n arcade.draw_rectangle_filled(self.center.x,\n self.center.y,\n self.width,\n self.height,\n arcade.color.WHITE)",
"def draw(self):\n arcade.draw_xywh_rectangle_filled(\n self.x, self.y, self.width, self.height, self.fill.color\n )\n arcade.draw_xywh_rectangle_outline(\n self.x, self.y, self.width, self.height, self.pen.color, 3\n )",
"def draw(self):\n self.screen.fill(WHITE)\n self.color_invalid()\n self.draw_selected()\n self.shade_locked_cells()\n self.draw_grid()\n self.draw_buttons()\n self.draw_numbers()",
"def draw(self):\n\n self.squares.draw(self.screen)\n if not self.hide_grid:\n self.draw_grid()\n self.fleas.draw(self.screen)\n pygame.display.flip()",
"def on_draw(self):\n self.clear()\n arcade.draw_text(\n \"Game Over - Click to restart\",\n SCREEN_WIDTH / 2,\n SCREEN_HEIGHT / 2,\n arcade.color.WHITE,\n 30,\n anchor_x=\"center\",\n )",
"def display_pygame():\n sprite_group.clear(screen, eraser_image)\n sprite_group.draw(screen)\n pygame.display.update()",
"def draw_screen(self):\n\t\tself.current_screen.draw_screen(self.master_screen)",
"def _blank_screen(self):\n self._screen.fill(self._bgcolor)\n pygame.display.update()",
"def clear_board(self):\n pygame.draw.rect(self.display, self.white, pygame.Rect(0, 0, self.window_x, self.window_y))\n self.draw_grid()",
"def clear(self):\n self.animation.stop()\n self.draw(0, 0, 0, 0, 0)",
"def draw(self):\n self.bufferX = (self.appWidth/2) - self.viewX\n self.bufferY = (self.appHeight/2) - self.viewY\n anwp.sl.engine.clear()\n anwp.sl.engine.drawImage(0, 0, self.appWidth, self.appHeight, self.backgroundImage)\n self.drawWarpLines()\n \n # render engine\n anwp.sl.engine.render()\n self.drawSystemInfo()\n self.drawWarpGateInfo()\n self.drawWarpTradeInfo()",
"def draw(self, screen):",
"def clear_screen(self):\n os.system('cls' if os.name == 'nt' else 'clear')\n self.display_heading()\n self.display_empty_lines()",
"def clearScreen():\n pass",
"def clear():\n\tglobal _s\n\t_s.screen.fill(_s.back)\n\t_s.tab(0,0)\n\t_flip()",
"def do_paint(self):\r\n curses.curs_set(0)\r\n if self.win:\r\n self.paint()\r\n self.done_paint()",
"def clear(self):\n black = neo.Color(0,0,0)\n self.set_all(black)\n self.draw()",
"def drawScreen(screen):\n screen.fill(BLACK) # Fill the screen with black.\n \n\n # Flip the display so that the things we drew actually show up.\n pygame.display.flip()",
"def draw(self, screen):\n self.draw_left_zone(screen)\n self.draw_middle_zone(screen)\n self.draw_right_zone(screen)",
"def clear_screen():\n print('\\n' * TERMINAL_HEIGHT)",
"def draw(self):\n ui.clear()\n ui.draw_board(self)\n ui.output_buffer()"
] | [
"0.7505039",
"0.7242356",
"0.7230753",
"0.7131363",
"0.7115127",
"0.7101928",
"0.70682305",
"0.70466423",
"0.7031619",
"0.6995481",
"0.6974922",
"0.6921669",
"0.69176",
"0.69122386",
"0.6896714",
"0.6883864",
"0.6875905",
"0.6849436",
"0.6818272",
"0.6815119",
"0.6790672",
"0.6789373",
"0.674679",
"0.67335576",
"0.6732626",
"0.6732472",
"0.672865",
"0.6727181",
"0.6722812",
"0.67102516"
] | 0.8509718 | 0 |
Move the alien around using the keyboard. | def update():
if keyboard.left:
alien.x -= 2
elif keyboard.right:
alien.x += 2
if keyboard.space:
alien.y = GROUND - 50
animate(alien, y=GROUND, tween='bounce_end', duration=.5)
# If the alien is off the screen,
# move it back on screen
if alien.right > WIDTH:
alien.right = WIDTH
elif alien.left < 0:
alien.left = 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def move(event):\r\n\t\tif event.char == \"a\":\r\n\t\t\tcanvas.move(z[a], -10, 0)\r\n\t\telif event.char == \"d\":\r\n\t\t\tcanvas.move(z[a], 10, 0)\r\n\t\telif event.char == \"w\":\r\n\t\t\tcanvas.move(z[a], 0, -10)\r\n\t\telif event.char == \"s\":\r\n\t\t\tcanvas.move(z[a], 0, 10)",
"def joystick_move(self, emphasis=1):\n step = int(20*emphasis)\n self.display.ship.move_vertical(step=step)",
"def move(self):\n \n self.position = self.wander()",
"def move(self):\n keys = pygame.key.get_pressed()\n\n if keys[pygame.K_w]:\n self.y -= self.vel\n if keys[pygame.K_a]:\n self.x -= self.vel\n if keys[pygame.K_s]:\n self.y += self.vel\n if keys[pygame.K_d]:\n self.x += self.vel",
"def move(self, environment):\n ch2 = getch()\n if ch2 == b'H' or ch2 == \"A\":\n # the up arrow key was pressed\n print (\"up key pressed\")\n\n\n elif ch2 == b'P' or ch2 == \"B\":\n # the down arrow key was pressed\n print(\"down key pressed\")\n\n\n elif ch2 == b'K' or ch2 == \"D\":\n # the left arrow key was pressed\n print(\"left key pressed\")\n\n elif ch2 == b'M' or ch2 == \"C\":\n # the right arrow key was pressed\n print(\"right key pressed\")",
"def move_tower(self, x, y):\n self.x = x\n self.y = y\n self.menu.x = x\n self.menu.y = y\n self.menu.update()",
"def joy_callback(self, msg):\n mappings = gamepad_mappings.set_gamepad_mappings(msg)\n self.move_vertical = mappings[\"button_vertical\"] # up: +1.0, down: -1.0\n self.move_horizontal = mappings[\"button_horizontal\"] # left: +1.0, right: -1.0",
"def update(self):\n if games.keyboard.is_pressed(games.K_RIGHT):\n self.x += 1\n if games.keyboard.is_pressed(games.K_a):\n self.x -= 1",
"def on_key_press(self, key):\n if key == LEFT:\n self.player.change_x = -5\n elif key == RIGHT:\n self.player.change_x = 5\n elif key == UP:\n self.player.change_y = -5 \n elif key == DOWN:\n self.player.change_y = 5",
"def update(self):\n keys = pygame.key.get_pressed() # Checks for an input by the user\n if keys[pygame.K_RIGHT]:\n king.move_right() # Moves right if the user presses the right key\n\n if keys[pygame.K_LEFT]:\n king.move_left() # Moves left if the user presses the left key",
"def update(self):\n pygame.event.pump()\n self.pos_x += 0\n if (pygame.key.get_pressed()[pygame.K_w]) and self.pos_y > 0:\n self.pos_y -= 1\n if (pygame.key.get_pressed()[pygame.K_a]) and self.pos_x > 0:\n self.pos_x -= 1\n if (pygame.key.get_pressed()[pygame.K_d]) and self.pos_x < 1080:\n self.pos_x += 1\n if (pygame.key.get_pressed()[pygame.K_s]) and self.pos_y < 360:\n self.pos_y += 1",
"def moveBy(self, x, y):\n\t\tself.moveTo(self.x + x, self.y + y)",
"def on_key_press(self, key, modifiers):\n\n if key == arcade.key.UP or key == arcade.key.W:\n self.player.change_y += .2\n elif key == arcade.key.LEFT or key == arcade.key.A:\n self.player.change_x -= .2\n elif key == arcade.key.RIGHT or key == arcade.key.D:\n self.player.change_x += .2\n elif key == arcade.key.DOWN or key == arcade.key.S:\n self.player.change_y -= .2",
"def MoveCurrentSpace(self):\n if self.facing == 0:\n self.y -= 1\n elif self.facing == 1:\n self.x += 1\n elif self.facing == 2:\n self.y += 1\n elif self.facing == 3:\n self.x -= 1",
"def move_to(self, x, y):\r\n self.__current_room = x, y",
"def AeroMove(self, pos):\r\n\r\n pass",
"def move(self, x, y):\n self.x+=x\n self.y+=y",
"def move_east(self):\r\n self.move(dx=1, dy=0)",
"def movement(self):\n self.rect.left -= self.speedx #to move the asteroid to the left",
"def on_key_press(self, key, modifiers):\n #if self.player_sprite.amphet_excited is False:\n \n\n if key == arcade.key.UP:\n self.player_sprite.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player_sprite.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player_sprite.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player_sprite.change_x = MOVEMENT_SPEED\n\n\n elif key == arcade.key.ESCAPE:\n raise Exception(\"\\n\\n See You soon, fork it share it !\")",
"def on_key_press(self, key, modifiers):\n if key == arcade.key.UP:\n self.player.change_y = MOVEMENT_SPEED\n elif key == arcade.key.DOWN:\n self.player.change_y = -MOVEMENT_SPEED\n elif key == arcade.key.LEFT:\n self.player.change_x = -MOVEMENT_SPEED\n elif key == arcade.key.RIGHT:\n self.player.change_x = MOVEMENT_SPEED",
"def move(self):\n \n self.position = self.explore()",
"def on_key_press(self, key, modifiers):\r\n if key == arcade.key.UP:\r\n self.player.change_y = MOVEMENT_SPEED\r\n elif key == arcade.key.DOWN:\r\n self.player.change_y = -MOVEMENT_SPEED\r\n elif key == arcade.key.LEFT:\r\n self.player.change_x = -MOVEMENT_SPEED\r\n elif key == arcade.key.RIGHT:\r\n self.player.change_x = MOVEMENT_SPEED",
"def on_key_press(self, key, modifiers):\r\n if key == arcade.key.UP:\r\n self.player.change_y = MOVEMENT_SPEED\r\n elif key == arcade.key.DOWN:\r\n self.player.change_y = -MOVEMENT_SPEED\r\n elif key == arcade.key.LEFT:\r\n self.player.change_x = -MOVEMENT_SPEED\r\n elif key == arcade.key.RIGHT:\r\n self.player.change_x = MOVEMENT_SPEED",
"def advance(self): \n self.center.x = self.center.x + self.velocity.dx\n self.center.y = self.center.y + self.velocity.dy",
"def move(self, dx, dy):\n self.x += dx\n self.y += dy",
"def keyboard(key, x, y):\r\n\tglobal ROTATE\r\n\r\n\tif key == chr(27): \r\n\t\tsys.exit(0)\r\n\telif key == 'r': \r\n\t\tROTATE = (ROTATE + 5) % 360\r\n\telif key == 'R': \r\n\t\tROTATE = (ROTATE - 5) % 360\r\n\r\n\tglutPostRedisplay()",
"def move(self, key):\n \n global last_time\n if (key == K_RIGHT):\n self.xMove = self.x_dist\n self.x_pos=self.xMove\n elif (key == K_LEFT):\n self.xMove = -self.x_dist\n self.x_pos+=self.xMove\n elif (key == K_UP):\n self.yMove = -self.y_dist\n self.y_pos+=self.yMove\n elif (key == K_DOWN):\n self.yMove = self.y_dist\n self.y_pos+=self.yMove\n self.rect = self.rect.move(self.xMove,self.yMove)",
"def move_north(self):\r\n self.move(dx=0, dy=-1)",
"def control(self, keyCode):\n if (keyCode == DOWN and (self.on_left or self.on_right)):\n if self.on_left:\n self.x = self.maze.LEFT_VERT\n else:\n self.x = self.maze.RIGHT_VERT\n self.rot_begin = self.MOUTH_DOWN_BEGIN_ANGLE\n self.rot_end = self.MOUTH_DOWN_END_ANGLE\n self.x_add = 0\n self.y_add = self.velocity\n elif (keyCode == UP and (self.on_left or self.on_right)):\n if self.on_left:\n self.x = self.maze.LEFT_VERT\n else:\n self.x = self.maze.RIGHT_VERT\n self.rot_begin = self.MOUTH_UP_BEGIN_ANGLE\n self.rot_end = self.MOUTH_UP_END_ANGLE\n self.x_add = 0\n self.y_add = -(self.velocity)\n elif (keyCode == LEFT and (self.on_top or self.on_bottom)):\n if self.on_top:\n self.y = self.maze.TOP_HORIZ\n else:\n self.y = self.maze.BOTTOM_HORIZ\n self.rot_begin = self.MOUTH_LEFT_BEGIN_ANGLE\n self.rot_end = self.MOUTH_LEFT_END_ANGLE\n self.x_add = -(self.velocity)\n self.y_add = 0\n elif (keyCode == RIGHT and (self.on_top or self.on_bottom)):\n if self.on_top:\n self.y = self.maze.TOP_HORIZ\n else:\n self.y = self.maze.BOTTOM_HORIZ\n self.rot_begin = self.MOUTH_RIGHT_BEGIN_ANGLE\n self.rot_end = self.MOUTH_RIGHT_END_ANGLE\n self.x_add = self.velocity\n self.y_add = 0"
] | [
"0.70149887",
"0.699595",
"0.681032",
"0.6801341",
"0.66253835",
"0.65040016",
"0.64911735",
"0.6442173",
"0.6367084",
"0.632558",
"0.62957025",
"0.6282375",
"0.62190115",
"0.6184205",
"0.61793596",
"0.6166848",
"0.61651015",
"0.61622244",
"0.6133459",
"0.61307865",
"0.61206126",
"0.609572",
"0.60945827",
"0.60945827",
"0.60821307",
"0.60700375",
"0.60412914",
"0.6030451",
"0.6030245",
"0.60272413"
] | 0.71201307 | 0 |
basic preprocessing on the raw variations returns a single histogram for each systematic with bin content representing the symmetrised uncertainty Preprocessing top mass variations are scaled by 1/3 | def processSystematic(observable, xsecType, xsecLevel, systematic, histNominal):
varHists = []
linkStr = ""
singlePointSystematics = ["ERDON", "ERDONRETUNE", "GLUONMOVETUNE", "BFRAG_PETERSON"]
sPS = 0
if any(singlePointSystematic in systematic for singlePointSystematic in singlePointSystematics):
sPS = 1
linkStr = "_"
variations = [""]
for variation in variations:
path = directory_base + xsec_type + "_" + xsec_level + directory_tail + systematic + linkStr + variation + "/combinedUnfolded/Hyp" + observable + "Results.txt"
inputfile = open(path, 'r').readlines()
bins = []
for line in inputfile:
bins.append(float(line.split( )[3]))
bins.append(float(line.split( )[5]))
bins = sorted(bins)
binsArray = array('f',bins)
histNameUp = systematic + "_UP"
histNameDown = systematic + "_DOWN"
histUp = TH1F(histNameUp, histNameUp, len(bins)-1, binsArray)
histDown = TH1F(histNameDown, histNameDown, len(bins)-1, binsArray)
histUpFinal = TH1F("", "", len(bins)-1, binsArray)
histDownFinal = TH1F("", "", len(bins)-1, binsArray)
ibin = 0
for line in inputfile:
nomBin = histNominal.GetBinContent(ibin+1)
nomBinCenter = histNominal.GetBinCenter(ibin+1)
unc = float(line.split( )[7])
if systematic == "DY":
print "DY UP = " + str(1.0 + unc)
print "DY DOWN = " + str(1.0 - unc)
histUp.SetBinContent(ibin+1, 1.0 + unc)
histDown.SetBinContent(ibin+1,1.0 - unc)
ibin = ibin + 1
histUpVis = histUp.Clone()
histDownVis = histDown.Clone()
histUpFinal = histUp.Clone()
histDownFinal = histDown.Clone()
if systematic == "PDF":
histUpFinal, histDownFinal = reNormalise(histNominal, histUpVis, histDownVis)
return (histUpFinal, histDownFinal) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def processSystematic(observable, xsecType, xsecLevel, systematic, histNominal):\n varHists = []\n\n linkStr = \"_\"\n variations = [\"\"]\n\n for variation in variations:\n if xsecType == \"normalised\":\n xsecType = \"normalized\"\n if xsecLevel == \"particle\":\n xsecLevel = \"pseudo\"\n path = directory_base + xsecType + \"_\" + xsecLevel + directory_tail + systematic + linkStr + variation + \"/combinedUnfolded/Hyp\" + observable + \"Results.txt\"\n #print \"directory = \" + str(path)\n inputfile = open(path, 'r').readlines()\n bins = []\n for line in inputfile:\n bins.append(float(line.split( )[3]))\n bins.append(float(line.split( )[5]))\n bins = sorted(bins)\n binsArray = array('f',bins)\n histNameUp = systematic + \"_UP\" \n histNameDown = systematic + \"_DOWN\" \n histUp = TH1F(histNameUp, histNameUp, len(bins)-1, binsArray)\n histDown = TH1F(histNameDown, histNameDown, len(bins)-1, binsArray)\n histUpFinal = TH1F(\"\", \"\", len(bins)-1, binsArray)\n histDownFinal = TH1F(\"\", \"\", len(bins)-1, binsArray)\n \n ibin = 0\n\n for line in inputfile:\n nomBin = histNominal.GetBinContent(ibin+1)\n nomBinCenter = histNominal.GetBinCenter(ibin+1)\n unc = float(line.split( )[7])\n# if systematic == \"MASS\":\n# unc = unc/(3.0)\n# if systematic == \"PSFSRSCALE\":\n# unc = unc/(sqrt(2.0))\n\n histUp.SetBinContent(ibin+1, 1.0 + unc)\n histDown.SetBinContent(ibin+1,1.0 - unc)\n ibin = ibin + 1 \n\n histUpVis = histUp.Clone()\n histDownVis = histDown.Clone()\n histUpFinal = histUp.Clone()\n histDownFinal = histDown.Clone()\n\n if systematic == \"PDF\":\n histUpFinal, histDownFinal = reNormalise(histNominal, histUpVis, histDownVis)\n\n return (histUpFinal, histDownFinal)",
"def _plot_psth_flat(self, sigma=5, figsize = (15, 8)):\n\t\n\t\tgaus_filt = sp.ndimage.gaussian_filter1d\n\t\tall_resp = gaus_filt(self.conditions_hist_mean.flatten(), sigma)\n\t\t\n\t\tfig = plt.figure(figsize=figsize)\n\t\tax = fig.add_subplot(1, 1, 1)\n\t\t\n\t\tax.plot(all_resp, linestyle='-', color='0.28')\n\t\t\n\t\tn_con = self.parameters['conditions']\n\t\tcon_mark = np.arange(0, (self.bins.size -1) * n_con, self.bins.size -1)\n\t\t\t\t\n\t\tax.xaxis.set_ticks(con_mark)\n\n\t\ttry:\n\t\t\tax.xaxis.set_ticklabels(self.cond_label)\n\t\texcept:\n\t\t\tax.xaxis.set_ticklabels(np.unique(self.marker_codes))\n\t\t\n\t\tfreq_label = np.round(ax.get_yticks() * (1/self.bin_width),\n\t\t\t\t\t\t\t decimals = 1)\n\t\tax.set_yticklabels(freq_label)\n\t\tax.set_ylabel('Frequency')\n\t\t\n\t\tfor label in ax.xaxis.get_majorticklabels():\n\t\t\tlabel.set_horizontalalignment('left')\n\t\t\t\n\t\tax.set_xlim(0, (self.bins.size -1) * n_con)\n\t\t\n\t\t# bug with macosx backend\n# plt.tight_layout()\n\t\tplt.subplots_adjust(hspace=0.45)",
"def wScalogram_nsig(data, hypothesis=None,\n nsigma=None, nsigma_min=None, nsigma_percent=1,\n reconstruction_scaled=False,\n firsttrend=False, logscale=True,\n title=None, xlabel=None, outputfile=None):\n\n WaveDec_data = HaarTransform(data)\n Ccoeffs = WaveDec_data[:-1]\n FirstTrend = WaveDec_data[-1]\n Level = len(Ccoeffs)\n\n if logscale==True:\n scale='log'\n else:\n scale='linear'\n\n nlevels = Level if firsttrend==False else Level+1\n nrows = nlevels+1 # the first panel is the data histogram\n if nsigma is not None:\n nrows += 1 # add another panel for the generating function\n ratio = [1.5] + [1.5]\n ratio += [1]*(nrows-2)\n\n fig = plt.figure(figsize=(12,12))\n gs = gridspec.GridSpec(ncols=1, nrows=nrows,\n height_ratios=ratio,\n hspace=0)\n axs = [fig.add_subplot(gs[i,0]) for i in range(nrows)]\n cbar_axs = fig.add_axes([0.93, 0.15, 0.02, 0.7]) # colorbar axis\n\n # Fill out top panel\n data_hist, _, data_center, data_width = _BinData(data, bins=2**Level)\n axs[0].bar(data_center, data_hist, align='center', width=data_width, color=Data_color)\n axs[0].set_yscale(scale)\n axs[0].text(x=.93, y=.63, s='Data', fontsize=12,\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5},\n transform=axs[0].transAxes)\n\n # If nsigma function is provided\n if nsigma is not None:\n\n nsigCcoeffs = nsigma\n\n cut = '(No cut)'\n if nsigma_percent is not None:\n cut = str(nsigma_percent*100) + '%'\n if nsigma_min is not None:\n cut = r'$\\sigma_{min}$ = ' + str(nsigma_min)\n\n if hypothesis is not None:\n #TODO: error trap\n DeltaCoeff = _NSigmaFilter(data, hypothesis, nsigma, nsigma_min, nsigma_percent)\n ReconstructedData = InvHaarTransform(DeltaCoeff, normalize=False)\n if reconstruction_scaled is True:\n RecData = np.divide(ReconstructedData, np.sqrt(hypothesis))\n else:\n RecData = ReconstructedData\n rec_hist, _, rec_center, rec_width = _BinData(RecData, bins=2**Level)\n axs[1].plot(rec_center, rec_hist, 'o', markersize=3, color='#E67E22',\n label='Reconstruction ({})'.format(cut))\n axs[1].plot(range(len(data_center)), np.zeros_like(RecData), color='black', linewidth=0.5)\n axs[1].set_yscale('linear')\n axs[1].legend(edgecolor=\"black\", fancybox=False,\n handletextpad=0, handlelength=0, markerscale=0, fontsize=12)\n\n cmap = _NewColorMap()\n binintensity = np.absolute(nsigma)\n sig_min = _findmin(binintensity)\n sig_max = _findmax(binintensity)\n norm = Normalize(vmin=sig_min, vmax=sig_max)\n\n # If firsttrend, fill out the bottom panel with the first trend\n if firsttrend==True:\n bins=1\n norm_points = norm(binintensity[-1])\n color_points = [cmap(i) for i in norm_points]\n hist, _, center, width = _BinData(FirstTrend, bins=1)\n axs[-1].bar(center, hist, align='center', width=width, color=color_points)\n axs[-1].tick_params(axis='both', bottom=False, labelbottom=False)\n axs[-1].set_yscale(scale)\n axs[-1].text(x=.93, y=.63, s=r'$\\ell={%.1i}$'%(0), fontsize=12,\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5},\n transform=axs[-1].transAxes)\n\n # Now plot the negative coefficients. 
The bars are hashed to distinguish the\n # pos and neg coefficients.\n s = 2 if nsigma is not None else 1\n for l in range(Level):\n bins=2**(Level-l-1)\n coeffs = Ccoeffs[l]\n norm_points = norm(binintensity[l])\n color_points = [cmap(i) for i in norm_points]\n\n if logscale==True:\n # Plot the positive coefficients\n pos_ix = np.where(coeffs>0)\n pos_coeffs = np.zeros_like(coeffs)\n for i in pos_ix:\n pos_coeffs[i] = coeffs[i]\n pos_hist, _, pos_center, pos_width = _BinData(pos_coeffs, bins=bins)\n axs[l+s].bar(pos_center, pos_hist, align='center', width=pos_width, color=color_points)\n\n # Now plot the negative coefficients. The bars are hashed to distinguish the\n # pos and neg coefficients.\n neg_ix = np.where(Ccoeffs[l]<0)\n neg_coeffs = np.zeros_like(coeffs)\n for j in neg_ix:\n neg_coeffs[j] = np.absolute(coeffs[j])\n neg_hist, _, neg_center, neg_width = _BinData(neg_coeffs, bins=bins)\n axs[l+s].bar(neg_center, neg_hist, align='center', width=neg_width, color=color_points,\n hatch='///')\n\n axs[l+s].tick_params(axis='both', bottom=False, labelbottom=False)\n lev = Level-l-1\n axs[l+s].text(x=.93, y=.63, s=r'$\\ell={%.1i}$'%(lev+1), fontsize=12,\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5},\n transform=axs[l+s].transAxes)\n axs[l+s].set_yscale(scale)\n\n else:\n hist, _, center, width = _BinData(coeffs, bins=bins)\n axs[l+s].bar(center, hist, align='center', width=width,\n color=color_points)\n axs[l+s].plot(range(bins), np.zeros(bins), color='black',\n linewidth=0.5)\n axs[l+s].tick_params(axis='both', bottom=False, labelbottom=False)\n lev=Level-l-1\n axs[l+s].text(x=.93, y=.63, s=r'$C_{l=%.1i}$'%(lev), fontsize=12,\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5},\n transform=axs[l+s].transAxes)\n axs[l+s].set_yscale(scale)\n\n cbar = ColorbarBase(cbar_axs, cmap=cmap, norm=norm)\n #cbar_axs.text(.5, sig_max, r'$N\\sigma$', fontsize=12)\n fig.text(x=0.93, y=.86, s=r'$N\\sigma$', fontsize=12)\n\n if title is not None:\n fig.suptitle(title, fontsize=18, y=0.92)\n fig.text(x=0.5, y=0.1, s=xlabel, fontsize=14)\n if outputfile is not None:\n plt.savefig(outputfile, bbox_inches='tight')\n plt.show()",
"def preprocess(N, sigma2, R, f_def, params):\n\n #\n if 'scale' in params:\n if params['scale']:\n\n #\n N0 = np.sum(R * f_def, axis=1)\n\n #\n f_def *= np.average(N / N0)\n\n return N, sigma2, R, f_def, params",
"def plot_hist_snfit_meta(self): \n \n self.read_meta()\n self.read_snfit_results()\n\n \n self.diff_x0 = []\n self.diff_x0_err = []\n self.diff_x1 = []\n self.diff_x1_err = [] \n self.diff_c = []\n self.diff_c_err = [] \n self.diff_mb = []\n self.diff_mb_err = [] \n self.diff_cov_x0_x1 = []\n self.diff_cov_x0_c = []\n self.diff_cov_x1_c = []\n self.diff_cov_mb_x1 = []\n self.diff_cov_mb_c = []\n\n for i in range (len(self.sn_name)):\n for j in range (len(self.meta_sn_name_list)):\n if self.sn_name[i] == self.meta_sn_name_list[j]:\n if np.abs(self.mb[i] - self.meta_mb[j]) < 0.0001:\n self.diff_x0.append(self.x0[i] - self.meta_x0[j])\n self.diff_x0_err.append(self.x0_err[i] - self.meta_x0_err[j])\n self.diff_x1.append(self.x1[i] - self.meta_x1[j])\n self.diff_x1_err.append(self.x1_err[i] - self.meta_x1_err[j]) \n self.diff_c.append(self.c[i] - self.meta_c[j])\n self.diff_c_err.append(self.c_err[i] - self.meta_c_err[j]) \n self.diff_mb.append(self.mb[i] - self.meta_mb[j])\n self.diff_mb_err.append(self.mb_err[i] - self.meta_mb_err[j])\n# self.diff_cov_x0_x1.append()\n# self.diff_cov_x0_c.append()\n# self.diff_cov_x1_c.append()\n# self.diff_cov_mb_x1.append()\n# self.diff_cov_mb_c.append()\n else:\n print self.x1[i] - self.meta_x1[j], self.sn_name[i],self.meta_sn_name_list[j], self.x1[i], self.meta_x1[j]\n\n\n fig = plt.figure(figsize=(8.,8.)) \n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_x0,25,label='$\\Delta$ X0')\n ax0_2.hist(self.diff_x0_err,25,label='$\\Delta$ X0 error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/x0_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_x1,25,label='$\\Delta$ X1')\n ax0_2.hist(self.diff_x1_err,25,label='$\\Delta$ X1 error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/x1_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n \n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_c,25,label='$\\Delta$ Color')\n ax0_2.hist(self.diff_c_err,25,label='$\\Delta$ Color error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/color_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()\n\n gs = gridspec.GridSpec(2, 1) #subplots ratio\n f, (ax0_1, ax0_2) = plt.subplots(2, sharex=True)\n f.subplots_adjust(hspace = 0.5)\n ax0_1 = plt.subplot(gs[0, 0])\n ax0_2 = plt.subplot(gs[1, 0])\n \n ax0_1.hist(self.diff_mb,50,label='$\\Delta$ mb')\n ax0_2.hist(self.diff_mb_err,50,label='$\\Delta$ mb error')\n ax0_1.legend()\n ax0_2.legend()\n ax0_1.set_ylabel('N')\n ax0_2.set_ylabel('N')\n pdffile = '../sugar_analysis_data/results/mb_plot_meta_snfit.pdf'\n plt.savefig(pdffile, bbox_inches='tight')\n plt.show()",
"def dataset_handling_with_standardisation(init_data):\n #\n ##Maximum number of points = 72 , keep around 80 values for even number\n max_len = 80\n ##Fluxes, Standardisation is done over 1 type of feature\n data = init_data.loc[:, [u'fluxes_0', u'fluxes_1', u'fluxes_2', u'fluxes_3', u'fluxes_4', u'fluxes_5']].values\n zp_array_flux = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_flux.append(n_data)\n zp_array_flux = np.array(zp_array_flux)\n print(zp_array_flux.shape)\n\n ##Fluxerrors, Standardisation is done over 1 type of feature\n data = init_data.loc[:,\n [u'fluxerrs_0', u'fluxerrs_1', u'fluxerrs_2', u'fluxerrs_3', u'fluxerrs_4', u'fluxerrs_5']].values\n zp_array_flux_error = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_flux_error.append(n_data)\n zp_array_flux_error = np.array(zp_array_flux_error)\n print(zp_array_flux_error.shape)\n\n ##Time, Standardisation is done over 1 type of feature\n data = init_data.loc[:, [u'mjds_0', u'mjds_1', u'mjds_2', u'mjds_3', u'mjds_4', u'mjds_5']].values\n zp_array_mjds = []\n for dat in data:\n n_data = []\n for ii in range(len(dat)):\n n_data = np.append(n_data, np.pad(dat[ii], (0, max_len * 5 - len(dat[ii])), 'constant', constant_values=0))\n n_data = QuantileTransformer(output_distribution='uniform').fit_transform(n_data.reshape(-1, 1)).flatten()\n zp_array_mjds.append(n_data)\n zp_array_mjds = np.array(zp_array_mjds)\n print(zp_array_mjds.shape)\n\n ##Concatenating everything\n zp_data = np.c_[zp_array_flux, zp_array_flux_error, zp_array_mjds]\n\n ##Adding redshift info// Gal pos info might be necessary to remove\n zp_data = np.c_[\n zp_data, init_data.loc[:, [u'gal_b', u'gal_l', u'hostgal_photoz', u'hostgal_photoz_err', u'hostgal_specz', u'mwebv']].values]\n print(zp_data.shape)\n\n ##Load labels and convert to integer\n labels = init_data.loc[:, [u'target']].values\n labels = labels.flatten()\n labels_name = np.array([6, 15, 16, 42, 52, 53, 62, 64, 65, 67, 88, 90, 92, 95, 99])\n [np.place(labels, labels == labels_name[i], [i]) for i in range(len(labels_name))]\n\n return [zp_data, labels]",
"def create_fixed_hist(self):\n hist = cv2.calcHist([self.obj], [0, 1, 2], None, [32, 8, 8],\n [0, 256, 0, 256, 0, 256])\n self.hist = cv2.normalize(hist).flatten()\n print self.hist",
"def defineProcessTemplates(histos):\n\n templates=[]\n\n #nominal\n templates.append( histos[0] )\n nomStats=templates[-1].Integral()\n\n #systematic variations\n #if Up/Down already in the name store directly updating the name\n #if not, mirror the variation given \n for i in xrange(1,len(histos)): \n templates.append( histos[i] )\n key=templates[-1].GetName()\n if not 'Up' in key and not 'Down' in key :\n templates[-1].SetName(key+'Up')\n templates.append( histos[i].Clone(key+'Down') )\n for xbin in range(templates[0].GetNbinsX()):\n templates[-1].SetBinContent(xbin+1,2*templates[0].GetBinContent(xbin+1)-templates[-2].GetBinContent(xbin+1))\n \n #don't leave bins with 0's\n for h in templates:\n h.SetDirectory(0)\n iStats=h.Integral()\n if iStats>0: h.Scale(nomStats/iStats)\n for xbin in range(h.GetNbinsX()):\n if h.GetBinContent(xbin+1)>0: continue\n h.SetBinContent(xbin+1,1e-6)\n \n return templates",
"def setup_hist(self):\n self.x_min = {}\n self.x_max = {}\n self.x_max_minus_min = {}\n self.dx = {}\n self.n_bins = {}\n\n self.histogram_edges = {}\n self.histogram_values = {}\n self.histogram_cdf = {}",
"def test_normal(self):\r\n s = np.random.normal(-0.42, 0.55, 5000)\r\n plt.hist(s, 30, density=False)\r\n plt.xlabel('Interlayer point energy [eV]')\r\n plt.ylabel('Frequency')\r\n plt.show()",
"def wScalogram(data, hypothesis=None,\n nsigma=None, nsigma_min=None, nsigma_percent=1,\n reconstruction_scaled=False,\n firsttrend=False, logscale=True,\n filled=False,\n title=None, xlabel=None,\n outputfile=None):\n\n WaveDec_data = HaarTransform(data)\n Ccoeffs = WaveDec_data[:-1]\n FirstTrend = WaveDec_data[-1]\n Level = len(Ccoeffs)\n\n nlevels = Level if firsttrend==False else Level+1\n nrows = nlevels+1 # the first panel is the data histogram\n if nsigma is not None:\n nrows += 1 # add another panel for the generating function\n ratio = [1.5]\n ratio += [1]*(nrows-1)\n\n if filled==True:\n histtype='bar'\n coeffs_color=Coeffs_color\n firsttrend_color=Firsttrend_color\n else:\n histtype='step'\n coeffs_color='black'\n firsttrend_color='black'\n\n if logscale==True:\n scale='log'\n else:\n scale='linear'\n\n fig = plt.figure(figsize=(12,12))\n gs = gridspec.GridSpec(ncols=1, nrows=nrows,\n height_ratios=ratio,\n hspace=0)\n axs = [fig.add_subplot(gs[i,0]) for i in range(nrows)]\n\n # Fill out top panel\n data_hist, _, data_center, data_width = _BinData(data, bins=2**Level)\n axs[0].bar(data_center, data_hist, align='center',\n width=data_width, color=Data_color)\n axs[0].text(x=.93, y=.63, s='Data', fontsize=12,\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5},\n transform=axs[0].transAxes)\n axs[0].set_yscale(scale)\n\n # If nsigma is provided\n if nsigma is not None:\n\n nsigCcoeffs = nsigma\n\n cut = '(No cut)'\n if nsigma_percent is not None:\n cut = str(nsigma_percent*100) + '%'\n if nsigma_min is not None:\n cut = r'$\\sigma_{min}$ = ' + str(nsigma_min)\n\n if hypothesis is not None:\n #TODO: error trap\n DeltaCoeff = _NSigmaFilter(data, hypothesis, nsigma, nsigma_min, nsigma_percent)\n ReconstructedData = InvHaarTransform(DeltaCoeff, normalize=False)\n if reconstruction_scaled is True:\n RecData = np.divide(ReconstructedData, np.sqrt(hypothesis))\n else:\n RecData = ReconstructedData\n rec_hist, _, rec_center, rec_width = _BinData(RecData, bins=2**Level)\n axs[1].plot(rec_center, rec_hist, 'o', markersize=3, color='#E67E22',\n label='Reconstruction ({})'.format(cut))\n axs[1].set_yscale('linear')\n axs[1].legend(edgecolor=\"black\", fancybox=False,\n handletextpad=0.0, handlelength=0, markerscale=0, fontsize=12)\n\n # If firsttrend, fill out the bottom panel with the first trend\n if firsttrend==True:\n bins = 1\n axs[-1].hist(x=range(bins), bins=bins, weights=FirstTrend,\n histtype=histtype, color=firsttrend_color)\n axs[-1].tick_params(axis='both', bottom=False, labelbottom=False)\n axs[-1].set_yscale(scale)\n axs[-1].text(x=.93, y=.63, s=r'$\\ell={%.1i}$'%(0), fontsize=12,\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5},\n transform=axs[-1].transAxes)\n\n # Fill out the rest of the pannels with the wavelet coefficients\n # If signal_only, start two panels below the top panel\n s = 2 if nsigma is not None else 1\n for l in range(Level):\n bins=2**(Level-l-1)\n coeffs = Ccoeffs[l]\n\n if logscale==True:\n # Plot the positive coefficients\n pos_ix = np.where(Ccoeffs[l]>0)\n pos_coeffs = np.zeros_like(coeffs)\n for i in pos_ix:\n pos_coeffs[i] = coeffs[i]\n axs[l+s].hist(x=range(bins), bins=bins,\n weights=pos_coeffs, histtype=histtype, color=coeffs_color)\n\n # Now plot the negative coefficients. 
The bars are hashed to distinguish the\n # pos and neg coefficients.\n neg_ix = np.where(Ccoeffs[l]<0)\n neg_coeffs = np.zeros_like(coeffs)\n for j in neg_ix:\n neg_coeffs[j] = np.absolute(coeffs[j])\n axs[l+s].hist(x=range(bins), bins=bins,\n weights=neg_coeffs, histtype=histtype, hatch='///', color=coeffs_color)\n\n axs[l+s].tick_params(axis='both', bottom=False, labelbottom=False)\n lev = Level-l-1\n axs[l+s].text(x=.93, y=.63, s=r'$\\ell={%.1i}$'%(lev+1), fontsize=12,\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5},\n transform=axs[l+s].transAxes)\n axs[l+s].set_yscale(scale)\n\n else:\n axs[l+s].hist(x=range(bins), bins=bins, weights=coeffs, histtype=histtype, color=coeffs_color)\n axs[l+s].tick_params(axis='both', bottom=False, labelbottom=False)\n lev = Level-l-1\n axs[l+s].text(x=.93, y=.63, s=r'$\\ell={%.1i}$'%(lev+1), fontsize=12,\n bbox={'facecolor': 'white', 'alpha': 0.5, 'pad': 5},\n transform=axs[l+s].transAxes)\n axs[l+s].set_yscale(scale)\n\n if title is not None:\n fig.suptitle(title, fontsize=18, y=0.92)\n fig.text(x=0.5, y=0.1, s=xlabel, fontsize=14)\n if outputfile is not None:\n plt.savefig(outputfile, bbox_inches='tight')\n plt.show()",
"def histogramFromSketch_M2M(sketch,Phi,domain,dimension,nb_cat_per_dim=None,bins_cont=10,project_on_probabilitySimplex=True,reg_rho=0.01):\n\n ## 0) Parsing the inputs\n\n # Number of categorical inputs\n if nb_cat_per_dim is None:\n nb_cat_per_dim = np.zeros(Phi.d)\n\n is_integer_dimension = False\n if nb_cat_per_dim[dimension] > 0:\n # The data is integer-type\n is_integer_dimension = True\n bins = int(nb_cat_per_dim[dimension])\n else:\n bins = bins_cont\n\n # Parse m, d\n if isinstance(Phi,SimpleFeatureMap):\n Omega = Phi.Omega\n d = Phi.d\n m = Phi.m\n else:\n raise ValueError('The Phi argument does not match one of the supported formats.')\n \n ## 1) Construct the A matrix\n # Build a new sketch with all the difference of Omega\n Omega_diffs = np.empty((d,m**2))\n for i in range(m):\n for j in range(m):\n Omega_diffs[:,i*m+j] = Omega[:,i] - Omega[:,j]\n\n Phi_diffs = SimpleFeatureMap(\"complexExponential\", Omega_diffs,xi=Phi.xi,c_norm=Phi.c_norm)\n\n # Evaluate the box constraints Fourier transform thanks to this sketch function\n z_diffs_domain = fourierSketchOfBox(domain,Phi_diffs,nb_cat_per_dim)\n\n # And reshape (not sure if correct)\n A_compl = z_diffs_domain.reshape(m,m)\n\n # Stack real and imaginary components\n A = np.zeros((2*m,2*m))\n A[:m,:m] = A_compl.real\n A[:m,m:] = A_compl.imag\n A[m:,:m] = -A_compl.imag\n A[m:,m:] = A_compl.real\n \n # Regularize\n A += reg_rho*np.eye(2*m)\n\n box = domain.copy() # the box in which we do the learning\n bin_edges = np.linspace(domain[dimension,0],domain[dimension,1],bins+1)\n h = np.zeros(bins)\n for p in range(bins):\n # move to the next box\n if is_integer_dimension:\n box[dimension,0] = p\n box[dimension,1] = p\n else:\n box[dimension,0] = bin_edges[p]\n box[dimension,1] = bin_edges[p+1]\n F = fourierSketchOfBox(box,Phi,nb_cat_per_dim)\n\n # Stack the b vector\n b = np.zeros(2*m)\n b[:m] = F.real\n b[m:] = -F.imag\n\n \n # ... and solve! \n a_ri = np.linalg.solve(A, b)\n a = a_ri[:m] + 1j*a_ri[m:]\n \n\n \n # Predict with the sketch\n #print(a)\n h[p] = np.real(np.dot(a,sketch))\n if project_on_probabilitySimplex:\n h = project_probabilitySimplex(h)\n return h",
"def _derive_variance_(self):\n # Pure Photon Noise\n self._properties[\"var\"] = np.sqrt(self.rawdata*self.exposuretime) / self.exposuretime",
"def main():\n\n\n ## Groups showing similar noise profile\n #grp1 = [ 1, 4, 5, 8, 9 ]\n #grp2 = [ 18, 19, 22, 23, 30, 31 ]\n grp1 = [ 0, 1, 6, 7, 4, 5 ]\n grp2 = [ 12, 13, 16, 17, 18, 19 ]\n #grp3 = [ 18, 19, 22, 23, 26, 27 ]\n with tb.open_file(sys.argv[1], 'r') as dataF:\n\n npm = len(dataF.root.Sensors.DataPMT)#len(dataF.root.RD.pmtrwf[0])\n nevt = len(dataF.root.RD.pmtrwf)\n\n ## Filter definition\n fSample = 40E6\n freqLPF = 100E3\n freqLPFd = 2*freqLPF / fSample\n b, a = signal.butter(1, freqLPFd, 'low', analog=False)\n ##\n fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(20,6))\n #fig.tight_layout()\n fig.show()\n wf_len = len(dataF.root.RD.pmtrwf[0][0])\n if len(sys.argv) > 3:\n wf_len = wf_len/2+1 \n elif len(sys.argv) == 3:\n g1_first = np.zeros(wf_len, np.float64)\n g2_first = np.zeros(wf_len, np.float64)\n g3_first = np.zeros(wf_len, np.float64)\n mean_first = np.zeros(wf_len, np.float64)\n ##\n for ievt in range(nevt):\n ## clear the axies\n for ax in axes.flatten():\n ax.cla()\n plt_frq = np.zeros(wf_len, np.float64)\n fwf_mean = np.zeros(wf_len, np.float64)\n wf_mean = np.zeros(wf_len, np.float64) # No filter\n g1_mean = np.zeros(wf_len, np.float64)\n g2_mean = np.zeros(wf_len, np.float64)\n g3_mean = np.zeros(wf_len, np.float64)\n for ipm in range(npm):\n\n sg = getWF(dataF, ipm, ievt)\n sg = sg - np.mean(sg)\n\n sgf = signal.lfilter(b, a, sg)\n ## remove mean again just in case\n sgf = sgf - np.mean(sgf)\n #sgf = sg\n\n pmID = getPMid(dataF, ipm)\n\n if len(sys.argv) == 3:\n axes[0][0].plot(sgf, label='pmt '+str(pmID))\n fwf_mean += sgf/npm\n wf_mean += sg/npm\n if pmID in grp1:\n g1_mean += sgf/len(grp1)\n elif pmID in grp2:\n g2_mean += sgf/len(grp2)\n elif pmID in grp3:\n g3_mean += sgf/len(grp3)\n else:\n ft = np.fft.rfft(sgf)\n freq = np.fft.rfftfreq(len(sgf), d=25E-9)\n if ipm == 0:\n plt_frq = freq\n if sys.argv[2] == 'mag':\n ft_mag = np.absolute(ft)\n axes[0][0].plot(freq, ft_mag, label='pmt '+str(pmID))\n fwf_mean += ft_mag/npm\n if pmID in grp1:\n g1_mean += ft_mag/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_mag/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_mag/len(grp3)\n elif sys.argv[2] == 'phase':\n ft_pha = np.angle(ft)\n axes[0][0].plot(freq, ft_pha, label='pmt '+str(pmID))\n fwf_mean += ft_pha/npm\n if pmID in grp1:\n g1_mean += ft_pha/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_pha/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_pha/len(grp3)\n \n \n ## The axes not set\n if len(sys.argv) == 3:\n axes[0][1].plot(g1_mean)\n axes[0][1].set_title('Group 1 mean waveform')\n axes[1][0].plot(g2_mean)\n axes[1][0].set_title('Group 2 mean waveform')\n axes[1][1].plot(g3_mean)\n axes[1][1].set_title('Group 3 mean waveform')\n axes[2][0].plot(fwf_mean)\n axes[2][0].set_title('Mean waveform')\n if ievt == 0:\n g1_first = g1_mean\n g2_first = g2_mean\n g3_first = g3_mean\n mean_first = fwf_mean\n else:\n axes[0][1].plot(g1_first)\n axes[1][0].plot(g2_first)\n axes[1][1].plot(g3_first)\n axes[2][0].plot(mean_first)\n axes[2][1].plot(wf_mean)\n axes[2][1].set_title('Mean waveform and corrected')\n axes[2][1].plot(wf_mean-fwf_mean)\n axes[2][1].set_xlim(0, 1000)\n else:\n axes[0][0].set_xlim(0,50000)\n axes[0][1].plot(plt_frq, g1_mean)\n axes[0][1].set_title('Group 1 mean '+sys.argv[2])\n axes[0][1].set_xlim(0,50000)\n axes[1][0].plot(plt_frq, g2_mean)\n axes[1][0].set_title('Group 2 mean '+sys.argv[2])\n axes[1][0].set_xlim(0,50000)\n axes[1][1].plot(plt_frq, g3_mean)\n axes[1][1].set_title('Group 3 mean '+sys.argv[2])\n axes[1][1].set_xlim(0,50000)\n 
axes[2][0].plot(plt_frq, fwf_mean)\n axes[2][0].set_title('Mean '+sys.argv[2])\n axes[2][0].set_xlim(0,50000)\n plt.draw()\n #fig.legend(loc=0)\n catcher = input(\"next plot?\")\n if catcher == 'q':\n exit()\n plt.cla()",
"def parameters_histograms(w, dw, a, da, b, db):\n w = w.cpu()\n dw = dw.cpu()\n a = a.cpu()\n da = da.cpu()\n b = b.cpu()\n db = db.cpu()\n \n fig = plt.figure(figsize=(10,6))\n ax = fig.add_subplot(231)\n ax.hist(w.reshape(1, w.shape[0] * w.shape[1]))\n ax.set_title('Weights', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(232)\n ax.hist(dw.reshape(1, dw.shape[0] * dw.shape[1]))\n ax.set_title('Weights variations', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(233)\n ax.hist(a)\n ax.set_title('Visible bias', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(234)\n ax.hist(da)\n ax.set_title('Visible bias variations', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(235)\n ax.hist(b)\n ax.set_title('Hidden bias', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n ax = fig.add_subplot(236)\n ax.hist(db)\n ax.set_title('Hidden bias variations', fontsize = 11)\n ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0))\n plt.subplots_adjust(hspace=0.25)\n plt.show()\n plt.close('all')",
"def fwhmwhisker_multiext(filename,sigma,band,zenith):\n hdu=pf.open(filename)\n e1=[]\n e2=[]\n fwhmw=[]\n whiskerw=[]\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n for i in range(Nobj):\n print i\n img = hdui.data[i][4:].reshape(160,160)\n imgrbin = rebin(img,(40,40))\n res=wfwhm(imgrbin,sigma)\n e1.append(res[0])\n e2.append(res[1])\n whiskerw.append(res[2]*0.27)\n fwhmw.append(res[3]*0.27)\n e1 = np.array(e1)\n e2 = np.array(e2)\n fwhmw = np.array(fwhmw)\n whiskerw = np.array(whiskerw)\n e1mean = e1.mean()\n e1std = e1.std()\n e2mean = e2.mean()\n e2std = e2.std()\n whiskerwmean = whiskerw.mean()\n whiskerwstd = whiskerw.std()\n fwhmwmean = fwhmw.mean()\n fwhmwstd = fwhmw.std()\n r50mean = np.mean(fwhmw/2.)\n r50std = np.std(fwhmw/2.)\n pl.figure(figsize=(15,10))\n pl.subplot(2,3,1)\n pl.hist(e1,bins=20,normed=True)\n pl.xlabel('e1')\n pl.title('mean: '+str(round(e1mean,6))+' std: '+str(round(e1std,5)))\n pl.subplot(2,3,2)\n pl.hist(e2,bins=20,normed=True)\n pl.xlabel('e2')\n pl.title('mean: '+str(round(e2mean,6))+' std: '+str(round(e2std,5)))\n pl.subplot(2,3,3)\n pl.hist(whiskerw,bins=20,normed=True)\n pl.xlabel('whisker')\n pl.title('mean: '+str(round(whiskerwmean,5))+' std: '+str(round(whiskerwstd,5)))\n pl.subplot(2,3,4)\n pl.hist(fwhmw,bins=20,normed=True)\n pl.xlabel('fwhm')\n pl.title('mean: '+str(round(fwhmwmean,5))+' std: '+str(round(fwhmwstd,5)))\n pl.subplot(2,3,5)\n pl.hist(fwhmw/2.,bins=20,normed=True)\n pl.xlabel('r50')\n pl.title('mean: '+str(round(r50mean,5))+' std: '+str(round(r50std,5)))\n pl.figtext(0.7,0.4,'band: '+band)\n pl.figtext(0.7,0.37,'zenith angle: '+zenith +' deg')\n pl.figtext(0.3,0.95,'Perfect focus/alignment, 0.7 arcsec fwhm circular seeing',fontsize=18,color='red')\n pl.savefig(filename[0:-6]+'png')\n np.savetxt(filename[0:-6]+'txt',[e1mean,e1std,e2mean,e2std,whiskerwmean,whiskerwstd,fwhmwmean,fwhmwstd,r50mean,r50std],fmt='%10.5f')\n pl.close()\n return '---done !-----'",
"def rescale_data(self):\n\n # Dividing every array of simulated data vectors by the mean of that array.\n '''# Didnt work\n for key in self.data.keys():\n self.data[key] /= np.mean(self.data[key])\n '''\n\n self.rescaled = True\n\n # Mean normalization\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.mean(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Median normalization\n \"\"\" didnt work, still dividing by large number \n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.max(self.data[key]) - np.min(self.data[key]))\n \"\"\"\n\n # Divide by median\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] -= np.median(self.data[key])\n self.data[key] /= (np.median(self.data[key]))\n \"\"\"\n\n # Take logarithm of data\n \"\"\" didnt work\n for key in self.data.keys():\n self.data[key] = np.log10(self.data[key])\n \"\"\"\n\n # Scale by length of vector\n \"\"\"\n for key in self.data.keys():\n self.data[key] /= np.linalg.norm(self.Cl_noiseless)\n \"\"\"\n\n \n # Scale by negative of the natural logarithm \n for key in self.data.keys():\n self.data[key] = -1 * np.log(self.data[key]) \n \n \"\"\"\n # Scale by subtracting the mean and dividing by std\n std = np.nanstd(self.data['data'])\n mean = np.nanmean(self.data['data'])\n for key in self.data.keys():\n # self.data[key] -= np.log(self.Cl_noiseless) # -1* # scale this same way\n # self.data[key] -= self.Cl_noiseless # -1* # scale this same way\n self.data[key] -= mean \n self.data[key] /= std\n \"\"\"",
"def preprocess_adata(adata, n_top_genes=5000):\n sc.pp.filter_cells(adata, min_genes=200)\n sc.pp.filter_genes(adata, min_cells=3)\n sc.pp.normalize_total(adata, target_sum=1e4)\n sc.pp.log1p(adata)\n sc.pp.highly_variable_genes(adata, n_top_genes=n_top_genes)\n adata.raw = adata\n adata = adata[:, adata.var.highly_variable]\n return adata",
"def normalized_hist_dataframe(data_column, bin_number=50, output_dir='/var/tmp/'):\n db = celldatabase.load_hdf(\"/var/tmp/figuresdata/2019astrpi/direct_and_indirect_cells.h5\")\n # dbTuned = db.query(studyparams.TUNING_FILTER)\n D1DB = db.query(studyparams.D1_CELLS)\n nD1DB = db.query(studyparams.nD1_CELLS)\n D1DB = D1DB.replace([np.inf, -np.inf], np.nan)\n nD1DB = nD1DB.replace([np.inf, -np.inf], np.nan)\n D1DB = D1DB[D1DB[data_column].notnull()]\n nD1DB = nD1DB[nD1DB[data_column].notnull()]\n D1Hist, D1bins = np.histogram(D1DB[data_column], bins=bin_number, density=True)\n nD1Hist, nD1bins = np.histogram(nD1DB[data_column], bins=bin_number, density=True)\n center = (D1bins[:-1] + D1bins[1:])/2\n width = 0.7 * (D1bins[1] - D1bins[0])\n D1Median = np.median(D1DB[data_column])\n nD1Median = np.median(nD1DB[data_column])\n\n fig = plt.gcf()\n fig.clf()\n figFilename = \"{}\".format(data_column) # Do not include extension\n figFormat = 'png' # 'pdf' or 'svg'\n figSize = [5, 5]\n\n ax = fig.add_subplot()\n ax.bar(center, D1Hist, width=width, align='center', label='D1', alpha=0.5)\n ax.bar(center, nD1Hist, width=width, align='center', label='nD1', alpha=0.5)\n ax.legend()\n ax.set_xlabel('{} value'.format(data_column))\n ax.set_ylabel('Frequency')\n ax.set_title(data_column)\n ymin, ymax = ax.get_ybound()\n ax.vlines(D1Median, ymin, ymax, color=\"Green\")\n ax.vlines(nD1Median, ymin, ymax, color=\"Red\")\n\n extraplots.save_figure(figFilename, figFormat, figSize, output_dir, 'w')\n plt.show()\n return fig, ax",
"def main(args):\n samples = TQSampleFolder.loadLazySampleFolder(args.input_file + \":\" + args.sample_folder)\n reader = TQSampleDataReader(samples)\n\n # this list contains 2-tuples with (\"CutName\", \"HistogramName\")\n hist_info = list()\n hist_info.append((\"Cut2TagMllSR1VBFVeto\", \"NN_SR1_Signal_Rebin\", \"[ee+mm+em+me]\"))\n hist_info.append((\"Cut2TagMllSR1VBFVeto\", \"NN_SR1_Top\", \"[ee+mm+em+me]\"))\n hist_info.append((\"Cut2TagMllSR1VBFVeto\", \"NN_SR1_Other\", \"[ee+mm+em+me]\"))\n\n processes = list()\n processes.append(Process(\"sig\", r\"Signal\", \"/sig/{channel}/{campaign}/nonres\"))\n processes.append(Process(\"bkg\", r\"Background\", \"/bkg/{channel}/{campaign}/[prompt+nonprompt]\"))\n\n output_directory = \"results/mva_yields_soverb/\"\n ensure_directory(output_directory)\n output_file_name = os.path.splitext(os.path.basename(args.input_file))[0] + \".tex\"\n\n with LaTeXFile.from_rel_path(os.path.join(output_directory, output_file_name)) as tex:\n tex.document_settings.append(\"landscape\")\n tex.write_header()\n tex.begin_document()\n\n logging.info(\"Getting per-bin significances\")\n for cut_name, histogram_name, channel in hist_info:\n logging.info(\"Processing %s/%s\", cut_name, histogram_name)\n hists = dict()\n for process in processes:\n campaign = \"[c16a+c16d+c16e]\"\n hists[process.name] = reader.getHistogram(\n process.path.format(channel=channel, campaign=campaign), \"{}/{}\".format(cut_name, histogram_name)\n )\n\n table_data = list()\n sigs = list()\n hist_sig = hists[\"sig\"]\n hist_bkg = hists[\"bkg\"]\n for i in range(1, hist_sig.GetNbinsX() + 1):\n s = hist_sig.GetBinContent(i)\n b = hist_bkg.GetBinContent(i)\n\n if b != 0:\n # z = math.sqrt(2 * ((s + b) * math.log(1 + s / b) - s))\n z = s / math.sqrt(b)\n sigs.append(z)\n else:\n z = \"--\"\n table_data.append((i, z))\n logging.debug(\"Bin % 2d: %g\", i, z)\n table_data.append((\"Total\", math.sqrt(sum([z ** 2 for z in sigs]))))\n\n tex.write_table(\n table_data,\n [\"{}\", \"{:.4f}\"],\n [\"Bin\", \"Significance\"],\n \"{}/{}\".format(cut_name, histogram_name),\n format_rows=\"cc\",\n )\n\n tex.end_document()\n tex.write_make_file()",
"def preprocessing(self):\n # Standardizing series names\n self.raw.columns = ['stress', 'strain', 'e']\n # Removing percentage format to strain values\n if self.strainPercent:\n self.raw['strain'] = self.raw['strain'].divide(100)\n # On-table (initial) void ratio\n self.e_0 = self.raw['e'].iloc[0]\n return",
"def bin_histogram (modified_df, v_to_bin):\n for variable in v_to_bin:\n # Remove Nas\n df = modified_df[modified_df[variable].notnull()]\n # Create surv filter\n hist_filter = df[\"Survived\"] == 1\n # Create Histogram\n plt.hist([df[variable][hist_filter], df[variable][~hist_filter]],\n stacked=True, label=['Survived', 'Not Survived'], color=['g', 'r'])\n plt.legend()\n # Save and reset fig\n plt.savefig(variable+\"_histogram\")\n plt.clf()",
"def _perturbation(self):\n if self.P > 1:\n scales = []\n for term_i in range(self.n_randEffs):\n _scales = sp.randn(self.diag[term_i].shape[0])\n if self.jitter[term_i] > 0:\n _scales = sp.concatenate((_scales, sp.zeros(1)))\n scales.append(_scales)\n scales = sp.concatenate(scales)\n else:\n scales = sp.randn(self.vd.getNumberScales())\n return scales",
"def create_histograms(PrimaryParticleName, LongVectorSignals, LongVectorSignalsCher,\n\tShortVectorSignals, ShortVectorSignalsCher, LongScinMaxFiber, LongCherMaxFiber, \n\tShortScinMaxFiber, ShortCherMaxFiber, EnergyTotContainer, MaxEnergyTotContainer):\n\n\t#Set ROOT histograms\n\tTH1LongScin = TH1F(\"LongScintillation\", PrimaryParticleName, 100, 0.0, LongScinMaxFiber+200.)\n\tTH1LongCher = TH1F(\"LongCherenkov\", PrimaryParticleName, 100, 0.0, LongCherMaxFiber+200.)\n\tTH1ShortScin = TH1F(\"ShortScintillation\", PrimaryParticleName, 100, 0.0, ShortScinMaxFiber+200.)\n\tTH1ShortCher = TH1F(\"ShortCherenkov\", PrimaryParticleName, 100, 0.0, ShortCherMaxFiber+200.)\n\tTH1EnergyTot = TH1F(\"EnergyTot\", PrimaryParticleName, 100, MaxEnergyTotContainer-10000., MaxEnergyTotContainer+500.) \n\n\t#Fill histograms in for loop\n\tfor index in range(len(LongVectorSignals)):\n\t\tTH1LongScin.Fill(LongVectorSignals[index])\n\t\tTH1LongCher.Fill(LongVectorSignalsCher[index])\n\t\tTH1ShortScin.Fill(ShortVectorSignals[index])\n\t\tTH1ShortCher.Fill(ShortVectorSignalsCher[index])\n\t\tTH1EnergyTot.Fill(EnergyTotContainer[index])\n\n\t#Draw + DrawOptions\n\tStyle = gStyle\n\tStyle.SetOptStat(1) #Show statistics\n\tStyle.SetLineWidth(1)\n\tXAxis = TH1LongScin.GetXaxis() #TH1LongScin\n\tXAxis.SetTitle(\"Energy (MeV)\")\n\tXAxis.SetTitleOffset(1.2)\n\tYAxis = TH1LongScin.GetYaxis()\n\tYAxis.SetTitle(\"Entries\")\n\tTH1LongScin.Draw()\n\tgPad.SaveAs(\"EnergyLongScin.eps\")\n\tXAxis = TH1LongCher.GetXaxis() #TH1LongCher\n\tXAxis.SetTitle(\"# Cher p.e.\")\n\tXAxis.SetTitleOffset(1.2)\n\tYAxis = TH1LongCher.GetYaxis()\n\tYAxis.SetTitle(\"Entries\")\n\tTH1LongCher.Draw()\n\tgPad.SaveAs(\"CherpeLong.eps\")\n\tXAxis = TH1ShortScin.GetXaxis() #TH1ShortScin\n\tXAxis.SetTitle(\"Energy (MeV)\")\n\tXAxis.SetTitleOffset(1.2)\n\tYAxis = TH1ShortScin.GetYaxis()\n\tYAxis.SetTitle(\"Entries\")\n\tTH1ShortScin.Draw()\n\tgPad.SaveAs(\"EnergyShortScin.eps\")\n\tXAxis = TH1ShortCher.GetXaxis() #TH1ShortCher\n\tXAxis.SetTitle(\"# Cher p.e.\")\n\tXAxis.SetTitleOffset(1.2)\n\tYAxis = TH1ShortCher.GetYaxis()\n\tYAxis.SetTitle(\"Entries\")\n\tTH1ShortCher.Draw()\n\tgPad.SaveAs(\"CherpeShort.eps\")\n\tXAxis = TH1EnergyTot.GetXaxis() #TH1EnergyTot\n\tXAxis.SetTitle(\"Energy (MeV)\")\n\tXAxis.SetTitleOffset(1.2)\n\tYAxis = TH1EnergyTot.GetYaxis()\n\tYAxis.SetTitle(\"Entries\")\n\tTH1EnergyTot.Draw()\n\tgPad.SaveAs(\"EnergyTot.eps\")",
"def CL_histogram_MMD(sketch,Phi,domain,dimension,nb_cat_per_dim=None,bins_cont=10):\n ## 0) Parsing the inputs\n # Number of categorical inputs\n if nb_cat_per_dim is None:\n nb_cat_per_dim = np.zeros(Phi.d)\n \n is_integer_dimension = False\n if nb_cat_per_dim[dimension] > 0:\n # The data is integer-type\n is_integer_dimension = True\n bins = int(nb_cat_per_dim[dimension])\n else:\n bins = bins_cont\n\n m = sketch.size\n # 1) Construct the A matrix\n A = 1j*np.zeros((m,bins)) # Pre-allocation\n bin_edges = np.linspace(domain[dimension,0],domain[dimension,1],bins+1)\n box = domain.copy()\n for p in range(bins):\n # move to the next box\n if is_integer_dimension:\n box[dimension,0] = p\n box[dimension,1] = p\n else:\n box[dimension,0] = bin_edges[p]\n box[dimension,1] = bin_edges[p+1]\n A[:,p] = fourierSketchOfBox(box,Phi,nb_cat_per_dim) \n \n # 1.b) cast to real \n Ari = np.r_[A.real, A.imag]\n \n # 2) create b vector\n b = np.r_[sketch.real, sketch.imag]\n \n # 3) solve the optimization problem\n def _f_grad(x):\n r = Ari@x-b\n f = 0.5*np.linalg.norm(r)**2\n grad = Ari.T@r\n return (f,grad)\n \n # Starting point\n x0 = np.ones(bins)/bins\n # Linear constraints\n A_constr = np.zeros((bins,bins))\n l_constr = 0*np.ones(bins) # Positive constraints\n A_constr[:bins,:bins] = np.eye(bins)\n upper_bound = 5 # weird that it must be large\n u_constr = upper_bound*np.ones(bins) # Sum-to one constraints\n constr = LinearConstraint(A_constr,l_constr,u_constr)\n\n # Solve\n sol = minimize(_f_grad, x0, method='trust-constr', bounds=None, constraints=constr, jac=True, options={'verbose': 0})\n\n return project_probabilitySimplex(sol.x)",
"def Make_Binned_ANN_ROC_Curves(title,Signal_title,Background_title,bins,log=False):\n #hsv = plt.get_cmap('hsv')\n #color = hsv(np.linspace(0,1.0,len(bins)-1))\n #color = ['b', 'g', 'r', 'c', 'm', 'y']\n if len(bins)<=6:\n color = ['red','green','blue','orange','brown']\n else:\n color = ['deepskyblue','rosybrown','olivedrab','royalblue','firebrick','chartreuse','navy','red','darkorchid','lightseagreen','mediumvioletred','blue']\n nbins = 60\n\tdis_string = \"ANN_\"\n\n Signal_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Signal_title),\"READ\")\n Background_file = rt.TFile(\"Thesis_Plots/root_files/{}_ANN_histograms.root\".format(Background_title),\"READ\")\n\n plt.figure(\"ROC\")\n plt.clf()\n\n for bin_ in range(len(bins)-1):\n Dis_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n Dis_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(dis_string+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),nbins,0)\n CSV_Signal_Eff = FCM.Get_ROC_Efficiencies(Signal_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n CSV_BG_Eff = FCM.Get_ROC_Efficiencies(Background_file.Get(\"CSV_\"+str(bins[bin_])+\"_\"+str(bins[bin_+1])),(0,1),ratio_bins,0)\n if log:\n plt.semilogy(Dis_Signal_Eff,Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.semilogy(CSV_Signal_Eff,CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n else:\n plt.plot(Dis_Signal_Eff,1-Dis_BG_Eff, color = color[bin_], linestyle = '-',label=str(bins[bin_])+\"_\"+str(bins[bin_+1]))\n plt.plot(CSV_Signal_Eff,1-CSV_BG_Eff, color = color[bin_],linestyle = '--',)\n\n if log:\n\t\tif diff:\n\t\t\tplt.semilogy([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.semilogy([0,0],[0,0],'k-',label = 'L4/L1')\n plt.semilogy([0,0],[0,0],'k-.',label = 'CSV')\n plt.semilogy([0,1],[0.1,0.1],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"$\\epsilon$_background\")\n plt.legend(loc=4)\n else:\n\t\tif diff:\n\t\t\tplt.plot([0,0],[0,0],'k-',label = 'L4-L1')\n\t\telse:\n \tplt.plot([0,0],[0,0],'k-',label = 'L4/L1')\n plt.plot([0,0],[0,0],'k-.',label = 'CSV')\n #plt.plot([0,1],[0.9,0.9],'k:',label=\"10% mistag\")\n plt.plot([0,1],[0.9,0.9],'k:')\n plt.xlabel(r\"$\\epsilon$_signal\")\n plt.ylabel(r\"1-$\\epsilon$_background\")\n plt.legend(loc=3)\n #plt.title(title+\"_ROC-Curves\")\n\n plt.savefig(\"Thesis_Plots/{}_ROC_Curves.png\".format(title))\n print \"saved as Thesis_Plots/{}_ROC_Curves.png\".format(title)",
"def FE_discretize_numeric_variables(train, bin_dict, test='', strategy='kmeans',verbose=0):\r\n df = copy.deepcopy(train)\r\n test = copy.deepcopy(test)\r\n num_cols = len(bin_dict)\r\n nrows = int((num_cols/2)+0.5)\r\n #print('nrows',nrows)\r\n if verbose:\r\n fig = plt.figure(figsize=(10,3*num_cols))\r\n for i, (col, binvalue) in enumerate(bin_dict.items()):\r\n new_col = col+'_discrete'\r\n if strategy == 'gaussian':\r\n kbd = GaussianMixture(n_components=binvalue, random_state=99)\r\n df[new_col] = kbd.fit_predict(df[[col]]).astype(int)\r\n if not isinstance(test, str):\r\n test[new_col] = kbd.predict(test[[col]]).astype(int)\r\n else:\r\n kbd = KBinsDiscretizer(n_bins=binvalue, encode='ordinal', strategy=strategy)\r\n df[new_col] = kbd.fit_transform(df[[col]]).astype(int)\r\n if not isinstance(test, str):\r\n test[new_col] = kbd.transform(test[[col]]).astype(int)\r\n if verbose:\r\n ax1 = plt.subplot(nrows,2,i+1)\r\n ax1.scatter(df[col],df[new_col])\r\n ax1.set_title(new_col)\r\n if not isinstance(test, str):\r\n return df, test\r\n else:\r\n return df",
"def distribution_magnitude_histogram(cur, var, table, label):\n x = select(cur,var, table)\n print(\"Number of entries: \", len(x))\n print(\"Maximum: \", max(x))\n print(\"Minimum: \", min(x))\n \n fig = plt.figure()\n ax = fig.add_subplot(1,1,1)\n ax.set_xlabel(\"Sentiment Magnitude\")\n ax.set_ylabel(\"Number of Sentences\")\n fig.suptitle(label)\n ax.hist(x, bins = 20)\n plt.show()",
"def variance_normalize(self):\n self.img = self.img / np.sqrt(np.sum(self.img ** 2))",
"def model_hist(xvar, yvar, modfuncs, nbins=95, crange=(-10.0, 10.0)):\n hists = [TH2D(\n 'hmodel{0}{1}'.format(c, i), 'hmodel{0}{1}'.format(c, i),\n nbins, crange[0], crange[1],\n nbins, crange[0], crange[1]\n ) for (i, c) in ic]\n for xbin in range(nbins):\n xlo = hists[0].GetXaxis().GetBinLowEdge(xbin+1)\n xup = hists[0].GetXaxis().GetBinUpEdge(xbin+1)\n for ybin in range(nbins):\n ylo = hists[0].GetXaxis().GetBinLowEdge(ybin+1)\n yup = hists[0].GetXaxis().GetBinUpEdge(ybin+1)\n name = 'bin_{0}_{1}'.format(xbin, ybin)\n xvar.setRange(name, xlo, xup)\n yvar.setRange(name, ylo, yup)\n for hist, modfunc in zip(hists, modfuncs):\n integral = modfunc.createIntegral(\n RooArgSet(xvar, yvar),\n RooFit.NormSet(RooArgSet(xvar, yvar)),\n RooFit.Range(name)\n ).getVal()\n hist.SetBinContent(xbin+1, ybin+1, integral)\n return hists"
] | [
"0.64501214",
"0.5601664",
"0.5529052",
"0.55120814",
"0.54677695",
"0.5449569",
"0.5407651",
"0.53764164",
"0.5360028",
"0.5337787",
"0.53310555",
"0.5325349",
"0.5292115",
"0.529134",
"0.5287892",
"0.5283185",
"0.5263711",
"0.5261005",
"0.5248296",
"0.52357006",
"0.52268076",
"0.52246106",
"0.5220019",
"0.5214336",
"0.5212858",
"0.5193024",
"0.51807946",
"0.5169738",
"0.51696086",
"0.5169486"
] | 0.59027475 | 1 |
Test the Categorical feature class. | def test_categorical_feature():
feature = Categorical("abc")
for element in "abc":
feature.set(element)
feature.set("ignore this")
feature.push()
for element in "abc":
getattr(feature, "set_" + element)()
feature.push()
array = feature.array()
assert array.shape == (6, 3)
for i, row in enumerate(array):
assert sum(row) == 1.0 and row[i % 3] == 1.0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_categorical():\n # assert the distribution of the samples is close to the distribution of the data\n # using cstest:\n # - uniform (assert p-value > 0.05)\n # - very skewed / biased? (assert p-value > 0.05)\n # - inversely correlated (assert correlation < 0)",
"def test_get_cat_score(self):\n classes = ['blue skin', 'pointy ears']\n negated_classes = []\n categories = ['ear feature', 'skin feature']\n\n categorical_score = self.annot_scorer._get_categorical_score(\n classes, negated_classes, categories,\n self.negation_weight, self.mock_ic_values\n )\n\n assert categorical_score == 0.7002519289078384",
"def test_categorical(self):\n with Model() as model:\n Categorical('x', np.array([0.25, 0.75]))\n steps = assign_step_methods(model, [])\n assert isinstance(steps, BinaryGibbsMetropolis)\n with Model() as model:\n Categorical('y', np.array([0.25, 0.70, 0.05]))\n steps = assign_step_methods(model, [])\n assert isinstance(steps, CategoricalGibbsMetropolis)",
"def predict_category(self):\n pass",
"def test_category(self):\n\n # Test empty categories\n self.assertFalse(self.colorspace.hasCategory('ocio'))\n self.assertEqual(len(self.colorspace.getCategories()), 0)\n with self.assertRaises(IndexError):\n self.colorspace.getCategories()[0]\n\n # Test with defined TEST_CATEGORIES.\n for i, y in enumerate(TEST_CATEGORIES):\n self.assertEqual(len(self.colorspace.getCategories()), i)\n self.colorspace.addCategory(y)\n self.assertTrue(self.colorspace.hasCategory(y))\n\n # Test the output list is equal to TEST_CATEGORIES.\n self.assertListEqual(\n list(self.colorspace.getCategories()), TEST_CATEGORIES)\n\n # Test the length of list is equal to the length of TEST_CATEGORIES.\n self.assertEqual(len(self.colorspace.getCategories()),\n len(TEST_CATEGORIES))\n\n iterator = self.colorspace.getCategories()\n for a in TEST_CATEGORIES:\n self.assertEqual(a, next(iterator))\n\n # Test the length of categories is zero after clearCategories()\n self.colorspace.clearCategories()\n self.assertEqual(len(self.colorspace.getCategories()), 0)\n\n # Testing individually adding and removing a category.\n self.colorspace.addCategory(TEST_CATEGORIES[0])\n self.assertEqual(len(self.colorspace.getCategories()), 1)\n self.colorspace.removeCategory(TEST_CATEGORIES[0])\n self.assertEqual(len(self.colorspace.getCategories()), 0)",
"def test(name, data, classifier):\n classification = classifier.classify(data)\n print('Item ' + name + ' is a ' + classification)",
"def data_categorical(df, cat_features = [], cont_features = []):\n subset_cat = []\n subset_dict={}\n # Add all the object type features to config.cat_features \n for col in df.columns:\n if df[col].dtype == 'object' and col not in cont_features:\n subset_cat.append(col)\n if col not in cat_features :\n cat_features.append(col)\n if cat_features !=[]:\n print('Categorical features : ', ' '.join(cat_features))\n printmd('**Number of unique values for every feature:**')\n print(pd.DataFrame(df[cat_features].nunique(), columns = ['Unique values']).sort_values(by = 'Unique values', ascending=False))\n printmd(\"**5 uniques samples of every Categorical Features :**\")\n for col in cat_features :\n subset_dict[col]= df[col].unique()[:5]\n print(pd.DataFrame.from_dict(subset_dict, orient='index').transpose())\n return (cat_features)",
"def test_compare_categories_categorical_variables(self):\r\n for method in self.cat_methods:\r\n compare_categories(self.dm1_fp, self.map1_fp, method,\r\n self.cat_categories, self.num_perms, self.test_dir)\r\n results_fp = join(self.test_dir, '%s_results.txt' % method)\r\n self.files_to_remove.append(results_fp)\r\n results_f = open(results_fp, 'U')\r\n results = results_f.readlines()\r\n results_f.close()\r\n\r\n # Make sure the files aren't empty.\r\n self.assertTrue(len(results) > 0)",
"def test_category_and_its_feature(self):\n class RunnerBlah(Runner):\n def __init__(self, renv):\n super(RunnerBlah, self).__init__(renv)\n self.register_feature_class('bravo', Feature)\n self.register_feature_class('charlie', Feature)\n self.register_feature_category_class(\n 'alpha', features=['bravo', 'charlie'], mono=True)\n\n renv = create_runtime(RunnerBlah)\n renv.create_runner('runner')\n\n ctrl = renv.feature_ctrl\n\n total_order, _ = ctrl.get_activation_order(['alpha', 'bravo'])\n self.assertEqual(['bravo'], total_order)",
"def classify(self, example):\n raise NotImplementedError()",
"def check_classifier():\n content = []\n labels = []\n file = 'COMP3074-CW1-Dataset.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'name.csv'\n content, labels = get_tag(file, \"question_book\", content, labels)\n file = 'Small_talk.csv'\n content, labels = get_tag(file, \"small_talk\", content, labels, )\n x_train, x_test, y_train, y_test = train_test_split(content, # Sample feature set to be divided\n labels, # The sample result to be divided (label)\n stratify=labels, # Keep the category proportions\n # the same in training and testing\n test_size=0.25, # Refers to the proportion of\n # samples reserved for testing\n random_state=22) # Random seed\n count_vect = CountVectorizer(stop_words=stopwords.words('english'))\n x_train_counts = count_vect.fit_transform(x_train)\n tfidf_transformer = TfidfTransformer(use_idf=True, # Tf_idf\n sublinear_tf=True).fit(x_train_counts)\n x_train_tf = tfidf_transformer.transform(x_train_counts) # Standardize the inherent attributes of the training set,\n # reduce dimensionality and normalize\n classify = LogisticRegression(random_state=0).fit(x_train_tf, y_train) # Logistic regression\n return classify, tfidf_transformer, count_vect",
"def test_category(self):\n # XXX identifiers would be groovy\n self.check_search(\n dict(category=u'36:self'), # trap\n [u'Ingrain'],\n 'simple category search, vs self',\n exact=True,\n )\n self.check_search(\n dict(category=u'14:target'), # protect\n [u'Conversion 2', u'False Swipe'],\n 'simple category search, vs target',\n exact=True,\n )\n\n # Multiple categories\n # sleep OR attack up\n self.check_search(\n dict(category=[u'29:self', u'15:target'], category_operator=u'any'),\n [u'Rest', u'Swagger'],\n 'multiple category search (OR)',\n exact=True,\n )\n\n # sleep AND heal self\n self.check_search(\n dict(category=[u'29:self', u'13:self'], category_operator=u'all'),\n [u'Rest'],\n 'multiple category search (AND)',\n exact=True,\n )",
"def test_classify_cuisine(self):\n pass",
"def test_extract_categories():\n pass",
"def test_classify(self):\n classifiers, estimates =\\\n ada_boost.train_dataset(self.larger_matrix,\n self.larger_class_labels,\n 9)\n data_to_classify = [1, 0.5]\n classifications = ada_boost.classify(data_to_classify, classifiers)\n expected = np.mat([-1.])\n self.assertEqual(classifications, expected)",
"def classify_test(classifier, test_data):\n for d in test_data:\n test(d[\"name\"], d[\"attribute\"], classifier)",
"def test_categorical_log_frequency():\n # assert the distribution of the samples is close to the distribution of the data\n # using cstest:\n # - uniform (assert p-value > 0.05)\n # - very skewed / biased? (assert p-value > 0.05)\n # - inversely correlated (assert correlation < 0)",
"def test_0005_create_categories(self):\n self.create_category(name='Test 0060 Workflow Features', description='Test 0060 - Workflow Features')",
"def test_get_categories(self):\n pass",
"def test_assign_categorical(curve):\n assert curve.dtypes[0] == 'float'\n curve.dtypes = 'category'\n assert curve.dtypes[0] == 'category'",
"def get_categorical_features(self, x: pd.DataFrame) -> pd.DataFrame:\n return x[self.categorical_features]",
"def test_text_classifier_curate(self):\n pass",
"def test_text_classifier_test(self):\n pass",
"def test_category_manip_pipeline(self):\n raise NotImplementedError(\"\")",
"def test_with_tuple(self, seed):\n categories = (\"asdfa\", 2)\n dim = Categorical(\"yolo\", categories)\n samples = dim.sample(seed=seed)\n assert len(samples) == 1\n assert samples[0] == \"asdfa\"\n assert dim._probs == (0.5, 0.5)\n\n assert categories == dim.categories\n\n assert 2 in dim\n assert 3 not in dim\n\n assert (\n str(dim) == \"Categorical(name=yolo, prior={asdfa: 0.50, 2: 0.50}, \"\n \"shape=(), default value=None)\"\n )\n\n assert dim.name == \"yolo\"\n assert dim.type == \"categorical\"\n assert dim.shape == ()",
"def _is_categorical(df, field):\n return df[field].dtype.name == 'category'",
"def test_categorical_column_validates_categories(self):\n\n categories = 1\n\n with pytest.raises(CitrinationClientError):\n CategoricalColumn(name=self.name, role=self.role, group_by_key=self.group_by_key, categories=categories)\n\n categories = [\"Grey\", 1]\n with pytest.raises(CitrinationClientError):\n CategoricalColumn(name=self.name, role=self.role, group_by_key=self.group_by_key, categories=categories)\n\n categories = [\"Grey\", \"Blue\"]\n CategoricalColumn(name=self.name, role=self.role, group_by_key=self.group_by_key, categories=categories)",
"def test_category_and_its_feature_dep(self):\n class RunnerBlah(Runner):\n def __init__(self, renv):\n super(RunnerBlah, self).__init__(renv)\n self.register_feature_class('bravo', Feature)\n self.register_feature_category_class(\n 'alpha', features=['bravo'], defaults=['bravo'])\n self.register_feature_class(\n 'foxtrot', Feature, requires=['alpha', 'bravo'])\n self.register_feature_category_class('echo', features=['foxtrot'])\n\n renv = create_runtime(RunnerBlah)\n renv.create_runner('runner')\n\n ctrl = renv.feature_ctrl\n\n total_order, _ = ctrl.get_activation_order(['foxtrot'])\n self.assertEqual(['bravo', 'foxtrot'], total_order)",
"def classify(self, features):\n\n # TODO: finish this.\n features = np.array(features)\n return self.classifier.classify(features)",
"def get_categorical_features(self):\n return self.categorical_features"
] | [
"0.73366314",
"0.7113809",
"0.71037066",
"0.6780652",
"0.66445255",
"0.66398925",
"0.6559521",
"0.6554185",
"0.6519982",
"0.6498263",
"0.6448205",
"0.64314634",
"0.64148504",
"0.64140356",
"0.63725084",
"0.6347099",
"0.6345432",
"0.62697804",
"0.62644917",
"0.62623185",
"0.624088",
"0.62361515",
"0.6230513",
"0.62102383",
"0.6208405",
"0.6201192",
"0.62002",
"0.61810195",
"0.61779237",
"0.6156097"
] | 0.7951883 | 0 |
Test the Hashed feature class. | def test_hashed_feature():
def mock(c):
return ord(c) - ord('a')
group = Group({"a": Hashed(buckets=3, hash=mock), "b": Hashed(buckets=5, hash=mock), })
for i in range(10):
group.set_a("abcde" [i % 3])
group.set_b("abcde" [i % 5])
group.push()
array = group.array()
assert array.shape == (10, 8)
for i, row in enumerate(array):
for column, value in zip(array.columns, row):
feature, index = column.split("_")
if feature == "a":
assert value == float((i % 3) == int(index))
else:
assert value == float((i % 5) == int(index)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_hashing():\n X = Vectorizer(strategy=\"Hashing\", n_features=10).fit_transform(X_text)\n assert X.shape == (10, 10)\n assert \"hash1\" in X",
"def test_all_features_with_data(self):\n feature1 = Feature('looktest1')\n feature1.set_percentage(5)\n\n feature2 = Feature('looktest2')\n feature2.activate()\n feature2.add_to_whitelist(3)\n\n feature3 = Feature('looktest3')\n feature3.activate()\n feature3.add_to_blacklist(4)\n feature3.add_to_blacklist(5)\n\n feature4 = Feature('looktest4')\n feature4.activate()\n feature4.add_to_whitelist(3)\n feature4.add_to_whitelist(5)\n feature4.add_to_blacklist(4)\n\n all_features = Feature.all_features(include_data=True)\n self.assertEqual(len(all_features), 4)\n\n for key in ['looktest1', 'looktest2', 'looktest3', 'looktest4']:\n self.assertTrue(key in all_features)\n if not key == 'looktest1':\n self.assertEqual(all_features[key]['percentage'], 100)\n\n self.assertEqual(all_features['looktest1']['percentage'], 5)\n self.assertFalse('whitelist' in all_features['looktest1'])\n self.assertFalse('blacklist' in all_features['looktest1'])\n\n self.assertTrue('whitelist' in all_features['looktest2'])\n self.assertEqual(all_features['looktest2']['whitelist'], [3])\n self.assertFalse('blacklist' in all_features['looktest2'])\n\n self.assertFalse('whitelist' in all_features['looktest3'])\n self.assertTrue('blacklist' in all_features['looktest3'])\n self.assertEqual(all_features['looktest3']['blacklist'], [4, 5])\n\n self.assertTrue('whitelist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['whitelist'], [3, 5])\n self.assertTrue('blacklist' in all_features['looktest4'])\n self.assertEqual(all_features['looktest4']['blacklist'], [4])",
"def test_creating_simple_feature():\n # given & when\n feature = Feature(1, \"Feature\", \"I am a feature\", \"foo.feature\", 1, tags=None)\n\n # then\n assert feature.id == 1\n assert feature.keyword == \"Feature\"\n assert feature.sentence == \"I am a feature\"\n assert feature.path == \"foo.feature\"\n assert feature.line == 1\n assert feature.tags == []",
"def test_hash(self):\r\n self.assertEqual(processor_hash('test'), 'GqNJWF7X7L07nEhqMAZ+OVyks1Y=')\r\n self.assertEqual(processor_hash('edx '), '/KowheysqM2PFYuxVKg0P8Flfk4=')",
"def test_basic(self):\n self.assertEqual(hash_str(\"world!\", salt=\"hello, \").hex()[:6], \"68e656\")",
"def test_registry():\n assert(CQT.get_id() in msaf.base.features_registry.keys())\n assert(PCP.get_id() in msaf.base.features_registry.keys())\n assert(Tonnetz.get_id() in msaf.base.features_registry.keys())\n assert(MFCC.get_id() in msaf.base.features_registry.keys())\n assert(Tempogram.get_id() in msaf.base.features_registry.keys())",
"def test_hash(self):\n self.assertEqual(hash(self.compound), hash((\"t1\", \"test compound\")))",
"def test_all_features(self):\n to_create = ['looktest1', 'looktest2', 'looktest3']\n for f in to_create:\n Feature(f).activate()\n\n all_features = Feature.all_features()\n self.assertEqual(len(all_features), len(to_create))\n for f in to_create:\n self.assertTrue(f in all_features)",
"def feature():\n pass",
"def test_is_active_of_homework_positive():\n assert oop_hw.is_active()",
"def test_serialization(self):\n for hashtype in [HashTypes.SHA1, HashTypes.SHA2, HashTypes.SHA3, ]:\n self.do_test_serialization(hashtype)",
"def test_add_feature(self):\n fc1 = self.read_feature()\n fc2 = self.read_feature('Aegean_Sea')\n\n # add a feature already in the feature collection\n fc1.add_feature(fc1.features[0])\n assert len(fc1.features) == 1\n\n # add a new feature to the feature collection\n fc1.add_feature(fc2.features[0])\n assert len(fc1.features) == 2\n\n self.check_feature(fc1.features[0])\n self.check_feature(fc1.features[1], expected_name='Aegean Sea')",
"def test_features(iris):\n assert iris.num_features == 4\n assert iris.feature_names == [\n \"sepal length (cm)\",\n \"sepal width (cm)\",\n \"petal length (cm)\",\n \"petal width (cm)\",\n ]",
"def test_fish():\n test_path = tempfile.mkdtemp()\n x_train, metadata = fish(test_path)\n try:\n assert x_train.shape == (97, 20)\n except:\n shutil.rmtree(test_path)\n raise()",
"def test_hash(self):\n self.assertEqual(hash(self._version1), hash(self._version1))\n self.assertNotEqual(hash(self._version2), hash(self._version1))\n self.assertEqual(hash(\"0.1\"), hash(self._version1))",
"def loadFeatures(self, filename):\n f = open(filename, 'rb')\n loadhash = pickle.load(f)\n b = self.spikes.view(np.uint8)\n hashkey = hashlib.sha1(b).hexdigest()\n\n if loadhash == hashkey:\n print(\"Spikeset hashes match, loading features info.\")\n self.calculateFeatures(pickle.load(f))\n else:\n print(\"Hashes don't match, features are from a different dataset. Be careful.\")\n self.calculateFeatures(pickle.load(f))",
"def test_get_specific_algo(self):\n\n expected = False\n actual = PyFunceble.path.isfile(self.file)\n self.assertEqual(expected, actual)\n\n File(self.file).write(\"\\n\".join(self.data_to_write))\n expected = True\n actual = PyFunceble.path.isfile(self.file)\n\n self.assertEqual(expected, actual)\n\n expected = self.expected_hashed[\"sha512\"]\n actual = Hash(self.file, algorithm=\"sha512\", only_hash=True).get()\n self.assertEqual(expected, actual)\n\n File(self.file).delete()\n\n expected = False\n actual = PyFunceble.path.isfile(self.file)\n self.assertEqual(expected, actual)",
"def test_read_feature_collection(self):\n fc = self.read_feature()\n assert len(fc.features) == 1\n feature = fc.features[0]\n self.check_feature(feature)",
"def test_confirm_features_in_class_variable(self):\n # Given I have Romaine's core\n from tests.common import romaine\n core = romaine.Core()\n\n # When I locate features in /tmp/romaine_tests/features\n # And I locate features in tests/features\n core.locate_features('/tmp/romaine_tests/features')\n core.locate_features('tests/features')\n\n # Then the core's feature_paths_list variable contains:\n # | path |\n # | /tmp/romaine_tests/features/feature1 |\n # | /tmp/romaine_tests/features/feature2 |\n # | /tmp/romaine_tests/features/subdir/feature3 |\n # | tests/features/feature1 |\n # | tests/features/feature2 |\n # | tests/features/subdir/feature3 |\n self.assertEqual(\n sorted(core.feature_file_paths),\n [\n '/tmp/romaine_tests/features/feature1',\n '/tmp/romaine_tests/features/feature2',\n '/tmp/romaine_tests/features/subdir/feature3',\n 'tests/features/feature1',\n 'tests/features/feature2',\n 'tests/features/subdir/feature3',\n ]\n )",
"def test_gtf(self):\n #TODO write bed tests",
"def test_Fuselage_full():\n fus = Fuselage()\n assert('OML' in fus)",
"def test_split_feature(tree):\r\n print(\"test_split_feature()...\", end = \"\")\r\n assert (tree.process_split_feature() == True)\r\n print(\"Passed!\")",
"def _test(self):\n pass",
"def _test(self):\n pass",
"def _test(self):\n pass",
"def _test(self):",
"def _test(self):",
"def _test(self):",
"def _test(self):",
"def _test(self):"
] | [
"0.65287024",
"0.62354875",
"0.6188721",
"0.60676974",
"0.59955835",
"0.5943524",
"0.59145314",
"0.5907544",
"0.59006155",
"0.5855149",
"0.5854101",
"0.5758078",
"0.5741417",
"0.5683079",
"0.5673396",
"0.5668186",
"0.5665878",
"0.56312555",
"0.5623874",
"0.5621978",
"0.55877644",
"0.55796444",
"0.5576072",
"0.5576072",
"0.5576072",
"0.55751795",
"0.55751795",
"0.55751795",
"0.55751795",
"0.55751795"
] | 0.70252043 | 0 |
Test if array concatenation works. | def test_array_concat():
array = Array(columns="abc")
for i in range(10):
array.append([1, 2, 3])
    # Any 2-dimensional array with the same number of rows should work.
other = [[4, 5, 6]] * len(array)
array.concat(other)
assert array.shape == (10, 6)
assert len(array.columns) == 6
assert all(type(column) is str for column in array.columns)
for row in array:
assert tuple(row) == (1, 2, 3, 4, 5, 6)
# Now this should fail since the columns have the same names.
other = Array(columns="abc")
for i in range(10):
other.append([7, 8, 9])
assert_raises(ValueError, array.concat, other)
# Adding a prefix should make it work.
array.concat(other, prefix="other")
assert array.shape == (10, 9)
assert len(array.columns) == 9
for row in array:
assert tuple(row) == (1, 2, 3, 4, 5, 6, 7, 8, 9) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test01(self):\n a = np.arange(1000)\n b = bcolz.carray(a, chunklen=1, rootdir=self.rootdir)\n b.append(a)\n # print \"b->\", `b`\n c = np.concatenate((a, a))\n assert_array_equal(c, b[:], \"Arrays are not equal\")",
"def test03(self):\n a = np.arange(1e4)\n c = np.arange(2e5)\n b = bcolz.carray(a, rootdir=self.rootdir)\n b.append(c)\n # print \"b->\", `b`\n d = np.concatenate((a, c))\n assert_array_equal(d, b[:], \"Arrays are not equal\")",
"def test02a(self):\n a = np.arange(1000)\n b = bcolz.carray(a, chunklen=10*1000, rootdir=self.rootdir)\n b.append(a)\n # print \"b->\", `b`\n c = np.concatenate((a, a))\n assert_array_equal(c, b[:], \"Arrays are not equal\")",
"def test02b(self):\n a = np.arange(100*1000)\n b = bcolz.carray(a, chunklen=10*1000, rootdir=self.rootdir)\n b.append(a)\n # print \"b->\", `b`\n c = np.concatenate((a, a))\n assert_array_equal(c, b[:], \"Arrays are not equal\")",
"def test02c(self):\n a = np.arange(1000*1000)\n b = bcolz.carray(a, chunklen=100*1000-1, rootdir=self.rootdir)\n b.append(a)\n # print \"b->\", `b`\n c = np.concatenate((a, a))\n assert_array_equal(c, b[:], \"Arrays are not equal\")",
"def test_pad_and_concatenate_with_1d(self):\n array1 = 1.0\n array2 = 2.0\n result = numpy_pad_and_concatenate(array1, array2)\n self.assertTrue(np.array_equal(np.array([1.0, 2.0]), result))\n\n tensor1 = torch.tensor(1.0)\n tensor2 = torch.tensor(2.0)\n result = torch_pad_and_concatenate(tensor1, tensor2)\n self.assertTrue(torch.equal(result, torch.Tensor([1.0, 2.0])))",
"def add_mismatched_arrays(array1, array2, truncate=False):\n # Cast these arrays to the largest common type\n array1 = np.array(array1, dtype=np.promote_types(array1.dtype, array2.dtype))\n array2 = np.array(array2, dtype=np.promote_types(array1.dtype, array2.dtype))\n\n # TODO: find a more elegant way to do this whole function\n\n if truncate:\n if len(array1) < len(array2):\n result = array1.copy()\n result += array2[:len(array1)]\n else:\n result = array2.copy()\n result += array1[:len(array2)]\n else:\n if len(array1) < len(array2):\n result = array2.copy()\n result[:len(array1)] += array1\n else:\n result = array1.copy()\n result[:len(array2)] += array2\n\n return result",
"def assertArrayEquals(testcase, arr1, arr2):\n from itertools import zip_longest\n import numpy as np\n testcase.assertTrue(\n all([\n np.array_equal(e, a)\n for e, a\n in zip_longest(arr1, arr2)\n ])\n )",
"def test_concatenate_quaternions():\n # Until ea9adc5, this combination of a list and a numpy array raised\n # a ValueError:\n q1 = [1, 0, 0, 0]\n q2 = np.array([0, 0, 0, 1])\n q12 = pr.concatenate_quaternions(q1, q2)\n assert_array_almost_equal(q12, np.array([0, 0, 0, 1]))\n\n random_state = np.random.RandomState(0)\n for _ in range(5):\n q1 = pr.quaternion_from_axis_angle(pr.random_axis_angle(random_state))\n q2 = pr.quaternion_from_axis_angle(pr.random_axis_angle(random_state))\n\n R1 = pr.matrix_from_quaternion(q1)\n R2 = pr.matrix_from_quaternion(q2)\n\n q12 = pr.concatenate_quaternions(q1, q2)\n R12 = np.dot(R1, R2)\n q12R = pr.quaternion_from_matrix(R12)\n\n pr.assert_quaternion_equal(q12, q12R)",
"def __array_append(self, in_a,in_b):\n in_b = np.array([in_b]) if isinstance(in_b,(int,float,long,complex)) else in_b\n return np.concatenate((in_a,in_b))",
"def testConcatSourceMultipleButOneConcatable(self):\n env = self.env\n\n # Even if multiple input files, if only one is concat-able, won't concat.\n cs = env.ConcatSource('foo3.cc', ['a.cc', 'd.o'])\n self.assertEqual(map(str, cs), ['d.o', 'a.cc'])",
"def test_op_add_offload_array_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n o = a + 1\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_r = offl_a + offl_o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def test_op_add_array_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n o = a + 1\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_r = offl_a + o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def test_concat(self):\n\n test_cases = [\n Case(\n description=\"lists of strings\",\n val=[\"a\", \"b\"],\n args=[[\"c\", \"d\"]],\n kwargs={},\n expect=[\"a\", \"b\", \"c\", \"d\"],\n ),\n Case(\n description=\"missing argument\",\n val=[\"a\", \"b\"],\n args=[],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"too many arguments\",\n val=[\"a\", \"b\"],\n args=[[\"c\", \"d\"], \"\"],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"arguments not a list\",\n val=[\"a\", \"b\"],\n args=[5],\n kwargs={},\n expect=FilterArgumentError,\n ),\n Case(\n description=\"not an array\",\n val=\"a, b\",\n args=[[\"c\", \"d\"]],\n kwargs={},\n expect=FilterValueError,\n ),\n Case(\n description=\"array contains non string\",\n val=[\"a\", \"b\", 5],\n args=[[\"c\", \"d\"]],\n kwargs={},\n expect=[\"a\", \"b\", 5, \"c\", \"d\"],\n ),\n Case(\n description=\"undefined left value\",\n val=self.env.undefined(\"test\"),\n args=[[\"c\", \"d\"]],\n kwargs={},\n expect=[\"c\", \"d\"],\n ),\n Case(\n description=\"undefined argument\",\n val=[\"a\", \"b\"],\n args=[self.env.undefined(\"test\")],\n kwargs={},\n expect=FilterArgumentError,\n ),\n ]\n\n self._test(Concat, test_cases)",
"def add_arrays(arr1, arr2):\n if len(arr1) != len(arr2):\n return None\n return [arr1[i] + arr2[i] for i in range(len(arr1))]",
"def test_op_add_offload_array_complex(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=complex)\n o = a + complex(1.0, -1.3)\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_r = offl_a + offl_o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def hpat_arrays_append_overload(A, B):\n\n use_A_array = isinstance(A, (RangeIndexType, Int64IndexType))\n use_B_array = isinstance(B, (RangeIndexType, Int64IndexType))\n if isinstance(A, (types.Array, RangeIndexType, Int64IndexType)):\n if isinstance(B, (types.Array, RangeIndexType, Int64IndexType)):\n def _append_single_numeric_impl(A, B):\n _A = A.values if use_A_array == True else A # noqa\n _B = B.values if use_B_array == True else B # noqa\n return numpy.concatenate((_A, _B,))\n\n return _append_single_numeric_impl\n\n elif (isinstance(B, (types.UniTuple, types.List))\n and isinstance(B.dtype, (types.Array, RangeIndexType, Int64IndexType))):\n B_dtype_is_index = isinstance(B.dtype, (RangeIndexType, Int64IndexType))\n numba_common_dtype = find_common_dtype_from_numpy_dtypes([A.dtype, B.dtype.dtype], [])\n\n # TODO: refactor to use numpy.concatenate when Numba supports building a tuple at runtime\n def _append_list_numeric_impl(A, B):\n\n total_length = len(A) + numpy.array([len(arr) for arr in B]).sum()\n new_data = numpy.empty(total_length, numba_common_dtype)\n\n stop = len(A)\n _A = numpy.array(A) if use_A_array == True else A # noqa\n new_data[:stop] = _A\n for arr in B:\n _arr = arr.values if B_dtype_is_index == True else arr # noqa\n start = stop\n stop = start + len(_arr)\n new_data[start:stop] = _arr\n return new_data\n\n return _append_list_numeric_impl\n\n elif A == string_array_type:\n if B == string_array_type:\n def _append_single_string_array_impl(A, B):\n total_size = len(A) + len(B)\n total_chars = num_total_chars(A) + num_total_chars(B)\n new_data = sdc.str_arr_ext.pre_alloc_string_array(total_size, total_chars)\n\n pos = 0\n pos += append_string_array_to(new_data, pos, A)\n pos += append_string_array_to(new_data, pos, B)\n\n return new_data\n\n return _append_single_string_array_impl\n elif (isinstance(B, (types.UniTuple, types.List)) and B.dtype == string_array_type):\n def _append_list_string_array_impl(A, B):\n array_list = [A] + list(B)\n total_size = numpy.array([len(arr) for arr in array_list]).sum()\n total_chars = numpy.array([num_total_chars(arr) for arr in array_list]).sum()\n\n new_data = sdc.str_arr_ext.pre_alloc_string_array(total_size, total_chars)\n\n pos = 0\n pos += append_string_array_to(new_data, pos, A)\n for arr in B:\n pos += append_string_array_to(new_data, pos, arr)\n\n return new_data\n\n return _append_list_string_array_impl",
"def _concat_arrays(arrays):\n # torch\n if isinstance(arrays[0], torch.Tensor):\n return torch.cat(arrays)\n\n # numpy\n if not isinstance(arrays[0], np.ndarray):\n arrays = np.asarray(arrays)\n\n return np.concatenate(arrays)",
"def add_arrays(arr1, arr2):\n if len(arr1) != len(arr2):\n return (None)\n newList = []\n for i in range(len(arr1)):\n newList.append(arr1[i] + arr2[i])\n return (newList)",
"def testRegisterConcatenation(self):\n reg_one = ShiftRegister(2)\n reg_one.shift(\"a\")\n reg_one.shift(\"b\")\n reg_two = ShiftRegister(3)\n reg_two.shift(\"c\")\n reg_two.shift(\"d\")\n reg_two.shift(\"e\")\n reg_cat = reg_one.concatenate(reg_two)\n self.assertEqual(''.join(reg_cat), \"abcde\")",
"def assert_content_equals_array(result, expected):\n assert isinstance(result, (pa.Array, pa.ChunkedArray))\n if isinstance(result, pa.ChunkedArray):\n result = pa.concat_arrays(result.iterchunks())\n assert result.equals(expected)",
"def cat_arrays(arr1, arr2):\n return [x for x in arr1+arr2]",
"def add_arrays(arr1, arr2):\n n = len(arr1)\n m = len(arr2)\n if n != m:\n return None\n return [arr1[i] + arr2[i] for i in range(n)]",
"def concatenate(arrays, **kwargs):\n unit = unit_of(arrays[0])\n result = np.concatenate([to_unitless(arr, unit) for arr in arrays], **kwargs)\n return result * unit",
"def test_concat_impl(self, value, expected_concat_value):\n # Need to convert np arrays to tensors first.\n value = tf.nest.map_structure(tf.constant, value)\n concat_value = concat._concat_impl(value)\n self.assertAllEqual(concat_value, expected_concat_value)",
"def solution(array1, array2):\n array1, array2 = np.array(array1), np.array(array2)\n return np.concatenate((array1, array2.flatten()))",
"def test_concatenate_errors(self):\n header = BDFHeader.from_path(TestData.bdf_2048)\n header2 = BDFHeader.from_path(TestData.bdf_256)\n with pytest.raises(ValueError):\n header.concatenate(header2)",
"def test_op_add_offload_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n o = a + 1.0\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a + o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_r = offl_a + offl_o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))",
"def concatenate_data():",
"def testConcatDisabled(self):\n env = self.env\n\n # If CONCAT_SOURCE_ENABLE is not set, files are passed through\n env['CONCAT_SOURCE_ENABLE'] = False\n cs = env.ConcatSource('foo4.cc', ['a.cc', 'b.cc', 'c.cc'])\n self.assertEqual(map(str, cs), ['a.cc', 'b.cc', 'c.cc'])"
] | [
"0.71296877",
"0.7055316",
"0.69996417",
"0.67035186",
"0.6662046",
"0.64107263",
"0.6404458",
"0.6371694",
"0.6330895",
"0.63107747",
"0.6218703",
"0.6131409",
"0.6072472",
"0.60144913",
"0.5992456",
"0.597871",
"0.5963048",
"0.5954356",
"0.59503627",
"0.5942833",
"0.59251606",
"0.59075296",
"0.588849",
"0.58516836",
"0.5808522",
"0.57904404",
"0.57831866",
"0.57751584",
"0.57558775",
"0.5745632"
] | 0.7229425 | 0 |
K-Nearest Neighbors classifier. Return the most frequent class among the k nearest points. | def knn(p, k, x, t):
# Number of instances in data set
N = x.shape[0]
    Euclidean_Distance = numpy.square(x - p)  # squared differences between p and every training point
    dis = numpy.sum(Euclidean_Distance, axis=1)  # squared Euclidean distance from p to each training point
    inds = numpy.argsort(dis)[:k]  # indices of the k nearest training points
    tgt_cat = Counter([t[i] for i in inds])  # count the labels of the k nearest points
    top_class = max(tgt_cat, key=tgt_cat.get)  # most frequent class among the k nearest points
return top_class | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def knn_classify_point(point, data, k, distance_metric):\n k_closest_points = get_k_closest_points(point, data, k, distance_metric)\n classification_counts = {}\n for item in k_closest_points:\n classification_type = item.classification\n if classification_type not in classification_counts:\n classification_counts[classification_type] = 0\n else:\n classification_counts[classification_type] += 1\n classification_counts = sorted(classification_counts, key = classification_counts.get)\n return classification_counts[-1]",
"def knn_classify(k, labeled_points, new_point):\n by_distance = sorted(labeled_points,\n key=lambda point, _: la.distance(point, new_point))\n\n #find the labels for the k clsest\n k_nearest_labels = [label for _, label in by_distance[:k]]\n #and ket them vote\n return majority_vote(k_nearest_labels)",
"def k_nn(frame, newPoint, colClass, k): \n counts = []\n \n # find all distances wrt the newPoint\n dist = find_distances(frame, newPoint)\n\n # find the nearest k points, extract their labels and save them in a list\n labels = [label for distance,label in dist[:k]] \n \n # for each class label, count how many occurrencies have been found\n for label in frame[colClass].unique():\n # save the number of occurrencies in a list of tuples (number, label)\n counts.append((labels.count(label), label)) \n \n # sort the list in descending order, and use the first label of the tuples'\n # list to make the prediction \n counts.sort(reverse=True)\n prediction = counts[0][1] \n \n return prediction",
"def majority_voting(distances, labels, k):\n nearest_index = np.argsort(distances)\n k_neighbor_labels = []\n for i in range(k):\n index = nearest_index[i]\n label = labels[index]\n k_neighbor_labels.append(label)\n major_class = np.argmax(np.bincount(k_neighbor_labels))\n return major_class",
"def knn_classification(x_test, df_training, attrib_column, k):\n return majority_vote(k_nearest_neighbors(x_test, df_training,k),df,attrib_column)",
"def nearest_neighbors_classifier(data):\n clf = KNeighborsClassifier(3, 'distance')\n clf.name = \"KNN\"\n train_predict_and_results(data, clf)",
"def predict_knn(data, example, k):\n # Use distance to find most similar examples, sort\n distTuples = list()\n for d in data:\n distTuples.append((d, example.distance(d)))\n distTuples.sort(key=lambda x: x[1])\n\n # Find most common labels\n labels = list()\n for i in range(k):\n newLabel = True\n for j in range(len(labels)):\n if labels[j][0] == distTuples[i][0].label:\n labels[j] = (labels[j][0], labels[j][1] + 1)\n newLabel = False\n if newLabel:\n labels.append((distTuples[i][0].label, 1))\n return max(labels, key=lambda x: x[1])[0]",
"def knn(x, x_train, y_train, k=1):\n y_pred = np.zeros(len(x), dtype=np.int8)\n for i, sample in enumerate(x):\n # Calculate distance from this sample to every training sample\n dist = [np.linalg.norm(sample-train) for train in x_train]\n\n # Find the k nearest training samples\n k_nearest_labels = []\n for j in range(k):\n closest = np.argmin(dist)\n k_nearest_labels.append(y_train[closest])\n dist.pop(closest)\n\n # This sample's label the one the appears most frequently in\n # the k nearest, or the first nearest if all appear equally\n labels, counts = np.unique(k_nearest_labels, return_counts=True)\n y_pred[i] = labels[np.argmax(counts)]\n return y_pred",
"def KNN(x_train, x_test, y_train, k=3):\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(x_train, y_train)\n y_pred = knn.predict(x_test)\n return y_pred",
"def knn(train_data, train_labels, test_data, test_labels, k):\n pred_labels = []\n for t in test_data:\n dist = calculate_distances(train_data, t)\n pred_class = majority_voting(dist, train_labels, k)\n pred_labels.append(pred_class)\n correct_pred_count = np.sum(pred_labels == test_labels)\n acc = correct_pred_count/len(test_labels)\n return acc",
"def find_best_k(X_train, y_train, X_test, y_test, min_k=1, max_k=25):\n best_k = 0\n best_score = 0.0\n for k in range(min_k, max_k+1, 2):\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(X_train, y_train)\n preds = knn.predict(X_test)\n f1 = f1_score(y_test, preds)\n if f1 > best_score:\n best_k = k\n best_score = f1\n print(\"Best Value for k: {}\".format(best_k))\n print(\"F1-Score: {}\".format(best_score))",
"def knn(k, Xtrain, Ytrain, Xtest):\n d = euclidean_distances(Xtest, Xtrain, squared=True)\n nnc = Ytrain[np.argsort(d)[..., :k].flatten()].reshape(Xtest.shape[0], k)\n pred = [max(nnc[i], key=Counter(nnc[i]).get) for i in range(nnc.shape[0])]\n return np.array(pred)",
"def predictClass(training_data, test_row, k):\n\n neighbors = getNeighbors(training_data, test_row, k)\n output_vals = [row[-1] for row in neighbors]\n \n counts = dict()\n\n for i in output_vals:\n counts[i] = counts.get(i, 0) + 1\n\n v = [value for value in counts.values()]\n\n #Pick a class on random if ties occur\n prediction = choice([key for key in counts if counts[key] == max(v)])\n\n return prediction",
"def find_best_k(x_train, y_train, ks):\n params = {'n_neighbors': ks}\n knn = neighbors.KNeighborsRegressor()\n model = GridSearchCV(knn, params, cv=5)\n model.fit(x_train, y_train)\n best_k = model.best_params_\n return best_k",
"def _predict(self, x):\n # Compute the distance between x and each data point in X_train\n distances = [self._get_distance(x, x_train) for x_train in self.X_train]\n # Get the labels of the k nearest samples to x based on the distances\n k_nearest_indices = np.argsort(distances)[:self.k]\n k_nearest_labels = [self.y_train[idx] for idx in k_nearest_indices]\n # Determine the most common of the k nearest labels\n most_common_class = Counter(k_nearest_labels).most_common(1)[0][0]\n\n return most_common_class",
"def classify(k, sorted_labels):\n k_neighbors = sorted_labels[:k]\n men_occurencies = np.count_nonzero(k_neighbors == 'M')\n women_occurencies = np.count_nonzero(k_neighbors == 'W')\n\n return 'M' if men_occurencies > women_occurencies else 'W'",
"def K_Nearest_Neighbours_Model(train_features, train_labels, k_value=5, algorithm_auto=\"auto\"):\n # create an instance of the KNN SciKit learn class\n model = KNeighborsClassifier(n_neighbors=k_value, algorithm=algorithm_auto)\n # fit the model to the training data and labels\n model.fit(train_features, train_labels.values.ravel())\n # return the .fit() model\n return model",
"def getKNNClassifier():\n codebook = loadCodebook()\n \n args.nVisualWords = codebook.shape[0]\n \n # find nearest neighbor in the codebook\n knn = cv2.KNearest()\n # construct kd-tree with labels from 0 - (nCodewords-1)\n knn.train(codebook,np.arange(args.nVisualWords))\n \n return knn",
"def classify_with_knn(train_data, train_labels, test_data, test_labels, k=3, metric='minkowski'):\n from sklearn.neighbors import KNeighborsClassifier\n from sklearn.metrics import f1_score, roc_auc_score\n\n neigh = KNeighborsClassifier(n_neighbors=k, metric=metric)\n neigh.fit(train_data, train_labels)\n accuracy = neigh.score(test_data, test_labels)\n pred_labels = neigh.predict(test_data)\n F1 = f1_score(test_labels, pred_labels)\n AUC = roc_auc_score(test_labels, pred_labels)\n\n return accuracy, F1, AUC",
"def topk_accuracy(scores, labels, ks, selected_class=None):\n if selected_class is not None:\n idx = labels == selected_class\n scores = scores[idx]\n labels = labels[idx]\n rankings = scores.argsort()[:, ::-1]\n # trim to max k to avoid extra computation\n maxk = np.max(ks)\n\n # compute true positives in the top-maxk predictions\n tp = rankings[:, :maxk] == labels.reshape(-1, 1)\n\n # trim to selected ks and compute accuracies\n return [tp[:, :k].max(1).mean() for k in ks]",
"def knn_predict(p, points, outcomes, k):\n\tind = find_nearest_neighbors(p, points, k)\n\treturn majority_vote(outcomes[ind])",
"def classify(self, document, k, distance_type=\"sqeuclidean\"):\n if k == 0:\n raise ValueError(\"Must enter positive value for k parameter.\")\n \n # If only one neighbor, do more optimal calculation\n if k == 1:\n return self.__classify_nearest_neighbor(document, distance_type)\n \n # List of distance - class tuples\n nearest_neighbors = list()\n \n for index in self.vectors.shape[0]:\n vector = self.vectors[index, :].data.tolist()\n distance = self.distance(document, vector, distance_type)\n n = len(nearest_neighbors)\n \n if n < k:\n nearest_neighbors = sorted(nearest_neighbors.append((distance, self.classes[index])))\n else: \n for i in range(n):\n if distance < nearest_neighbors[i][0]:\n j = n - 1\n while j > i:\n nearest_neighbors[j] = nearest_neighbors[j - 1]\n j -= 1\n nearest_neighbors[i] = (distance, self.classes[index])\n break\n \n occurrences = dict()\n for neighbor in nearest_neighbors:\n if neighbor[1] not in occurrences.keys():\n occurrences[neighbor[1]] = 1\n else:\n occurrences[neighbor[1]] += 1\n \n class_count = [(ocurrence, class_) for class_, ocurrence in occurrences]\n return class_count[max(class_count)[1]]",
"def kNN_train(self, x_train, y_train, x_test, k = 5, processing = None, distMethod = \"Manhattan\"):\n y_test = list()\n\n if processing == \"Scalar\":\n # print(\"Preprocessing = Scalar\")\n stdScalar = preprocessing.StandardScaler().fit(x_train)\n x_train = stdScalar.transform(x_train)\n x_test = stdScalar.transform(x_test)\n\n elif processing == \"MinMax\":\n\n # print(\"Preprocessing = MinMax\")\n mmScalar = preprocessing.MinMaxScaler()\n x_train = mmScalar.fit_transform(x_train)\n x_test = mmScalar.fit_transform(x_test)\n\n elif processing == \"None\":\n self.true = True\n # print(\"No Preprocessing\")\n\n else:\n print(\"wrong processing\")\n exit()\n\n for i in range(0, len(x_test)):\n y_test_temp = list()\n zeroCount = 0\n oneCount = 0\n\n # find distance of a instance in test test to all instances in training set\n for j in range(0, len(x_train)):\n if distMethod == \"Manhattan\":\n y_test_temp.append(self.manhattan(x_train[j], x_test[i]))\n elif distMethod == \"Euclidean\":\n y_test_temp.append(self.euclidean(x_train[j], x_test[i]))\n else:\n print \"something wrong with distance calculation\"\n exit()\n\n # take indices of k nearest points\n # print y_test_temp\n temp = np.asarray(y_test_temp).argsort()[:k]\n # check class of each of k nearest points\n for tmp in temp:\n if y_train[tmp] == 0:\n zeroCount += 1\n elif y_train[tmp] == 1:\n oneCount += 1\n else:\n print(\"something wrong in counting\")\n\n # classify\n if zeroCount >= oneCount:\n y_test.append(int(0))\n elif oneCount > zeroCount:\n y_test.append(int(1))\n else:\n print(\"somethign wrong\")\n\n # print y_test\n return y_test",
"def predict_labels(self, distances, k=1):\n\n num_test = distances.shape[0]\n Y_pred = np.zeros((num_test,))\n\n \n for i in range(num_test):\n # extracting k-nearest-neighbors for each test-point\n kNN_idxs = np.argsort(distances[i,:])[0:k]\n \n # voting among the k-nearest-neighbors\n kNN_labels = {}\n # print(type(kNN_labels))\n\n for j in range(k):\n m_label = self.Y_train[kNN_idxs[j]]\n if m_label in kNN_labels.keys():\n # print(type(kNN_labels))\n kNN_labels[m_label] += 1 # increment count\n else:\n # print(m_label,'....', type(kNN_labels))\n kNN_labels[m_label] = 1 # initial count when the label occurs\n \n # counting the winning label\n\n winning_label = kNN_labels.keys()[0] # initialization\n \n for label in kNN_labels.keys():\n if kNN_labels[label] > kNN_labels[winning_label]:\n winning_label = label\n elif kNN_labels[label] == kNN_labels[winning_label]:\n # tie breaker\n if label < winning_label:\n winning_label = label\n \n\n Y_pred[i] = winning_label # storing winning label for each test-point\n \n return Y_pred",
"def classify(self, point=None, k=1, dist=None, prbout=0):\n if not point:\n return []\n\n neighbors = self.kdtree.search_knn(point, k, dist)\n prb = self.decision(neighbors)\n # print prb\n if prbout == 0:\n return prb[0][0]\n elif prbout == 1:\n return prb",
"def run_knn(\n features: List[List[float]],\n labels: List[Optional[bool]],\n k: int = 1,\n) -> List[bool]:\n # Filter out the features that are already clustered\n features_l, labels_l = zip(*[(f, l) for f, l in zip(features, labels) if isinstance(l, bool)])\n\n # Fit a nearest neighbour algorithm\n neighbours = KNeighborsClassifier(\n n_neighbors=k,\n ).fit(features_l, labels_l)\n\n # Predict all the features' labels\n return neighbours.predict(features) # type: ignore",
"def knn(trainingSetData, testSetData, k):\n trainingSet = trainingSetData.drop([14], axis=1) # drop income\n testSet = testSetData.drop([14], axis=1) # drop income\n\n distances = {}\n # this will store the distances re-sorted in ascending/descending order\n sort = {}\n # income band results (>=50k or <50K)\n incomePredictions = []\n\n # Calculating euclidean distance between each row of training data and test data instance\n for testInstance in range(len(testSet)): # len(testSet)\n \n # Store current test Point:\n testInstance = testSet.iloc[testInstance] \n \n distances = euclideanDistanceRow(testInstance, trainingSet)\n\n # sort the distances in order of smallest first:\n sorted_d = sorted(distances.items(), key=lambda x: x[1], reverse=False)\n\n neighbors = []\n\n # Extracting top k neighbors\n for x in range(k):\n neighbors.append(sorted_d[x])\n\n\n classVotes = {}\n\n # Calculating the most freq class in the neighbors\n results = {\"lessThan50\": 0, \"moreThan50\": 0}\n\n # creating a dataframe to which we will add the income values:\n\n for x in range(len(neighbors)):\n if (trainingSetData.iloc[neighbors[x][0]][14] == 0.0):\n results[\"lessThan50\"] += 1\n elif (trainingSetData.iloc[neighbors[x][0]][14] == 1.0):\n results[\"moreThan50\"] += 1\n\n print('results',results)\n\n if (results[\"lessThan50\"] > results[\"moreThan50\"]):\n incomePredictions.append(0.0)\n elif (results[\"lessThan50\"] < results[\"moreThan50\"]):\n incomePredictions.append(1.0)\n\n return incomePredictions",
"def get_neighbors(training_set, \r\n labels, \r\n test_instance, \r\n k, \r\n distance=distance):\r\n distances = []\r\n for index in range(len(training_set)):\r\n dist = distance(test_instance, training_set[index])\r\n distances.append((training_set[index], dist, labels[index]))\r\n distances.sort(key=lambda x: x[1])\r\n neighbors = distances[:k]\r\n return neighbors",
"def knn(k, train_data, train_labels, valid_data):\n dist = l2_distance(valid_data.T, train_data.T)\n nearest = np.argsort(dist, axis=1)[:, :k]\n\n train_labels = train_labels.reshape(-1)\n valid_labels = train_labels[nearest]\n\n # Note this only works for binary labels:\n valid_labels = (np.mean(valid_labels, axis=1) >= 0.5).astype(np.int)\n valid_labels = valid_labels.reshape(-1, 1)\n\n return valid_labels",
"def knnSame(k, Xtrain, Ytrain):\n d = euclidean_distances(Xtrain, squared=True)\n np.fill_diagonal(d, np.inf)\n nnc = Ytrain[np.argsort(d)[..., :k].flatten()].reshape(Xtrain.shape[0], k)\n pred = [max(nnc[i], key=Counter(nnc[i]).get) for i in range(nnc.shape[0])]\n return np.array(pred)"
] | [
"0.7780232",
"0.77610284",
"0.7689671",
"0.7591956",
"0.75637233",
"0.7306566",
"0.7278552",
"0.72072935",
"0.71550226",
"0.71461403",
"0.71255",
"0.71031046",
"0.70180625",
"0.69530344",
"0.6946558",
"0.6925374",
"0.6919172",
"0.6916706",
"0.6903651",
"0.6782697",
"0.6778249",
"0.67604357",
"0.6715812",
"0.67094284",
"0.66881603",
"0.66718286",
"0.66690904",
"0.6635434",
"0.66141915",
"0.65977496"
] | 0.83432674 | 0 |
Given data (observed x and labels t) and choice k of nearest neighbors, plots the decision boundary based on a grid of classifications over the feature space. | def plot_decision_boundary(k, x, t, granularity=100, figures_root='../figures', data_name=None):
print(f'KNN for K={k}')
# Initialize meshgrid to be used to store the class prediction values
# this is used for computing and plotting the decision boundary contour
pointsX = numpy.linspace(numpy.min(x[:, 0]) - 0.1, numpy.max(x[:, 0]) + 0.1, granularity)
pointsY = numpy.linspace(numpy.min(x[:, 1]) - 0.1, numpy.max(x[:, 1]) + 0.1, granularity)
Xv, Yv = numpy.meshgrid(pointsX, pointsY)
# Calculate KNN classification for every point in meshgrid
classes = numpy.zeros(shape=(Xv.shape[0], Xv.shape[1]))
for i in range(Xv.shape[0]):
for j in range(Xv.shape[1]):
c = knn(numpy.array([Xv[i][j], Yv[i][j]]), k, x, t)
# print('{0} {1} {2}'.format(i, j, c))
classes[i][j] = c
# plot the binary decision boundary contour
plt.figure()
plt.pcolormesh(Xv, Yv, classes, cmap=CMAP_LIGHT)
ti = f'KNN with K = {k}'
plt.title(ti)
plt.draw()
save_path = None
if data_name is not None:
save_path = os.path.join(figures_root, f'knn_{data_name}_k={k}')
# else:
# save_path = os.path.join(figures_root, f'knn_k={k}')
# plot the data (on top of the decision boundary color mesh)
plot_data(x, t, new_figure=False, save_path=save_path)
return classes | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def draw_knn_boundaries(knn, h=0.02): # h = Step size in the mesh\n ax = plt.gca()\n [xmin, xmax] = ax.get_xlim()\n [ymin, ymax] = ax.get_ylim()\n # Generate the axis associated to the first feature: \n x_axis = np.arange(xmin, xmax, h)\n # Generate the axis associated to the 2nd feature: \n y_axis = np.arange(ymin, ymax, h)\n # Generate a meshgrid (2D grid) from the 2 axis:\n x_grid, y_grid = np.meshgrid(x_axis, y_axis)\n # Vectorize the grids into column vectors:\n x_grid_vectorized = x_grid.flatten()\n x_grid_vectorized = np.expand_dims(x_grid_vectorized, axis=1)\n y_grid_vectorized = y_grid.flatten()\n y_grid_vectorized = np.expand_dims(y_grid_vectorized, axis=1)\n # Concatenate the vectorized grids\n grid = np.concatenate((x_grid_vectorized, y_grid_vectorized), axis=1)\n # Now you can use 'grid' as data to classify by the knn \n\n # Predict concatenated features to get the decision boundaries:\n decision_boundaries = ... #TODO!\n\n # Reshape the decision boundaries into a 2D matrix:\n decision_boundaries = decision_boundaries.reshape(x_grid.shape)\n plt.pcolormesh(x_grid, y_grid, decision_boundaries, cmap=cmap_light, zorder=1)\n return ax",
"def plot_boundary(X, y, resolution=100, n_neighbors=1):\n \n xmin, xmax, ymin, ymax = np.min(X[:,0]), np.max(X[:,0]), np.min(X[:,1]), np.max(X[:,1])\n \n xs, ys = np.linspace(xmin-0.1, xmax+0.1, num=resolution), np.linspace(ymin-0.1, ymax+0.1, num=resolution)\n xgrid, ygrid = np.meshgrid(xs, ys)\n \n \n clf = KNN(n_neighbors=n_neighbors)\n clf.fit(X, y)\n \n Xpred = np.stack((xgrid.flatten(), ygrid.flatten()), axis=1)\n ypred = clf.predict(Xpred)\n ypred = ypred.reshape((resolution, resolution))\n \n ind1 = np.where(ypred[:-1,:] != ypred[1:,:])\n ind2 = np.where(ypred[:,:-1] != ypred[:,1:])\n \n xret = np.concatenate((xgrid[ind1].flatten(), xgrid[ind2].flatten()))\n yret = np.concatenate((ygrid[ind1].flatten(), ygrid[ind2].flatten()))\n \n return xret, yret",
"def plot_decision_regions(X, y, classifier, resolution=0.02):\n #setup marker generator and color map\n markers = ('s', 'x', 'o', '^', 'v')\n colors = ('red', 'blue', 'lightgreen', 'gray', 'cyan')\n cmap = ListedColormap(colors[:len(np.unique(y))])\n\n #plot the decision surface\n #just find the limit and/reduce 1\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n #np.arange(start, stop, step): create list of tupple from start to stop with step of step\n #np.meshgrid convert: create accessible arrays from list of tupple\n #(-1,-2) (-1,0) (-1,1) xx1 = [-1 -1 -1][0 0 0 ][1 1 1]\n #(0,-2)(0,0)(0,1) ==> \n #(1,-2)(1,0)(1,1) xx2 = [-2 -2 -2][0 0 0 ][1 1 1]\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n\n #ravel() xx1 = [-1 -1 -1 0 0 0 1 1 1]\n # xx2 = [-2 -2 -2 0 0 0 1 1 1]\n #array() [[-1 -1 -1 0 0 0 1 1 1]\n # [-2 -2 -2 0 0 0 1 1 1]] concatenation... sort of\n #.T , transpose from in this case a 2x9 to 9x2\n\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha = 0.3, cmap=cmap)\n plt.xlim(xx1.min(), xx1.max())\n plt.ylim(xx2.min(), xx2.max())\n\n #plot class samples\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0],\n y=X[y == cl, 1],\n alpha=0.8,\n c=colors[idx],\n marker=markers[idx],\n label=cl,\n edgecolor='black')",
"def plot(self):\r\n \r\n\r\n print(\"Printing decision surfaces of decision trees\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n for _ in range (self.n_estimators):\r\n plt.subplot(2, 3, _ + 1)\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = self.clfs[_].predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface of a decision tree using paired features\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig1 = plt\r\n\r\n # Figure 2\r\n print(\"Printing decision surface by combining the individual estimators\")\r\n plot_colors = \"rb\"\r\n plot_step = 0.02\r\n n_classes = 2\r\n x_min, x_max = self.X.iloc[:, 0].min() - 1, self.X.iloc[:, 0].max() + 1\r\n y_min, y_max = self.X.iloc[:, 1].min() - 1, self.X.iloc[:, 1].max() + 1\r\n xx, yy = np.meshgrid(np.arange(x_min, x_max, plot_step),np.arange(y_min, y_max, plot_step))\r\n plt.tight_layout(h_pad=0.5, w_pad=0.5, pad=2.5)\r\n Z = config.Classifier_AB.predict(np.c_[xx.ravel(), yy.ravel()])\r\n Z = np.array(Z)\r\n Z = Z.reshape(xx.shape)\r\n cs = plt.contourf(xx, yy, Z, cmap=plt.cm.RdBu)\r\n for i, color in zip(range(n_classes), plot_colors):\r\n if i == 0:\r\n idx = np.where(self.y == -1)\r\n if i == 1:\r\n idx = np.where(self.y == 1)\r\n for i in range (len(idx[0])):\r\n plt.scatter(self.X.loc[idx[0][i]][0], self.X.loc[idx[0][i]][1],c=color,cmap=plt.cm.RdBu, edgecolor='black', s=15)\r\n plt.suptitle(\"Decision surface by combining individual estimators\")\r\n plt.legend(loc='lower right', borderpad=0, handletextpad=0)\r\n plt.axis(\"tight\")\r\n\r\n plt.show()\r\n fig2 = plt\r\n\r\n return [fig1,fig2]",
"def plot_decision_regions(self, option, canvas):\n\t\tle = preprocessing.LabelEncoder()\t\t# integer encoder\n\t\tle.fit(self.y)\n\t\tclassifier = self.classifier.fit(self.X, le.transform(self.y))\n\t\tclasses = classifier.classes_\n\t\tnum_classes = len(classes)\n\n\t\tif option == 'train':\n\t\t\tX = self.X\n\t\t\ty = self.y\n\t\telif option == 'test':\n\t\t\tX = self.test_X\n\t\t\ty = self.test_y\n\n\t\tb1 = self.X.iloc[:, 0]\n\t\tb2 = self.X.iloc[:, 1]\n\t\tb1_slack = (b1.max() - b1.min()) * 0.1\n\t\tb2_slack = (b2.max() - b2.min()) * 0.1\n\t\tb1_min, b1_max = b1.min() - b1_slack, b1.max() + b1_slack \t# x-axis range\n\t\tb2_min, b2_max = b2.min() - b2_slack, b2.max() + b2_slack\t# y-axis range\n\t\tstep_1 = (b1_max - b1_min) / 200\n\t\tstep_2 = (b2_max - b2_min) / 200\n\t\tmd1, md2 = np.meshgrid(np.arange(b1_min, b1_max, step_1), np.arange(b2_min, b2_max, step_2))\n\n\t\trcParams.update({'font.size': 7})\n\t\tcanvas.figure.clear()\n\t\tax = canvas.figure.subplots()\n\t\tlevels = np.arange(-0.19, 1, 0.2) + 0.2\n\n\t\tif num_classes == 2:\n\t\t\tcm_bkgd = plt.cm.RdBu\n\t\t\tcm_pts = ListedColormap(['#FF0000', '#0000FF'])\n\t\t\tZ = classifier.predict_proba(np.c_[md1.ravel(), md2.ravel()])[:, 1]\n\t\t\tZ = Z.reshape(md1.shape)\n\t\t\tax.contourf(md1, md2, Z, vmin=0, vmax=1, cmap=cm_bkgd, alpha=0.8)\n\n\t\telif num_classes == 3:\n\t\t\tcm_bkgd_1 = plt.cm.Reds\n\t\t\tcm_bkgd_2 = plt.cm.Greens\n\t\t\tcm_bkgd_3 = plt.cm.Blues\n\t\t\tcm_pts = cm_pts = ListedColormap(['#FF0000', '#00FF00', '#0000FF'])\n\t\t\tZ = classifier.predict_proba(np.c_[md1.ravel(), md2.ravel()])\n\t\t\tZ1 = Z[:, 0]\n\t\t\tZ2 = Z[:, 1]\n\t\t\tZ3 = Z[:, 2]\n\n\t\t\tP1 = np.maximum(0, Z1 - np.maximum(Z2, Z3))\n\t\t\tP2 = np.maximum(0, Z2 - np.maximum(Z1, Z3))\n\t\t\tP3 = np.maximum(0, Z3 - np.maximum(Z1, Z2))\n\t\t\tP1 = P1.reshape(md1.shape)\n\t\t\tP2 = P2.reshape(md1.shape)\n\t\t\tP3 = P3.reshape(md1.shape)\n\n\t\t\tax.contourf(md1, md2, P1, levels, cmap=cm_bkgd_1, alpha=0.8)\n\t\t\tax.contourf(md1, md2, P2, levels, cmap=cm_bkgd_2, alpha=0.8)\n\t\t\tax.contourf(md1, md2, P3, levels, cmap=cm_bkgd_3, alpha=0.8)\n\n\t\td1 = X.iloc[:, 0] \t# x-axis\n\t\td2 = X.iloc[:, 1]\t# y-axis\n\t\tax.scatter(d1, d2, c=le.transform(y), cmap=cm_pts, alpha=0.6, edgecolors='k')\n\t\tax.set_xlim(md1.min(), md1.max())\n\t\tax.set_ylim(md2.min(), md2.max())\n\t\tax.set_xticks(())\n\t\tax.set_yticks(())\n\t\tax.set_xlabel(X.columns[0])\n\t\tax.set_ylabel(X.columns[1])\n\n\t\tcanvas.figure.tight_layout()\n\t\tcanvas.draw()",
"def plot_tree_clarans(data, k):\n\n n = len(data)\n num_points = int(scipy.special.binom(n, k))\n num_neigh = k * (n - k)\n\n if (num_points > 50) or (num_neigh > 10):\n print(\n \"Either graph nodes are more than 50 or neighbors are more than 10, the graph would be too big\"\n )\n return\n\n # all possibile combinations of k elements from input data\n name_nodes = list(itertools.combinations(list(data.index), k))\n\n dot = graphviz.Digraph(comment=\"Clustering\")\n\n # draw nodes, also adding the configuration cost\n for i in range(num_points):\n tot_cost, meds = compute_cost_clarans(data, list(name_nodes[i]))\n tc = round(tot_cost, 3)\n\n dot.node(str(name_nodes[i]), str(name_nodes[i]) + \": \" + str(tc))\n\n # only connect nodes if they have k-1 common elements\n for i in range(num_points):\n for j in range(num_points):\n if i != j:\n if (\n len(set(list(name_nodes[i])) & set(list(name_nodes[j])))\n == k - 1\n ):\n dot.edge(str(name_nodes[i]), str(name_nodes[j]))\n\n graph = graphviz.Source(dot) # .view()\n display(graph)",
"def plot_decision_boundary(data, x, y, labels, model, **kwargs):\n xx, yy, Z = setup_contours(data=data, x=x, y=y, model=model)\n\n x0, x1 = data[x].values, data[y].values\n x0lim = x0.min(), x0.max()\n x1lim = x1.min(), x1.max()\n\n col = data[labels].values\n plt.figure(figsize=(10, 10))\n\n plt.scatter(x0, x1, c=col, **kwargs)\n CS = plt.contourf(xx, yy, Z, **kwargs)\n CS2 = plt.contour(CS, CS.levels[::2], **kwargs)\n cbar = plt.colorbar(CS, **kwargs)\n cbar.ax.set_ylabel('Fitted Probability')\n # Add the contour line levels to the colorbar\n cbar.add_lines(CS2)\n\n plt.xlim(x0lim)\n plt.ylim(x1lim)\n plt.xlabel(x)\n plt.ylabel(y)\n plt.legend()",
"def visclassifier(fun,xTr,yTr):\n\n yTr = np.array(yTr).flatten()\n \n symbols = [\"ko\",\"kx\"]\n marker_symbols = ['o', 'x']\n mycolors = [[0.5, 0.5, 1], [1, 0.5, 0.5]]\n classvals = np.unique(yTr)\n\n plt.figure()\n\n res=300\n xrange = np.linspace(min(xTr[:, 0]), max(xTr[:, 0]),res)\n yrange = np.linspace(min(xTr[:, 1]), max(xTr[:, 1]),res)\n pixelX = repmat(xrange, res, 1)\n pixelY = repmat(yrange, res, 1).T\n\n xTe = np.array([pixelX.flatten(), pixelY.flatten()]).T\n\n testpreds = fun(xTe)\n Z = testpreds.reshape(res, res)\n # Z[0,0] = 1 # optional: scale the colors correctly\n plt.contourf(pixelX, pixelY, np.sign(Z), colors=mycolors)\n\n for idx, c in enumerate(classvals):\n plt.scatter(xTr[yTr == c,0],\n xTr[yTr == c,1],\n marker=marker_symbols[idx],\n color='k'\n )\n\n plt.axis('tight')\n plt.show()",
"def plot_decision_regions(X, y, classifier, resolution=.02, test_idx=None):\n # setup marker generator & color map\n plt.figure()\n markers = ('x', 'o')\n colors = ('red', 'blue')\n\n # calculate and plot the decision surface\n x1_min, x1_max = X[:, 0].min() - 1, X[:, 0].max() + 1\n x2_min, x2_max = X[:, 1].min() - 1, X[:, 1].max() + 1\n xx1, xx2 = np.meshgrid(np.arange(x1_min, x1_max, resolution),\n np.arange(x2_min, x2_max, resolution))\n Z = classifier.predict(np.array([xx1.ravel(), xx2.ravel()]).T)\n Z = Z.reshape(xx1.shape)\n plt.contourf(xx1, xx2, Z, alpha=.35, cmap=ListedColormap(colors=colors[:len(np.unique(y))]))\n plt.xlim(xx1.min(), xx2.max())\n plt.ylim(xx2.min(), xx2.max())\n\n # scatter plot all values of the data sets\n for idx, cl in enumerate(np.unique(y)):\n plt.scatter(x=X[y == cl, 0],\n y=X[y == cl, 1],\n c=colors[idx],\n marker=markers[idx],\n label=cl,\n edgecolors='black')\n if test_idx:\n # circle test data\n X_test, y_test = X[test_idx, :], y[test_idx]\n plt.scatter(X_test[:, 0],\n X_test[:, 1],\n c='',\n edgecolors='black',\n alpha=1.0,\n linewidths=1,\n marker='o',\n s=100,\n label='test set')",
"def nearest_neighbors_classifier(data):\n clf = KNeighborsClassifier(3, 'distance')\n clf.name = \"KNN\"\n train_predict_and_results(data, clf)",
"def plot_decision_boundary(pred_func):\n # Set min and max values\n x_min, x_max = X[:, 0].min() - .5, X[:, 0].max() + .5\n y_min, y_max = X[:, 1].min() - .5, X[:, 1].max() + .5\n h = 0.01\n # Generate a grid of points\n xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n # Predict the function value for the whole gid\n Z = pred_func(np.c_[xx.ravel(), yy.ravel()])\n Z = Z.reshape(xx.shape)\n # Plot the contour and training examples\n plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)\n plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)\n plt.show()",
"def plot_2D_boundary(plot_range, points, decisionfcn, labels, values=[0]):\n\n clist = ['b', 'r', 'g', 'k', 'm', 'y'] # colors for the classes\n\n # evaluate on a grid and plot contour of decision function\n x = np.arange(plot_range[0], plot_range[1], .1)\n y = np.arange(plot_range[2], plot_range[3], .1)\n xx, yy = np.meshgrid(x, y)\n xxx, yyy = xx.flatten(), yy.flatten() # lists of x,y in grid\n zz = np.array(decisionfcn(xxx, yyy))\n zz = zz.reshape(xx.shape)\n\n # plot contour(s) at values\n plt.contour(xx, yy, zz, values)\n\n # for each class, plot the points with ’*’ for correct, ’o’ for incorrect\n for i in range(len(points)):\n d = decisionfcn(points[i][:, 0], points[i][:, 1])\n correct_ndx = labels[i] == d\n incorrect_ndx = labels[i] != d\n plt.plot(\n points[i][correct_ndx, 0],\n points[i][correct_ndx, 1],\n '*',\n color=clist[i])\n plt.plot(\n points[i][incorrect_ndx, 0],\n points[i][incorrect_ndx, 1],\n 'o',\n color=clist[i])\n plt.axis('equal')\n plt.show()",
"def optimal_neighbors(X_data,\n y_data,\n standardize = True,\n pct_test=0.25,\n seed=802,\n response_type='reg',\n max_neighbors=20,\n show_viz=True): \n \n \n if standardize == True:\n # optionally standardizing X_data\n scaler = StandardScaler()\n scaler.fit(X_data)\n X_scaled = scaler.transform(X_data)\n X_scaled_df = pd.DataFrame(X_scaled)\n X_data = X_scaled_df\n\n\n\n # train-test split\n X_train, X_test, y_train, y_test = train_test_split(X_data,\n y_data,\n test_size = pct_test,\n random_state = seed)\n\n\n # creating lists for training set accuracy and test set accuracy\n training_accuracy = []\n test_accuracy = []\n \n \n # setting neighbor range\n neighbors_settings = range(1, max_neighbors + 1)\n\n\n for n_neighbors in neighbors_settings:\n # building the model based on response variable type\n if response_type == 'reg':\n clf = KNeighborsRegressor(n_neighbors = n_neighbors)\n clf.fit(X_train, y_train)\n \n elif response_type == 'class':\n clf = KNeighborsClassifier(n_neighbors = n_neighbors)\n clf.fit(X_train, y_train) \n \n else:\n print(\"Error: response_type must be 'reg' or 'class'\")\n \n \n # recording the training set accuracy\n training_accuracy.append(clf.score(X_train, y_train))\n \n # recording the generalization accuracy\n test_accuracy.append(clf.score(X_test, y_test))\n\n\n # optionally displaying visualization\n if show_viz == True:\n # plotting the visualization\n fig, ax = plt.subplots(figsize=(12,8))\n plt.plot(neighbors_settings, training_accuracy, label = \"training accuracy\")\n plt.plot(neighbors_settings, test_accuracy, label = \"test accuracy\")\n plt.ylabel(\"Accuracy\")\n plt.xlabel(\"n_neighbors\")\n plt.legend()\n plt.show()\n \n \n # returning optimal number of neighbors\n print(f\"The optimal number of neighbors is: {test_accuracy.index(max(test_accuracy))+1}\")\n return test_accuracy.index(max(test_accuracy))+1",
"def plot_decision_boundary(resolution=100, colors=('b', 'k', 'r'), levels=(-1, 0, 1)):\n\n # Generate coordinate grid of shape [resolution x resolution]\n # and evaluate the model over the entire space\n xrange = np.linspace(x_train[:,0].min(), x_train[:,0].max(), resolution)\n yrange = np.linspace(x_train[:,1].min(), x_train[:,1].max(), resolution)\n grid = [[decision_function(alpha, y_train,\n Kernel1, x_train,\n np.array([xr, yr]), b) for xr in xrange] for yr in yrange]\n grid = np.array(grid).reshape(len(xrange), len(yrange))\n\n # Plot decision contours using grid and\n # make a scatter plot of training data\n ax.contour(xrange, yrange, grid, levels=levels, linewidths=(1, 1, 1),\n linestyles=('--', '-', '--'), colors=colors)\n ax.scatter(x_train[:,0], x_train[:,1],\n c=y_train, cmap=plt.cm.viridis, lw=0, alpha=0.25)\n\n # Plot support vectors (non-zero alphas)\n # as circled points (linewidth > 0)\n mask = np.round(alpha, decimals=2) != 0.0\n ax.scatter(x_train[mask,0], x_train[mask,1],\n c=y_train[mask], cmap=plt.cm.viridis, lw=1, edgecolors='k')\n\n return grid, ax",
"def plot_decision_boundary(model, X, y):\r\n \r\n x1_array, x2_array = np.meshgrid(np.arange(-4, 4, 0.01), np.arange(-4, 4, 0.01))\r\n grid_coordinates = np.c_[x1_array.ravel(), x2_array.ravel()]\r\n Z = model.predict(grid_coordinates)\r\n Z = Z.reshape(x1_array.shape)\r\n plt.contourf(x1_array, x2_array, Z, cmap=plt.cm.bwr)\r\n plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.bwr)\r\n plt.show()",
"def fit(self, data, labels, labels_pred):\n self.n_samples, dim = data.shape\n self.labels_unique = np.unique(labels)\n self.n_classes = len(self.labels_unique)\n if self.n_neighbors is None:\n # Set number of nearest neighbors based on the maximum number of samples per class and the neighborhood\n # constant\n num = 0\n for c in self.labels_unique:\n ind = np.where(labels == c)[0]\n if ind.shape[0] > num:\n num = ind.shape[0]\n\n self.n_neighbors = int(np.ceil(num ** self.neighborhood_constant))\n\n logger.info(\"Number of samples: {:d}. Data dimension = {:d}.\".format(self.n_samples, dim))\n logger.info(\"Number of classes: {:d}.\".format(self.n_classes))\n logger.info(\"Number of neighbors (k): {:d}.\".format(self.n_neighbors))\n logger.info(\"Fraction of outliers (alpha): {:.4f}.\".format(self.alpha))\n if self.model_dim_reduction:\n data = transform_data_from_model(data, self.model_dim_reduction)\n dim = data.shape[1]\n logger.info(\"Applying dimension reduction to the data. Projected dimension = {:d}.\".format(dim))\n\n # Distance from each sample in `data` to the `1 - alpha` level sets corresponding to each class\n distance_level_sets = np.zeros((self.n_samples, self.n_classes))\n self.index_knn = dict()\n self.epsilon = dict()\n indices_sub = dict()\n for j, c in enumerate(self.labels_unique):\n logger.info(\"Processing data from class '{}':\".format(c))\n logger.info(\"Building a KNN index for all the samples from class '{}'.\".format(c))\n indices_sub[c] = np.where(labels == c)[0]\n data_sub = data[indices_sub[c], :]\n self.index_knn[c] = KNNIndex(\n data_sub, n_neighbors=self.n_neighbors,\n metric=self.metric, metric_kwargs=self.metric_kwargs,\n approx_nearest_neighbors=self.approx_nearest_neighbors,\n n_jobs=self.n_jobs,\n low_memory=self.low_memory,\n seed_rng=self.seed_rng\n )\n # Distances to the k nearest neighbors of each sample\n _, nn_distances = self.index_knn[c].query_self(k=self.n_neighbors)\n # Radius or distance to the k-th nearest neighbor for each sample\n radius_arr = nn_distances[:, self.n_neighbors - 1]\n\n # Smallest radius `epsilon` such that only `alpha` fraction of the samples from class `c` have radius\n # greater than `epsilon`\n if self.alpha > 0.:\n self.epsilon[c] = np.percentile(radius_arr, 100 * (1 - self.alpha), interpolation='midpoint')\n\n # Exclude the outliers and build a KNN index with the remaining samples\n mask_incl = radius_arr <= self.epsilon[c]\n mask_excl = np.logical_not(mask_incl)\n num_excl = mask_excl[mask_excl].shape[0]\n else:\n # Slightly larger value than the largest radius\n self.epsilon[c] = 1.0001 * np.max(radius_arr)\n\n # All samples are included in the density level set\n mask_incl = np.ones(indices_sub[c].shape[0], dtype=np.bool)\n mask_excl = np.logical_not(mask_incl)\n num_excl = 0\n\n if num_excl:\n logger.info(\"Excluding {:d} samples with radius larger than {:.6f} and building a KNN index with \"\n \"the remaining samples.\".format(num_excl, self.epsilon[c]))\n self.index_knn[c] = KNNIndex(\n data_sub[mask_incl, :], n_neighbors=self.n_neighbors,\n metric=self.metric, metric_kwargs=self.metric_kwargs,\n approx_nearest_neighbors=self.approx_nearest_neighbors,\n n_jobs=self.n_jobs,\n low_memory=self.low_memory,\n seed_rng=self.seed_rng\n )\n # Distance to the nearest neighbor of each sample that is part of the KNN index\n _, dist_temp = self.index_knn[c].query_self(k=1)\n ind = indices_sub[c][mask_incl]\n distance_level_sets[ind, j] = dist_temp[:, 0]\n\n # Distance to the nearest neighbor of each sample that is not a 
part of the KNN index (outliers)\n _, dist_temp = self.index_knn[c].query(data_sub[mask_excl, :], k=1)\n ind = indices_sub[c][mask_excl]\n distance_level_sets[ind, j] = dist_temp[:, 0]\n else:\n # No need to rebuild the KNN index because no samples are excluded.\n # Distance to the nearest neighbor of each sample\n distance_level_sets[indices_sub[c], j] = nn_distances[:, 0]\n\n logger.info(\"Calculating the trust score for the estimation data.\")\n for c in self.labels_unique:\n # Compute the distance from each sample from class `c` to the level sets from the remaining classes\n data_sub = data[indices_sub[c], :]\n for j, c_hat in enumerate(self.labels_unique):\n if c_hat == c:\n continue\n\n _, dist_temp = self.index_knn[c_hat].query(data_sub, k=1)\n distance_level_sets[indices_sub[c], j] = dist_temp[:, 0]\n\n self.scores_estim = self._score_helper(distance_level_sets, labels_pred)\n return self",
"def find_knn_hyperparams():\n n_neighbors = np.arange(5, 10)\n ps = np.arange(1, 10)\n results = []\n\n for p in ps:\n result = []\n for _ in range(10):\n data = FaceDataset(\"embeddings/known\", n=50)\n train_data, train_labels = data.train()\n test_data, test_labels = data.test()\n accs = []\n for n in n_neighbors:\n clf = KNeighborsClassifier(n_neighbors=n, weights=\"distance\", p=p)\n clf, _ = train(clf, train_data, train_labels)\n acc, _ = test(clf, test_data, test_labels)\n accs.append(acc)\n result.append(accs)\n result = np.mean(result, axis=0)\n results.append(result)\n\n plots = []\n for i in range(len(ps)):\n p = plotly.graph_objs.Scatter(x=n_neighbors, y=results[i], name=\"p={}\".format(ps[i]))\n plots.append(p)\n\n plotly.offline.plot(plots, filename=\"knn.html\")\n print(\"C={}\".format(n_neighbors[np.argmax(results)]))",
"def show_nn(X):\n neigh = NearestNeighbors(n_neighbors=2)\n nbrs = neigh.fit(X)\n distances, indices = nbrs.kneighbors(X)\n distances = np.sort(distances, axis=0)\n distances = distances[:,1]\n plt.plot(distances)",
"def plot_decision_boundary(model: torch.nn.Module, X: torch.Tensor, y: torch.Tensor):\n # Put everything to CPU (works better with NumPy + Matplotlib)\n model.to(\"cpu\")\n X, y = X.to(\"cpu\"), y.to(\"cpu\")\n\n # Setup prediction boundaries and grid\n x_min, x_max = X[:, 0].min() - 0.1, X[:, 0].max() + 0.1\n y_min, y_max = X[:, 1].min() - 0.1, X[:, 1].max() + 0.1\n xx, yy = np.meshgrid(np.linspace(x_min, x_max, 101), np.linspace(y_min, y_max, 101))\n\n # Make features\n X_to_pred_on = torch.from_numpy(np.column_stack((xx.ravel(), yy.ravel()))).float()\n\n # Make predictions\n model.eval()\n with torch.inference_mode():\n y_logits = model(X_to_pred_on)\n\n # Test for multi-class or binary and adjust logits to prediction labels\n if len(torch.unique(y)) > 2:\n y_pred = torch.softmax(y_logits, dim=1).argmax(dim=1) # mutli-class\n else:\n y_pred = torch.round(torch.sigmoid(y_logits)) # binary\n\n # Reshape preds and plot\n y_pred = y_pred.reshape(xx.shape).detach().numpy()\n plt.contourf(xx, yy, y_pred, cmap=plt.cm.RdYlBu, alpha=0.7)\n plt.scatter(X[:, 0], X[:, 1], c=y, s=40, cmap=plt.cm.RdYlBu)\n plt.xlim(xx.min(), xx.max())\n plt.ylim(yy.min(), yy.max())",
"def decisionBoundary(root, figure, fileName):\n stepValue = 0.001\n classClassification = [1, 2, 3, 4]\n colorClassification = ['b', 'g', 'r', 'm']\n markerClassification = ['x', '+', '*', 'o']\n classesList = [\"Bolts\", \"Nuts\", \"Rings\", \"Scraps\"]\n decisionPlot = figure.add_subplot(111)\n attributeValues, classes, _ = readData(fileName)\n attributeValues = np.array(attributeValues)\n classes = np.array(classes)\n \n \n\n attribute1, attribute2 = np.meshgrid(np.arange(0, 1, stepValue), np.arange(0, 1, stepValue))\n\n predicted_class = []\n for i in range(attribute1.shape[0]):\n predicted_class.append([])\n for j in range(attribute1.shape[1]):\n result = [attribute1[i][j], attribute2[i][j]]\n predicted_value = classify(np.array(result), root)\n predicted_class[i].append(predicted_value)\n\n decisionPlot.contourf(attribute1, attribute2, np.array(predicted_class))\n\n for a in classClassification:\n attribute1=[]\n attribute2=[]\n \n for j in range(len(attributeValues[:])):\n \n if classes[j]==a:\n attribute1 +=[attributeValues[j][0]]\n for k in range(len(attributeValues[:])):\n if classes[k]==a:\n attribute2 +=[attributeValues[k][1]]\n \n \n decisionPlot.scatter(attribute1, attribute2, color=colorClassification[a - 1], marker=markerClassification[a - 1]\n , label=classesList[a - 1], s=100)\n\n decisionPlot.legend(loc='upper right')\n decisionPlot.set_xlabel(\"Six fold Rotational Symmetry\")\n decisionPlot.set_ylabel(\"Eccentricity\")\n decisionPlot.set_title(\"Decision boundary\")\n return decisionPlot",
"def knn(p, k, x, t):\r\n\r\n # Number of instances in data set\r\n N = x.shape[0]\r\n\r\n Euclidean_Distance = numpy.square(x - p) #Euclidean distance\r\n dis = numpy.sum(Euclidean_Distance, axis=1) #sum of the euclidean distance\r\n inds = numpy.argsort(dis)[:k] #sort the indices of the distance array\r\n tgt_cat = Counter([t[i] for i in inds]) #count the times of equivalent target labels\r\n top_class = max(tgt_cat, key= tgt_cat.get) #top class among the k nearest points\r\n\r\n\r\n #top_class = 0\r\n\r\n return top_class",
"def knn(trainingSetData, testSetData, k):\n trainingSet = trainingSetData.drop([14], axis=1) # drop income\n testSet = testSetData.drop([14], axis=1) # drop income\n\n distances = {}\n # this will store the distances re-sorted in ascending/descending order\n sort = {}\n # income band results (>=50k or <50K)\n incomePredictions = []\n\n # Calculating euclidean distance between each row of training data and test data instance\n for testInstance in range(len(testSet)): # len(testSet)\n \n # Store current test Point:\n testInstance = testSet.iloc[testInstance] \n \n distances = euclideanDistanceRow(testInstance, trainingSet)\n\n # sort the distances in order of smallest first:\n sorted_d = sorted(distances.items(), key=lambda x: x[1], reverse=False)\n\n neighbors = []\n\n # Extracting top k neighbors\n for x in range(k):\n neighbors.append(sorted_d[x])\n\n\n classVotes = {}\n\n # Calculating the most freq class in the neighbors\n results = {\"lessThan50\": 0, \"moreThan50\": 0}\n\n # creating a dataframe to which we will add the income values:\n\n for x in range(len(neighbors)):\n if (trainingSetData.iloc[neighbors[x][0]][14] == 0.0):\n results[\"lessThan50\"] += 1\n elif (trainingSetData.iloc[neighbors[x][0]][14] == 1.0):\n results[\"moreThan50\"] += 1\n\n print('results',results)\n\n if (results[\"lessThan50\"] > results[\"moreThan50\"]):\n incomePredictions.append(0.0)\n elif (results[\"lessThan50\"] < results[\"moreThan50\"]):\n incomePredictions.append(1.0)\n\n return incomePredictions",
"def make_prediction_grid(predictors, outcomes, limits, h, k):\n\t(x_min, x_max, y_min, y_max) = limits\n\txs = np.arange(x_min, x_max, h)\n\tys = np.arange(y_min, y_max, h)\n\txx, yy = np.meshgrid(xs, ys)\n\n\tprediction_grid = np.zeros(xx.shape, dtype = int)\n\tfor i,x in enumerate(xs):\n\t\tfor j, y in enumerate(ys):\n\t\t\tp = np.array([x, y])\n\t\t\tprediction_grid[j,i] = knn_predict(p, predictors, outcomes, k)\n\n\treturn(xx, yy, prediction_grid)",
"def visualize(self, reduced_data):\n\t\t# Step size of the mesh. Decrease to increase the quality of the VQ.\n\t\th = .02 # point in the mesh [x_min, m_max]x[y_min, y_max].\n\t\t\n\t\t# Plot the decision boundary. For that, we will assign a color to each\n\t\tx_min, x_max = reduced_data[:, 0].min() + 1, reduced_data[:, 0].max() - 1\n\t\ty_min, y_max = reduced_data[:, 1].min() + 1, reduced_data[:, 1].max() - 1\n\t\txx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))\n\n\t\t# Obtain labels for each point in mesh. Use last trained model.\n\t\tZ = self.estimator.predict(np.c_[xx.ravel(), yy.ravel()])\n\n\t\t# Put the result into a color plot\n\t\tZ = Z.reshape(xx.shape)\n\t\t\n\t\tplt.figure(1)\n\t\tplt.clf()\n\t\tplt.imshow(Z, interpolation='nearest',\n\t\t extent=(xx.min(), xx.max(), yy.min(), yy.max()),\n\t\t cmap=plt.cm.Paired,\n\t\t aspect='auto', origin='lower')\n\n\t\tplt.plot(reduced_data[:, 0], reduced_data[:, 1], 'k.', markersize=4)\n\t\t# Plot the centroids as a white X\n\t\tcentroids = self.estimator.cluster_centers_\n\t\tplt.scatter(centroids[:, 0], centroids[:, 1],\n\t\t marker='x', s=169, linewidths=3,\n\t\t color='w', zorder=10)\n\t\tplt.title('K-means clustering with random data (PCA-reduced data)\\n'\n\t\t 'Centroids are marked with white cross')\n\t\tplt.xlim(x_min, x_max)\n\t\tplt.ylim(y_min, y_max)\n\t\tplt.xticks(())\n\t\tplt.yticks(())\n\t\tplt.show()",
"def decision_plot(self, X, y):\n\n y = self._slice_target_index(y=y)\n\n for index in range(_n_targets(y)):\n if sklearn.utils.multiclass.type_of_target(y) == 'continuous-multioutput':\n self.fit(X, y.iloc[:, index].values.ravel(order='K'))\n else:\n self.fit(X, y)\n explainer, shap_values = self.explainer(X=X)\n shap.decision_plot(base_value=explainer.expected_value, shap_values=shap_values,\n feature_names=list(X.columns), show=self.show)",
"def visualizePredictions(testData,knn_predictions):\r\n testData.visualize.scatterPlot('Petal length','Petal width')\r\n testData.dataDict[testData.reference] = knn_predictions\r\n testData.visualize.scatterPlot('Petal length','Petal width')\r\n\r\n pass",
"def run_knn(\n features: List[List[float]],\n labels: List[Optional[bool]],\n k: int = 1,\n) -> List[bool]:\n # Filter out the features that are already clustered\n features_l, labels_l = zip(*[(f, l) for f, l in zip(features, labels) if isinstance(l, bool)])\n\n # Fit a nearest neighbour algorithm\n neighbours = KNeighborsClassifier(\n n_neighbors=k,\n ).fit(features_l, labels_l)\n\n # Predict all the features' labels\n return neighbours.predict(features) # type: ignore",
"def plotDistributionWithLimitsRefine(lXs, llYs, lKClassif,out=\"out.png\", title=\"title\", xax=\"xax\", yax=\"yax\",legend=\"\"):\n\n fig = plt.Figure(figsize=(40,20))\n fig.suptitle(title, fontsize=32)\n nbPlots = len(llYs)\n sqrt = int(math.ceil(math.sqrt(nbPlots)))\n ymax = 0.0\n for i,val in enumerate(llYs):\n if lKClassif[i] != \"refine\":\n ymax = max(max(val[0]),ymax)\n ymaxCurrent = max(max(val[2]),ymax)\n ymax = ymax*1.05\n xmax = 147\n gs = gridspec.GridSpec(1,2) \n ax = fig.add_subplot(gs[0])\n gsLimit = gridspec.GridSpecFromSubplotSpec(sqrt,sqrt, subplot_spec=gs[1])\n for i,val in enumerate(llYs):\n if lKClassif[i] != \"refine\":\n ax.plot(lXs,val[0],color=Graphics.lColors[i%25])\n axCurrent = fig.add_subplot(gsLimit[i]) \n axCurrent.fill_between(lXs, val[1], val[2], alpha=0.35, edgecolor='black', facecolor=Graphics.lColors[i%25])\n axCurrent.set_title(\"Cluster K{}, (position: {})\".format(i,lKClassif[i]))\n axCurrent.fill_between(lXs, val[3], val[4], alpha=0.85, edgecolor='darkgray', facecolor='lightgray')\n axCurrent.plot(lXs,val[0],color=Graphics.lColors[i%25])\n axCurrent.set_ylim(0,ymaxCurrent)\n axCurrent.set_xlim(1,xmax)\n axCurrent.text(10, ymaxCurrent*0.90, \"#nucleosomes: {}\".format(legend[i]), fontsize=12)\n axis_font = {'size':'28'}\n ax.set_ylim(0,ymax)\n ax.set_xlim(1,xmax)\n ax.legend([\"K{}\".format(x) for x in range(0,nbPlots)])\n ax.set_title(\"all nucleosomes\", **axis_font)\n ax.set_xlabel(xax, **axis_font)\n ax.set_ylabel(yax, **axis_font)\n ax.tick_params(labelsize=20)\n canvas = FigureCanvasAgg(fig)\n canvas.print_figure(out, dpi=80)",
"def trainAndTune(self, trainingData, trainingLabels, validationData, validationLabels, kgrid):\n\n \"*** YOUR CODE HERE ***\"\n\t#create dictionary of all features for each label\n dict = {}\n for feature in self.features:\n\t\tfor label in self.legalLabels:\n\t\t\tdict[feature, label] = util.Counter()\n\t\t\tfor i in [0,1]: #values of a counter from datum\n\t\t\t\tdict[(feature, label)][i] = 0\n\t\t\t\t#print str(feature) + str(label) + ' ' + str(dict[(feature, label)])\n labelCount = util.Counter()\n for i in range(len(trainingData)):\n\t\t#increment occurrences of each label found in the training data\n\t\tlabel = trainingLabels[i]\n\t\tlabelCount[label] += 1\n\t\tfor feature in trainingData[i]:\n\t\t\t#increment dictionary value by 1 when a feature label combination with a value is found\n\t\t\tdict[(feature, label)][trainingData[i][feature]] += 1\n #normalize labelCount to get P(y) for each label y, or the prior probability \n self.prior = util.normalize(labelCount)\n\t\n bestk = 0\n bestcond = {}\n topguesses = 0\n\t#iterate through each k to find the best k\n for k in kgrid:\n\t\t#empty cond probs\n\t\tself.condprobs = {} \n\t\t#smooth data\n\t\tfor feature_label in dict:\n\t\t\ttmpcounter = dict[feature_label] \n\t\t\t#print feature_label\n\t\t\ttmpcounter.incrementAll(tmpcounter.keys(), k)\n\t\t\t#set condprobs to cond probs with current k value\n\t\t\tself.condprobs[feature_label] = util.normalize(tmpcounter)\n\t\tguesses = self.classify(validationData)\n\t\tguesscorrect = 0\n\t\t#print[guesses]\n\t\tfor i in range(len(guesses)):\n\t\t\tif guesses[i] == validationLabels[i]:\n\t\t\t\tguesscorrect += 1\n\t\tif guesscorrect > topguesses:\n\t\t\tprint \"Guess \",k ,\" is better than \",bestk\n\t\t\ttopguesses = guesscorrect\n\t\t\tbestcond = self.condprobs\n\t\t\tbestk = k\n self.condprobs = bestcond\n self.k = bestk",
"def KNN(x_train, x_test, y_train, k=3):\n knn = KNeighborsClassifier(n_neighbors=k)\n knn.fit(x_train, y_train)\n y_pred = knn.predict(x_test)\n return y_pred"
] | [
"0.7444855",
"0.6746422",
"0.66787404",
"0.64081985",
"0.6387924",
"0.63505673",
"0.62327814",
"0.62229425",
"0.61925626",
"0.6169651",
"0.61491215",
"0.61412793",
"0.6130474",
"0.61028457",
"0.60486627",
"0.6044063",
"0.6039184",
"0.60355693",
"0.599889",
"0.5996375",
"0.59319514",
"0.5906268",
"0.58779544",
"0.58564925",
"0.5840142",
"0.5834796",
"0.5776254",
"0.5763396",
"0.57293314",
"0.57292587"
] | 0.7342933 | 1 |
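
The negative snippets directly above all revolve around k-nearest-neighbour classification and decision-boundary plotting with scikit-learn. As a point of reference (an illustration, not one of the dataset rows), here is a minimal sketch of the pattern they share: fit a KNeighborsClassifier and score it on held-out data. The toy data, the split ratio and the hyperparameters (n_neighbors=5, weights="distance", p=2) are assumptions, not values taken from this dataset.

import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier

# Toy 2-D data (assumption): label is 1 when the two coordinates sum to more than 1.
X = np.random.rand(200, 2)
y = (X[:, 0] + X[:, 1] > 1).astype(int)
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25, random_state=0)

# Fit and score a distance-weighted KNN classifier, the step the snippets above wrap.
clf = KNeighborsClassifier(n_neighbors=5, weights="distance", p=2)
clf.fit(X_train, y_train)
print("test accuracy:", clf.score(X_test, y_test))
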
Get a nova client instance. | def get_nova(self, version='2.1'):
if self.nova is None:
self.nova = novaclient.Client(version, session=self.get_session())
return self.nova | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_nova_client(self):\n region_name = CONF.region_name\n session = self._get_keystone_session()\n return novaclient.client.Client(2, session=session, region_name=region_name)",
"def get_novaclient(self):\n # TODO: We ought to be able to derive this from the keystone client,\n # but it's proving trickier than I expected --isd\n return novaclient.Client(self.cluster_account.cluster_user_name,\n self.cluster_account.cluster_password,\n self.name,\n self.cluster_account.cluster.auth_url)",
"def _get_neutron_client(self):\n session = self._get_keystone_session()\n return neutronclient.v2_0.client.Client(session=session)",
"def gen_nova_client(self):\n\n print \"\\t* Generating nova client\"\n client = nClient.get_client_class('2')\n self.novaclient = client(self.username,\n self.password,\n self.tenant_name,\n self.auth_url,\n service_type='compute')",
"def make_client(instance):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n instance._api_version[API_NAME],\r\n API_VERSIONS,\r\n )\r\n instance.initialize()\r\n url = instance._url\r\n url = url.rstrip(\"/\")\r\n if '2.0' == instance._api_version[API_NAME]:\r\n client = neutron_client(username=instance._username,\r\n tenant_name=instance._tenant_name,\r\n password=instance._password,\r\n region_name=instance._region_name,\r\n auth_url=instance._auth_url,\r\n endpoint_url=url,\r\n token=instance._token,\r\n auth_strategy=instance._auth_strategy,\r\n insecure=instance._insecure,\r\n ca_cert=instance._ca_cert)\r\n return client\r\n else:\r\n raise exceptions.UnsupportedVersion(_(\"API version %s is not \"\r\n \"supported\") %\r\n instance._api_version[API_NAME])",
"def get_keystone_client():\n username = os.environ.get('OS_USERNAME')\n password = os.environ.get('OS_PASSWORD')\n tenant = os.environ.get('OS_TENANT_NAME')\n url = os.environ.get('OS_AUTH_URL')\n assert username is not None\n assert password is not None\n assert tenant is not None\n assert url is not None\n cl = client.Client(username=username, password=password,\n tenant_name=tenant, auth_url=url)\n return cl",
"def Client(api_version, *args, **kwargs):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n api_version,\r\n API_VERSIONS,\r\n )\r\n return neutron_client(*args, **kwargs)",
"def _keystone_client(context, version=(3, 0)):\n auth_plugin = token.Token(\n auth_url=CONF.keystone_authtoken.auth_uri,\n token=context.auth_token,\n project_id=context.project_id)\n client_session = session.Session(auth=auth_plugin,\n verify=False if\n CONF.keystone_authtoken.insecure else\n (CONF.keystone_authtoken.cafile or True))\n return client.Client(auth_url=CONF.keystone_authtoken.auth_uri,\n session=client_session, version=version)",
"def nova(self, obj):\n\n if self._novaclient is not None:\n return self._novaclient\n params = self._build_conn_params(obj.user, obj.project)\n self._novaclient = driver_base.SenlinDriver().compute(params)\n return self._novaclient",
"def _get_glance_client(self):\n session = self._get_keystone_session()\n return glanceclient.client.Client(2, session=session)",
"def make_client(instance):\n network_client = utils.get_client_class(\n API_NAME,\n instance._api_version[API_NAME],\n API_VERSIONS)\n LOG.debug('Instantiating network client: %s', network_client)\n\n endpoint = instance.get_endpoint_for_service_type(\n API_NAME,\n region_name=instance._region_name,\n )\n\n return network_client(\n username=instance._username,\n tenant_name=instance._project_name,\n password=instance._password,\n region_name=instance._region_name,\n auth_url=instance._auth_url,\n endpoint_url=endpoint,\n token=instance.auth.get_token(instance.session),\n insecure=instance._insecure,\n ca_cert=instance._cacert,\n )",
"def get_keystone(self, version='3'):\n if self.keystone is None:\n iface = os.getenv('OS_ENDPOINT_TYPE', \"public\")\n self.keystone = keystoneclient.Client(\n version=version,\n session=self.get_session(),\n interface=iface)\n return self.keystone",
"def client():\n\n client = Client()\n return client",
"def get_cinder(self, version='2'):\n if self.cinder is None:\n iface = os.getenv('OS_ENDPOINT_TYPE', \"public\")\n self.cinder = cinderclient.Client(version,\n session=self.get_session(),\n interface=iface)\n return self.cinder",
"def get_client(version, **kwargs):\n endpoint = kwargs.get('os_endpoint') or kwargs.get('ceilometer_url')\n\n return Client(version, endpoint, **kwargs)",
"def _get_client(self):\n _client = KOPS(provider=self.provider, config=self.config)\n return _client",
"def _get_client(self):\n if self._client is None:\n self._client = self.boto.client(service_name='elb', region_name=self.boto.cli_region)\n\n return self._client",
"def get_keystoneclient(self):\n try:\n if self.token is None:\n client = keystoneclient.Client(user_name=self.cluster_account.cluster_user_name,\n password=self.cluster_account.cluster_password,\n auth_url=self.cluster_account.cluster.auth_url,\n tenant_name=self.name,\n )\n self.token = json.dumps(client.auth_ref)\n else:\n client = keystoneclient.Client(auth_ref=json.loads(self.token))\n # keystoneclient authenticates lazily, i.e. It doensn't actually\n # authenticates until the first time it needs the token for\n # someting. We'd like to find out about failures now (in\n # particular, it's easier to clear a bad token here than somewhere\n # else in the code. authenticate() forces it to auth right now:\n client.authenticate()\n return client\n except AuthorizationFailure:\n # Clear the token if auth failed:\n self.token = None\n raise",
"def get_client(self):\n return self.client",
"def client(self):\n\n if self._client is None:\n self._client = self._get_client()\n return self._client",
"def make_client(self, context):\n return Client(self.settings['client_routing'], context=context)",
"def get_instance(instance):\n command = 'nova show %s' % instance\n return parse_output(Popen(command.split(), stdout=STDOUT,\n stderr=STDERR).communicate()[0])",
"def get_client():\n return Client(__address, authkey='strumamor')",
"def _get_client():\n\n return datastore.Client()",
"def _get_client():\n\n return datastore.Client()",
"def make_rest_client(\n service_key, options=None,\n app_name=None, app_version=None, version=None,\n **kwargs):\n cloud = get_config(\n service_key=service_key, options=options,\n app_name=app_name, app_version=app_version,\n **kwargs)\n return cloud.get_session_client(service_key, version=version)",
"def client():\n return Client(**common_data.AUTH_ARGS)",
"def get_client():\n\n return MongoClientManager().client",
"def get_client(self, name):\n return self.get_clients(as_dict=True).get(name)",
"def get_client():\n\n client = Elasticsearch(host=HOST, port=PORT, timeout=300)\n\n # wait for yellow status\n for _ in range(100):\n time.sleep(.1)\n try:\n # This errors because of decorators. Silence it.\n # pylint: disable=E1123\n client.cluster.health(wait_for_status='yellow')\n return client\n except ConnectionError:\n continue\n else:\n # timeout\n raise SkipTest(\"Elasticsearch failed to start.\")"
] | [
"0.8543932",
"0.7357674",
"0.73333573",
"0.72716653",
"0.7151151",
"0.6966708",
"0.69495815",
"0.6702617",
"0.667086",
"0.6567023",
"0.6537803",
"0.6491878",
"0.64906466",
"0.64767176",
"0.6461029",
"0.64514875",
"0.63861746",
"0.63556963",
"0.63503575",
"0.63300276",
"0.6299703",
"0.62650293",
"0.62271005",
"0.6176792",
"0.6176792",
"0.6146839",
"0.61323464",
"0.60742897",
"0.6069157",
"0.60650337"
] | 0.76938033 | 1 |
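
The record above caches a novaclient instance built from self.get_session(). For context (illustration only, not a dataset row), a minimal self-contained sketch of that pattern with keystoneauth1 follows; the endpoint, credentials and domain names are placeholders rather than values from the dataset, and real code would normally read them from the OS_* environment variables.

from keystoneauth1 import session
from keystoneauth1.identity import v3
from novaclient import client as nova_client

# Placeholder credentials and endpoint (assumptions).
auth = v3.Password(auth_url="http://keystone.example:5000/v3",
                   username="demo", password="secret", project_name="demo",
                   user_domain_id="default", project_domain_id="default")
sess = session.Session(auth=auth)

# The same constructor the cached get_nova() wraps: a version string plus a keystone session.
nova = nova_client.Client("2.1", session=sess)
print(nova.servers.list())
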
Get a glance client instance. | def get_glance(self, version='2'):
if self.glance is None:
self.glance = glanceclient(version, session=self.get_session())
return self.glance | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_glance_client(self):\n session = self._get_keystone_session()\n return glanceclient.client.Client(2, session=session)",
"def client(self):\n\n if self._client is None:\n self._client = self._get_client()\n return self._client",
"def client():\n\n client = Client()\n return client",
"def get_client(self):\n return self.client",
"def get_client():\n client = soundcloud.Client(client_id=CLIENT_ID)\n return client",
"def get_client():\n return Client(__address, authkey='strumamor')",
"def get_unibox_client(self):\n if self._gls_unibox_client is None:\n client = Client(\n self.gls_server,\n self.gls_port\n )\n client.test = self.gls_is_test\n self._gls_unibox_client = client\n\n return self._gls_unibox_client",
"def _get_client():\n\n return datastore.Client()",
"def _get_client():\n\n return datastore.Client()",
"def client(self):\n\t\t# pylint: disable=invalid-name\n\t\treturn self._client",
"def getClient(self):\n authDict = Configuration().read_auth_data_from_config()\n client_key = authDict['client_key']\n client_secret = authDict['client_secret']\n token = authDict['token']\n token_secret = authDict['token_secret']\n\n authorize_OAuth_ob = authorizeOAuth.AuthorizeOAuth(client_key,\n client_secret,\n token,\n token_secret,\n Configuration().read_board_id_config())\n\n trello_client_wrapper = authorize_OAuth_ob.getClient()\n self.set_list(trello_client_wrapper)\n return trello_client_wrapper",
"def client():\n return Client(**common_data.AUTH_ARGS)",
"def make_client(instance):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n instance._api_version[API_NAME],\r\n API_VERSIONS,\r\n )\r\n instance.initialize()\r\n url = instance._url\r\n url = url.rstrip(\"/\")\r\n if '2.0' == instance._api_version[API_NAME]:\r\n client = neutron_client(username=instance._username,\r\n tenant_name=instance._tenant_name,\r\n password=instance._password,\r\n region_name=instance._region_name,\r\n auth_url=instance._auth_url,\r\n endpoint_url=url,\r\n token=instance._token,\r\n auth_strategy=instance._auth_strategy,\r\n insecure=instance._insecure,\r\n ca_cert=instance._ca_cert)\r\n return client\r\n else:\r\n raise exceptions.UnsupportedVersion(_(\"API version %s is not \"\r\n \"supported\") %\r\n instance._api_version[API_NAME])",
"def get_client():\n client_class = _import_by_path(settings.REDISIO_CLIENT_CLASS)\n return client_class(host=settings.REDISIO_HOST,\n port=settings.REDISIO_PORT,\n db=settings.REDISIO_DB)",
"def get_client():\n return storage.Client(project=project_id)",
"def Client(self):\n return self._client",
"def _get_client(self):\n credentials = service_account.Credentials.from_service_account_info(self.service_account_info)\n client = googleapiclient.discovery.build('container', 'v1', credentials=credentials)\n\n return client",
"def client():\n return IceCubedSyncClient(\"api_key\", \"secret\")",
"def _get_client_impl(self):\n api_version = self._get_api_version(None)\n if api_version not in self._client_impls:\n self._create_client_impl(api_version)\n return self._client_impls[api_version]",
"def client(self):\r\n if self._client is None:\r\n self._client = self._client_cls(self._server, self._params, self)\r\n return self._client",
"def get(self, id: int) -> Client:\n\n return self.__clients[id]",
"def make_client(instance):\n network_client = utils.get_client_class(\n API_NAME,\n instance._api_version[API_NAME],\n API_VERSIONS)\n LOG.debug('Instantiating network client: %s', network_client)\n\n endpoint = instance.get_endpoint_for_service_type(\n API_NAME,\n region_name=instance._region_name,\n )\n\n return network_client(\n username=instance._username,\n tenant_name=instance._project_name,\n password=instance._password,\n region_name=instance._region_name,\n auth_url=instance._auth_url,\n endpoint_url=endpoint,\n token=instance.auth.get_token(instance.session),\n insecure=instance._insecure,\n ca_cert=instance._cacert,\n )",
"def client(self):\n return self._client",
"def get_client(version, **kwargs):\n endpoint = kwargs.get('os_endpoint') or kwargs.get('ceilometer_url')\n\n return Client(version, endpoint, **kwargs)",
"def GetClientInstance(release_track=calliope_base.ReleaseTrack.ALPHA):\n api_version = _RELEASE_TRACK_TO_API_VERSION.get(release_track)\n return core_apis.GetClientInstance(_API_NAME, api_version)",
"def Client(api_version, *args, **kwargs):\r\n neutron_client = utils.get_client_class(\r\n API_NAME,\r\n api_version,\r\n API_VERSIONS,\r\n )\r\n return neutron_client(*args, **kwargs)",
"def make_client(self, context):\n return Client(self.settings['client_routing'], context=context)",
"def _get_client(self):\n if self._client is None:\n self._client = self.boto.client(service_name='elb', region_name=self.boto.cli_region)\n\n return self._client",
"def _get_client(self):\n _client = KOPS(provider=self.provider, config=self.config)\n return _client",
"def client(self):\n\n return self._client"
] | [
"0.8825718",
"0.7007617",
"0.69371164",
"0.6927475",
"0.6900914",
"0.68201137",
"0.6812655",
"0.6797418",
"0.6797418",
"0.6663279",
"0.6625562",
"0.6587854",
"0.65376866",
"0.6535913",
"0.65322053",
"0.6514193",
"0.64772695",
"0.64697844",
"0.6460001",
"0.64531165",
"0.64016825",
"0.6366114",
"0.63543135",
"0.63537127",
"0.6349735",
"0.634593",
"0.6321513",
"0.6312319",
"0.6306634",
"0.6287351"
] | 0.7610261 | 1 |
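
In the document above, glanceclient(version, session=...) is called directly, which suggests the Client class was bound to that name at import time; that reading is an assumption about the surrounding module. A minimal sketch of equivalent python-glanceclient usage follows (illustration only, with the same placeholder credentials as in the nova sketch above).

from keystoneauth1 import session
from keystoneauth1.identity import v3
from glanceclient import Client as glanceclient  # bound to the name used in the document

# Placeholder credentials (assumptions).
auth = v3.Password(auth_url="http://keystone.example:5000/v3",
                   username="demo", password="secret", project_name="demo",
                   user_domain_id="default", project_domain_id="default")
sess = session.Session(auth=auth)

glance = glanceclient('2', session=sess)  # the call the document caches
for image in glance.images.list():
    print(image.id, image.name)
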
Get a swift client.Connection instance. | def get_swift(self):
if self.swift is None:
self.swift = swiftclient.Connection(
auth_version='3',
authurl=self.auth_kwargs["auth_url"],
user=self.auth_kwargs["username"],
key=self.auth_kwargs["password"],
tenant_name=self.auth_kwargs["project_name"]
)
return self.swift | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_connection(self):\n c = httplib.HTTPConnection(self.server)\n return c",
"def get_swiftclient():\n swift_conn = swiftclient.client.Connection(\n authurl=os.environ.get(\"OS_AUTH_URL\"),\n user=os.environ.get(\"OS_USERNAME\"),\n key=os.environ.get(\"OS_PASSWORD\"),\n tenant_name=os.environ.get(\"OS_TENANT_NAME\"),\n auth_version=\"2.0\",\n )\n return swift_conn",
"def get_connection(self):\n from pymongo.connection import Connection\n \n if self._connection is None:\n self._connection = Connection(self.host, self.port)\n return self._connection",
"def __get_connection(self) -> HTTPConnection:\n return HTTPConnection(self.__host, self.__port)",
"def _make_swift_connection(self, auth_url, user, key):\n snet = self.snet\n logger.debug(_(\"Creating Swift connection with \"\n \"(auth_address=%(auth_url)s, user=%(user)s, \"\n \"snet=%(snet)s)\") % locals())\n return swift_client.Connection(\n authurl=auth_url, user=user, key=key, snet=snet)",
"def __GetConnection(self):\n\n self.conn = httplib.HTTPConnection(BLIP_API_URL)\n return self.conn",
"def connection(self) -> \"Connection[Any]\":\n return self._conn",
"def get_connection(self):\n if self.conn is None or self.conn.closed != 0:\n self._connect()\n logger.debug(f'The connection object is: {self.conn}.')\n return self.conn",
"def connect():\n return connection.Connection(username=api_user,\n api_key=api_key,\n region=api_region)",
"def connection(self) -> Connection:\n if not self._connection:\n self._connection = self.engine.connect()\n\n return self._connection",
"def get(self, conn_alias: str) -> \"BaseDBAsyncClient\":\n storage: Dict[str, \"BaseDBAsyncClient\"] = self._get_storage()\n try:\n return storage[conn_alias]\n except KeyError:\n connection: BaseDBAsyncClient = self._create_connection(conn_alias)\n storage[conn_alias] = connection\n return connection",
"def get_conn(self) -> ServiceBusClient:\n conn = self.get_connection(self.conn_id)\n connection_string: str = str(conn.schema)\n if connection_string:\n client = ServiceBusClient.from_connection_string(connection_string, logging_enable=True)\n else:\n extras = conn.extra_dejson\n credential: str | DefaultAzureCredential = self._get_field(extras=extras, field_name=\"credential\")\n fully_qualified_namespace = self._get_field(extras=extras, field_name=\"fully_qualified_namespace\")\n if not credential:\n credential = DefaultAzureCredential()\n client = ServiceBusClient(\n fully_qualified_namespace=fully_qualified_namespace,\n credential=credential, # type: ignore[arg-type]\n )\n\n self.log.info(\"Create and returns ServiceBusClient\")\n return client",
"def get_connection(self):\n return self._connection",
"def get_connection(self):\n return self._connection",
"def get_connection(self):\n if self.__connection is None:\n from pymongo import MongoClient\n from ir_config import IRConfig\n self.__connection = MongoClient(\n IRConfig.get_instance().get('db_host', self.__default_host), \n IRConfig.get_instance().get_int('db_port', self.__default_port))\n return self.__connection",
"def get_connection(self):\n return self.connection",
"def get_connection(self):\n return self.connection",
"def connection(self, connection=None):\n if connection is None:\n return self.engine.acquire()\n return ConnectionProxy(connection=connection)",
"def get_connection(self):\n return self.application.get_connection()",
"def connection(self):\n try:\n con = self.thread.connection\n except AttributeError:\n con = self.steady_connection()\n self.thread.connection = con\n return con",
"def get_connection(self, timeout=None):\n timeout = timeout if timeout else self.timeout\n if self.ssl:\n return HTTPSConnection(self.hostname, self.port, timeout=timeout)\n return HTTPConnection(self.hostname, self.port, timeout=timeout)",
"def _get_connection(self, conf):\n return get_session()",
"def get_conn(self) -> WebClient:\n return self.client",
"def connection():\n return _MockConnection()",
"def _get_connection(reconnect=False):\n global _connection\n identity = get_identity()\n # Connect to the database if not already connected\n if _connection.get(identity) is None or reconnect:\n try:\n _connection[identity] = Connection(**_connection_settings)\n except Exception, e:\n raise ConnectionError(\"Cannot connect to the database:\\n%s\" % e)\n return _connection[identity]",
"def get_connection(hostname, logger):\n return Connection(\n hostname,\n logger=logger,\n sudo=needs_sudo(),\n )",
"def get_conn(self):\n conn = self.get_connection(self.conn_id)\n service_options = conn.extra_dejson\n return BlockBlobService(account_name=conn.login,\n account_key=conn.password, **service_options)",
"def get(self, conn_id: str) -> Connection:\n return Connection.from_dict(self.query(f'{CONNECTION_URL}/{conn_id}'))",
"def get_connection(self):\n\n\t\treturn dbapi.connect(credentials.SERVER,\\\n\t\t\t\t\t\t\t credentials.PORT,\\\n\t\t\t\t\t\t\t credentials.USER,\\\n\t\t\t\t\t\t\t credentials.PASSWORD)",
"async def get(self, conn_id: str) -> Connection:\n return Connection.from_dict(await self.query(f'{CONNECTION_URL}/{conn_id}'))"
] | [
"0.72027934",
"0.7086128",
"0.6808257",
"0.67928064",
"0.6765041",
"0.6715244",
"0.66940707",
"0.66649985",
"0.6593805",
"0.6565049",
"0.65543133",
"0.6501697",
"0.64977556",
"0.64977556",
"0.6481265",
"0.6448724",
"0.6448724",
"0.64482147",
"0.6444177",
"0.643537",
"0.64260095",
"0.640639",
"0.6387979",
"0.6386397",
"0.63863015",
"0.6384387",
"0.637795",
"0.6331377",
"0.6307586",
"0.63047826"
] | 0.7363057 | 0 |
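
The swift record above builds a cached swiftclient.Connection from the stored auth_kwargs. For context (illustration only), here is a short sketch of how such a connection is typically used once constructed; the credentials, container and object names are placeholders, and with auth_version='3' a real deployment may also need os_options for domain scoping, which the document omits.

import swiftclient

# Placeholder credentials (assumptions), mirroring the constructor in the document.
swift = swiftclient.Connection(auth_version='3',
                               authurl="http://keystone.example:5000/v3",
                               user="demo", key="secret", tenant_name="demo")

swift.put_container("backups")                            # create (or reuse) a container
swift.put_object("backups", "hello.txt", contents=b"hi")  # upload a small object
headers, body = swift.get_object("backups", "hello.txt")  # read it back
print(body)
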
'voltage' should be a dict of numpy arrays of floating-point numbers. The keys of 'voltage' are integers, 0-3. Each element of 'voltage' should start and end near zero. 'repetitions' and 'rate' should be integers. | def __init__(
self, voltage={0:(0, 0)}, rate=500, repetitions=1,
board_name='cDAQ1Mod1', voltage_limits=None, num_channels=7):
self.board_name = board_name #Check Measurement and Automation Explorer
self._taskHandle = ctypes.c_void_p(0)
self.num_channels = num_channels
DAQmxErrChk(api.DAQmxCreateTask("", ctypes.byref(self._taskHandle)))
DAQmxErrChk(api.DAQmxCreateAOVoltageChan(
self._taskHandle,
self.board_name + "/ao0:%i"%(num_channels - 1),
"",
ctypes.c_double(-10.0), #Minimum voltage
ctypes.c_double(10.0), #Maximum voltage
10348, #DAQmx_Val_Volts; don't question it!
ctypes.c_void_p(0), #NULL
))
self.num_points_written = ctypes.c_long(0)
self._unwritten_voltages = False
self._unplayed_voltages = False
self.set_voltage_and_timing(voltage, rate, repetitions, voltage_limits)
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_voltage_data(self):\n self.voltage_record = {}\n self.threshold_value = {}\n for l in self.network.layers:\n if 'v' in self.network.layers[l].__dict__:\n self.voltage_record[l] = self.network.monitors['{:}_voltages'.format(l)].get('v')\n if 'thresh' in self.network.layers[l].__dict__:\n self.threshold_value[l] = self.network.layers[l].thresh",
"def str_voltages(self, key, bypass_voltage):\r\n # If we already have the key, we're done\r\n if key in self.string_keys:\r\n return self.string_keys[key]\r\n model = key[0] # unpack the key\r\n pattern = key[1:]\r\n index = len(self.string_voltages)\r\n self.string_keys[key] = index\r\n # compute the combined voltage array\r\n try:\r\n cindex, multiple = pattern[0]\r\n svoltages = self.cell_voltages[cindex] * multiple\r\n for cindex, multiple in pattern[1:]:\r\n svoltages += self.cell_voltages[cindex] * multiple\r\n except:\r\n svoltages = self.cell_voltages[pattern[0]] * pattern[1]\r\n\r\n if bypass_voltage > 0:\r\n bypassed = svoltages < -bypass_voltage\r\n svoltages[bypassed] = -bypass_voltage\r\n self.string_voltages.append({\r\n 'voltages': svoltages,\r\n 'bypass': bypassed,\r\n })\r\n else:\r\n self.string_voltages.append({\r\n 'voltages': svoltages,\r\n 'bypass': None,\r\n })\r\n logger.debug(f'[{index:04d}] SV {pattern}')\r\n return index",
"def update_fft(data):\n if data is None or data['rate'] is None:\n raise PreventUpdate\n x = np.fft.rfftfreq(len(data['val_list']), d=data['rate'])[10:]\n y = np.abs(np.fft.rfft(data['val_list']))[10:]\n return {'x': [x], 'y': [y]}, [0], len(y)",
"def voltage_conversion(self):\r\n\t\tvoltage = ((self.data[0] * 256 + self.data[1]) / 65536.0) * 5.0\r\n\t\t\r\n\t\treturn {'v' : voltage}",
"def __init__(self, parent):\n \n #60 32 bit integers are recorded for the amplifier sample time index \n self.sample_time_index = []\n for i in range(60):\n sample_time = np.int32(struct.unpack('i', parent.rhd.read(4)))[0]\n self.sample_time_index.append(sample_time)\n\n #Amplifier voltages for each channel\n self.electrode_traces = {}#key: channel name value: voltage trce\n for amp in parent._AMPLIFIER_CHANNELS:\n electrode_voltage_trace = []\n #60 samples per channel, int16\n for i in range(60):\n electrode_voltage = np.uint16(struct.unpack('H', parent.rhd.read(2)))[0]\n electrode_voltage_trace.append(electrode_voltage)\n self.electrode_traces[amp] = electrode_voltage_trace \n\n #Get voltage from Aux input channels\n self.auxilary_traces = {}\n for aux in parent._AUX_CHANNELS:\n aux_voltage_trace = []\n #15 samples per channel, int16\n for i in range(15):\n aux_voltage = np.uint16(struct.unpack('H', parent.rhd.read(2)))[0]\n aux_voltage_trace.append(aux_voltage)\n self.auxilary_traces[aux] = aux_voltage_trace \n\n #get voltage from supply voltage channels\n self.supply_voltages = {}\n for sup in parent._SUPPLY_VOLTAGE_CHANNELS:\n sup_voltage_list = []\n for i in range(1):\n sup_voltage = np.uint16(struct.unpack('H', parent.rhd.read(2)))[0]\n sup_voltage_list.append(sup_voltage)\n self.supply_voltages[sup] = sup_voltage_list \n\n #get voltage from temerature sensor channels\n self.temerature_sensor_readings = {}\n for n in range(parent._TEMP_SENSORS):\n temp_list = []\n for i in range(1):\n temperature = np.int16(struct.unpack('h', parent.rhd.read(2)))[0]\n temp_list.append(temperature)\n self.temerature_sensor_readings[n] = temp_list \n\n #Get voltage ADC inputs\n self.board_adc_input_voltages = {}\n for adc in parent._ADC_INPUT_CHANNELS:\n adc_input_list = []\n for i in range(60):\n adc_input = np.uint16(struct.unpack('H', parent.rhd.read(2)))[0]\n adc_input_list.append(adc_input)\n self.board_adc_input_voltages[adc] = adc_input_list \n\n #Get digital input values\n self.board_digital_inputs = {}\n for dig in parent._DIGITAL_INPUT_CHANNELS :\n digital_input_list = []\n for i in range(60):\n digital_input = np.uint16(struct.unpack('H', parent.rhd.read(2)))[0]\n digital_input_list.append(digital_input)\n self.board_digital_inputs[dig.native_channel_name] = digital_input_list",
"def s4_1min_1freq(powerData1,timevec,elevaData,azitmData):\n\t#TODO : empezar desde el primer minuto del tiempo, no desde el inicio del dictionaries\n\t#TODO : calcular s4 para l1 y l2 al mismo tiempo, deberia reducir la mitad del tiempo\n\t#TODO : dont take into account snr2 if this comes with a lot of 0000000 zeros\n\ts4_values1=[]\n\n\ts4_times=[]\n\ts4_avgSNR1 = [] #\n\ts4_avgSNR2 = [] #\n\ts4_points1 = [] # s4_points_per_minute\n\n\ts4_timesr=[]\n\ts4_elev=[]\n\ts4_azit=[]\n\n\tfor eachminute in range(0,1440):\n\t\ts4_times.append(eachminute/60.0)\n\n\ttmp_amplitudes1 = []\n\ttmp_amplitudesdB1=[]\n\ttmp_elevations = []\n\ttmp_azimuths = []\n\n\tinit_index=0\n\n\tarr=np.array(timevec)+np.ones([len(timevec)])*(18.0/3600.0) # #SEPTENTRIO USES DATA from 1 MINUTE GPS TIME\n\t########################\n\tfor eachminute in s4_times:\n\t\tidxarray = (arr >= eachminute) & (arr < (eachminute+(1/60.0)) )# bool array\n\t\ttmp_amplitudesdB1 = powerData1[idxarray]\n\t\ttmp_elevations = elevaData[idxarray]\n\t\ttmp_azimuths = azitmData[idxarray]\n\t\ttmp_amplitudes1=list(map(pow10,tmp_amplitudesdB1))#use numpy.power\n\n\n\t\tif len(tmp_amplitudes1)>0:\n\t\t\ts4_1 = np.std(tmp_amplitudes1,ddof=1) / np.mean(tmp_amplitudes1)\n\t\telse:\n\t\t\ts4_1 = float(\"nan\")\n\n\t\ts4_values1.append(s4_1)\n\t\ts4_avgSNR1.append(np.mean(tmp_amplitudesdB1))\n\t\ts4_timesr.append(eachminute+1/60.0) #Septentrio has the timestamp 1 min in advance\n\t\ts4_points1.append(len(tmp_amplitudes1))\n\t\ts4_elev.append(np.mean(tmp_elevations))\n\t\ts4_azit.append(np.mean(tmp_azimuths))\n\n\treturn s4_values1,s4_timesr,s4_points1,s4_elev,s4_azit,s4_avgSNR1",
"def incremental_rv(\n wavelength: ndarray,\n flux: ndarray,\n *,\n mask: Optional[Union[Quantity, ndarray]] = None,\n percent: float = 10,\n **kwargs,\n) -> Tuple[ndarray, ndarray]:\n positions = log_chunks(wavelength, percent)\n velocities = []\n for pos1, pos2 in zip(positions[:-1], positions[1:]):\n pos_mask = (wavelength >= pos1) & (wavelength < pos2)\n if np.sum(pos_mask) <= 1:\n # 1 or less points in this section\n continue\n\n x = wavelength[pos_mask]\n y = flux[pos_mask]\n if mask is not None:\n z = mask[pos_mask]\n else:\n z = mask # None\n try:\n rv_calc = rv_precision(x, y, mask=z, **kwargs).value\n except:\n rv_calc = np.nan\n velocities.append([np.nanmean(x), rv_calc])\n\n x, rv = np.asarray(velocities).T\n return x, rv",
"def get_voltage_rating(self):\n summary = self.get_version_summary()\n pattern = '\\$.*? .*? .*? .*? .*? (.*?) .*? .*? .*? \\r\\n' \n rating = int(re.findall(pattern,summary).pop())\n return rating",
"def get_ratio_metrics(\n ratio_metric_specs: Dict[iter8id, RatioMetricSpec], \n counter_metric_specs: Dict[iter8id, CounterMetricSpec], \n counter_metrics: Dict[iter8id, Dict[iter8id, CounterDataPoint]], \n versions: Iterable[Version],\n start_time: datetime) -> Dict[iter8id, Dict[iter8id, RatioDataPoint]]:\n rmd = {version.id: {} for version in versions} # initialize rmd\n\n # populate rmd\n for ratio_metric_spec in ratio_metric_specs.values():\n query_spec = RatioQuerySpec(\n version_label_keys = versions[0].version_labels.keys(),\n numerator_template = counter_metric_specs[ratio_metric_spec.numerator].query_template,\n denominator_template = counter_metric_specs[ratio_metric_spec.denominator].query_template,\n start_time = start_time\n )\n prmq = PrometheusRatioMetricQuery(query_spec, versions)\n current_time = datetime.now(timezone.utc)\n rmd_from_prom = prmq.query_from_spec(current_time)\n\n for version in versions:\n if version.id in rmd_from_prom:\n rmd[version.id][ratio_metric_spec.id] = rmd_from_prom[version.id]\n else:\n if version.id in counter_metrics and counter_metrics[version.id][ratio_metric_spec.denominator].value:\n rmd[version.id][ratio_metric_spec.id] = RatioDataPoint(\n value = 0,\n timestamp = current_time,\n status = StatusEnum.zeroed_ratio\n )\n else:\n rmd[version.id][ratio_metric_spec.id] = RatioDataPoint(\n value = None,\n timestamp = current_time,\n status = StatusEnum.absent_version_in_prom_response\n )\n \"\"\"if a version cannot be found in the list of ratio metrics returned by prometheus, then the value of the ratio is set to zero if denominator is non-zero, and is set to None otherwise.\n \"\"\"\n\n return rmd",
"def test_get_voltage_maps(self):\n pass",
"def parsesetting(conf, rate, loopnum):\n global numpy, math, funcchoose\n cp = numpy.array([float(val)/1000 for val in conf[0] if val != ''])\n ncp = len(cp)\n ct = numpy.array([float(val)/1000 for val in conf[1][:ncp]])\n cv = numpy.array([float(val) for val in conf[2][:ncp]])\n \n dcp = numpy.array([float(val)/1000 for val in conf[3][:ncp]]) \n dct = numpy.array([float(val)/1000 for val in conf[4][:ncp]]) \n dcv = numpy.array([float(val)/1000 for val in conf[5][:ncp]]) \n\n special = numpy.array([int(val) for val in conf[6][:ncp]])\n reserve = [val for val in conf[7][:ncp]]\n for i in range(len(reserve)):\n reserve[i] = [float(part) for part in reserve[i].split(';')]\n\n cp += loopnum * dcp\n totalt = cp[-1] + ct[-1] # the last interval plus change\n\n changes = []\n for i in range(ncp):\n vprev = cv[i-1] + loopnum * dcv[i-1]\n vthis = cv[i] + loopnum * dcv[i]\n timescale = ct[i] + loopnum * dct[i]\n if timescale == 0:\n changes += [[vthis]]\n else:\n intervals = int(timescale * rate) # implicit rounding down\n tsteps = numpy.linspace(0, intervals/rate, intervals + 1)\n\n try:\n funcshape = funcchoose[special[i]]\n except KeyError:\n raise NotImplementedError(\"Time dependence: %d\" %special[i])\n\n if funcshape == 'adiabatic':\n A, B = numpy.power([vprev, vthis], -0.5)\n a = (A - B) / timescale\n vals = 1 / (A - a * tsteps)**2\n elif funcshape == 'exponential':\n timeconstant = reserve[i][0] / 1000 # it is in ms\n if vthis < vprev:\n vals = numpy.max([vprev * numpy.exp(-tsteps/timeconstant), [vthis] * (intervals+1)], axis=0)\n else:\n vals = numpy.min([vprev * numpy.exp(tsteps/timeconstant), [vthis] * len(tsteps)], axis=0)\n elif funcshape == 'sine':\n params = reserve[i]\n\n deltaamp = params[2]\n deltafreq = params[3]\n amplitude = params[0] + loopnum * deltaamp\n freq = params[1] + loopnum * deltafreq\n\n vals = 0.5 * amplitude * numpy.sin(2 * numpy.pi * tsteps * freq) + vthis\n elif funcshape == 'linear':\n vals = (vthis - vprev) * tsteps / timescale + vprev\n else:\n raise ValueError\n\n if tsteps[-1] < timescale:\n vals = numpy.append(vals, vthis)\n vals = numpy.append(vals, vthis)\n changes += [list(vals)]\n\n intervals = int(math.ceil(totalt * rate))\n tlist = numpy.linspace(0, intervals/rate, intervals+1)\n\n icp = 0\n counter = 0\n values = []\n for t in tlist:\n if icp < (ncp-1) and t >= cp[icp + 1]:\n icp += 1\n counter = 0\n\n if counter == 0:\n nvals = len(changes[icp])\n\n if counter < nvals:\n newval = changes[icp][counter]\n counter += 1\n else:\n newval = changes[icp][-1]\n values += [newval]\n return numpy.array(values)",
"def addRateParams(spec, data_card, channels, modifiers):\n measurements = [\n measurement[\"config\"][\"poi\"] for measurement in spec[\"measurements\"]\n ]\n signal_mods = [modifier[0] for modifier in modifiers if modifier[0] in measurements]\n\n for idxc, channel in enumerate(channels):\n for idxs, sample in enumerate((spec[\"channels\"][idxc][\"samples\"])):\n is_signal = any(mod[\"name\"] in signal_mods for mod in sample[\"modifiers\"])\n if not is_signal:\n for mod in spec[\"channels\"][idxc][\"samples\"][idxs][\"modifiers\"]:\n # normfactor or shapefactor\n if \"normfactor\" in mod[\"type\"] or \"shapefactor\" in mod[\"type\"]:\n for measurement in spec[\"measurements\"]:\n for param in measurement[\"config\"][\"parameters\"]:\n data_card.rateParams.update(\n {f\"{channel}AND\" + sample[\"name\"]: []}\n )\n if mod[\"name\"] == param[\"name\"]:\n data_card.rateParams[\n f\"{channel}AND\" + sample[\"name\"]\n ].append([[mod[\"name\"], 1, 0, param[\"bounds\"]], \"\"])\n else:\n data_card.rateParams[\n f\"{channel}AND\" + sample[\"name\"]\n ].append([[mod[\"name\"], 1, 0], \"\"])",
"def increaseFreq(self, desHz):\n from scipy.interpolate import interp1d\n import time\n from numpy import linspace, floor\n from decimal import getcontext, Decimal\n\n if desHz > 1000: # set max freq here \n raise ValueError('Max Frequency is 1000 (3 decimal places)')\n now = time.asctime(time.localtime(time.time())) \n stamp = ''.join(['%% The following created by alog_manip.MOOSalog.MOOSalog.increaseFreq\\n%% ', now])\n increase_msg = ''.join(['%% Resultant Frequency: ',str(desHz),' Hz'])\n # hiHz = {}\n self.outData = {} # erase pre-existing dict\n self.outData['header'] = [stamp,increase_msg,'%%%%'] + self.srcData['header']\n\n def create_msgs():\n \"\"\" Puts interpolated data into dict outData\n Primary interpolation function for increaseFreq\n Consider using uniaxial spline --> would have one function for all of dictionary dat\n \"\"\"\n getcontext().prec = 3 # will round to 3 decimal places\n orig_times = sorted(dat)\n for n in range(len(dat) - 1):\n linfun = interp1d([orig_times[n], orig_times[n+1]], \\\n [dat[orig_times[n]], dat[orig_times[n+1]]])\n dt = orig_times[n+1] - orig_times[n] # current\n freq = 1/dt # current\n if dt < (1/desHz):\n print('found instance where Freq already at/above desired Freq')\n else:\n new_dt = dt*freq/desHz\n new_times = linspace(orig_times[n],orig_times[n+1],floor(dt/new_dt))\n # print(new_times)\n new_values = linfun(new_times)\n # rounded_values = [float(Decimal(\"%.3f\" % e)) for e in new_values]\n rounded_times = [float(Decimal(\"%.3f\" % e)) for e in new_times]\n for m in range(len(rounded_times)):\n # this_time = int(new_times[m]*100000)/100000 # 5 decimal places in timstamp\n self.outData[sens][meas][rounded_times[m]] = new_values[m]\n\n ## go thru and pull out dictionaries {time: value} then send to interpolation func\n for sens in self.srcData:\n if sens is not 'header':\n self.outData[sens] = {}\n for meas in self.srcData[sens]:\n self.outData[sens][meas] = {}\n dat = self.srcData[sens][meas]\n if len(dat) == 1:\n self.outData[sens][meas] = dat # only 1 data point, no interp\n else:\n create_msgs()",
"def test_1d_freq():\n \n dic,data = ng.pipe.read(\"common_data/1d_pipe/test.ft\")\n assert data.shape == (4096,)\n assert data.dtype == 'float32'\n assert round(data[0],2) == -63789.66\n assert round(data[1],2) == -63159.88\n assert round(data[100],2) == -29308.34\n write_readback(dic,data)\n check_ppm_limits(dic,data,0,[297.92, -99.82])",
"def vibrate(self, pattern):\n raise NotImplementedError",
"def get_slack_voltage(self):\n first_val = self.__voltage_no_load\n second_val = self.__voltage_no_load * np.exp(-2j * np.pi/3)\n third_val = self.__voltage_no_load * np.exp(2j * np.pi/3)\n matrix = np.array(([first_val], [second_val], [third_val]), dtype=np.complex128)\n slack_voltage = np.tile(matrix, (self.get_nb_brackets()-1, 1))\n return slack_voltage",
"def pressure_dict(calib, f, t):\n #array mode\n try:\n pressure = []\n \"\"\"Equation expecting pressure period in microseconds, so divide f by 1,000,000. \"\"\"\n uf = [x/1000000 for x in f]\n for f_x, t_x in zip(uf, t):\n T0 = calib['T1'] + calib['T2']*t_x + calib['T3']*math.pow(t_x,2) + calib['T4']*math.pow(t_x,3)\n w = 1-T0*T0*f_x*f_x\n temp = (0.6894759*((calib['C1']+calib['C2']*t_x+calib['C3']*t_x*t_x)*w*(1-(calib['D1']+calib['D2']*t_x)*w)-14.7))\n pressure.append(round(temp,2))\n #single mode\n except:\n T0 = calib['T1'] + calib['T2']*t + calib['T3']*math.pow(t,2) + calib['T4']*math.pow(t,3)\n w = 1-T0*T0*f*f\n pressure = (0.6894759*((calib['C1']+calib['C2']*t+calib['C3']*t*t)*w*(1-(calib['D1']+calib['D2']*t)*w)-14.7))\n return pressure",
"def populateDict(self):\n for dev in self.dcDict:\n range = self.dcDict[dev]['range']\n for devChannel in self.dcDict[dev]['devChannels']:\n channel = self.dcDict[dev]['devChannels'][devChannel]['channel']\n comstring = str(channel)+'r'\n yield self.ser.write(comstring)\n encoded = yield self.ser.read(3)\n seq = int(binascii.hexlify(encoded[0:2]),16)\n voltage = round(range[0] + float(seq) / (2**16 - 1) * float(range[1]-range[0]),2)\n self.dcDict[dev]['devChannels'][devChannel]['value'] = voltage",
"def _volumetric_flux(recarray, modeltime, extrapolate_kper=False):\n pd = import_optional_dependency(\n \"pandas\",\n error_message=\"ZoneBudget._volumetric_flux() requires pandas.\",\n )\n\n nper = len(modeltime.nstp)\n volumetric_data = {}\n zones = np.unique(recarray[\"zone\"])\n\n for key in recarray.dtype.names:\n volumetric_data[key] = []\n\n if extrapolate_kper:\n volumetric_data.pop(\"kstp\")\n perlen = modeltime.perlen\n totim = np.add.accumulate(perlen)\n for per in range(nper):\n idx = np.where(recarray[\"kper\"] == per)[0]\n\n if len(idx) == 0:\n continue\n\n temp = recarray[idx]\n\n for zone in zones:\n if zone == 0:\n continue\n\n zix = np.where(temp[\"zone\"] == zone)[0]\n\n if len(zix) == 0:\n raise Exception\n\n for key in recarray.dtype.names:\n if key == \"totim\":\n volumetric_data[key].append(totim[per])\n\n elif key == \"tslen\":\n volumetric_data[\"perlen\"].append(perlen[per])\n\n elif key == \"kstp\":\n continue\n\n elif key == \"kper\":\n volumetric_data[key].append(per)\n\n elif key == \"zone\":\n volumetric_data[key].append(zone)\n\n else:\n tmp = np.nanmean(temp[zix][key])\n vol = tmp * perlen[per]\n volumetric_data[key].append(vol)\n\n else:\n n = 0\n tslen = {}\n dtotim = {}\n totim = modeltime.totim\n for ix, nstp in enumerate(modeltime.nstp):\n for stp in range(nstp):\n idx = np.where(\n (recarray[\"kper\"] == ix) & (recarray[\"kstp\"] == stp)\n )\n if len(idx[0]) == 0:\n continue\n elif n == 0:\n tslen[(stp, ix)] = totim[n]\n else:\n tslen[(stp, ix)] = totim[n] - totim[n - 1]\n dtotim[(stp, ix)] = totim[n]\n n += 1\n\n ltslen = [tslen[(rec[\"kstp\"], rec[\"kper\"])] for rec in recarray]\n if len(np.unique(recarray[\"totim\"])) == 1:\n ltotim = [dtotim[(rec[\"kstp\"], rec[\"kper\"])] for rec in recarray]\n recarray[\"totim\"] = ltotim\n\n for name in recarray.dtype.names:\n if name in (\"zone\", \"kstp\", \"kper\", \"tslen\", \"totim\"):\n volumetric_data[name] = recarray[name]\n else:\n volumetric_data[name] = recarray[name] * ltslen\n\n return pd.DataFrame.from_dict(volumetric_data)",
"def serialize_numpy(self, buff, numpy):\n try:\n _x = self\n buff.write(_struct_3I.pack(_x.header.seq, _x.header.stamp.secs, _x.header.stamp.nsecs))\n _x = self.header.frame_id\n length = len(_x)\n buff.write(struct.pack('<I%ss'%length, length, _x))\n _x = self\n buff.write(_struct_51B.pack(_x.temp_1_curr, _x.temp_1_min, _x.temp_1_max, _x.temp_2_curr, _x.temp_2_min, _x.temp_2_max, _x.temp_3_curr, _x.temp_3_min, _x.temp_3_max, _x.temp_4_curr, _x.temp_4_min, _x.temp_4_max, _x.temp_5_curr, _x.temp_5_min, _x.temp_5_max, _x.temp_6_curr, _x.temp_6_min, _x.temp_6_max, _x.akku_voltage_curr, _x.akku_voltage_min, _x.akku_voltage_max, _x.hals_motor_voltage_curr, _x.hals_motor_voltage_min, _x.hals_motor_voltage_max, _x.hals_logik_voltage_curr, _x.hals_logik_voltage_min, _x.hals_logik_voltage_max, _x.tablett_logik_voltage_curr, _x.tablett_logik_voltage_min, _x.tablett_logik_voltage_max, _x.arm_logik_voltage_curr, _x.arm_logik_voltage_min, _x.arm_logik_voltage_max, _x.tablett_motor_voltage_curr, _x.tablett_motor_voltage_min, _x.tablett_motor_voltage_max, _x.hals_motor_current_curr, _x.hals_motor_current_min, _x.hals_motor_current_max, _x.hals_logik_current_curr, _x.hals_logik_current_min, _x.hals_logik_current_max, _x.tablett_logik_current_curr, _x.tablett_logik_current_min, _x.tablett_logik_current_max, _x.arm_logik_current_curr, _x.arm_logik_current_min, _x.arm_logik_current_max, _x.tablett_motor_current_curr, _x.tablett_motor_current_min, _x.tablett_motor_current_max))\n except struct.error, se: self._check_types(se)\n except TypeError, te: self._check_types(te)",
"def set_voltages(): \n #0) set parameters\n from project_parameters import trapFile,multipoleControls,reg,driveFrequency,ax,az,phi,coefs\n import pickle\n with open(trapFile,'rb') as f:\n trap = pickle.load(f)\n V,X,Y,Z=trap.instance.DC,trap.instance.X,trap.instance.Y,trap.instance.Z\n tc=trap.configuration\n C = tc.multipoleControl\n el = []\n #1) check if trap_knobs has been run yet, creating multipoleControl and multipoleKernel\n if tc.trap_knobs != True:\n return 'WARNING: You must run trap_knobs first!'\n #2a) determine electrode voltages directly\n elif multipoleControls: # note plurality to contrast from attribute\n el = np.dot(C,coefs.T) # these are the electrode voltages\n #2b) determine electrode volages indirectly\n else:\n charge = tc.charge\n mass = tc.mass\n V0 = mass*(2*np.pi*frequencyRF)**2/charge\n U2 = az*V0/8\n U1 = U2+ax*V0/4\n U3 = 2*U1*np.tan(2*np.pi*(phi+tc.thetaRF)/180)\n U1p= np.sqrt(U1**2+U3**2/2)\n U4 = U1p*tc.Qrf[4]/tc.Qrf[1]\n U5 = U1p*tc.Qrf[5]/tc.Qrf[1]\n inp = np.array([E[0], E[1], E[2], U1, U2, U3, U4, U5]).T\n mCf = tc.multipoleCoefficients[1:9,:]\n el = np.dot(mCf.T,inp) # these are the electrode voltages\n el = np.real(el)\n #3) regularize if set to do so\n reg = 0\n if reg: \n C = el\n Lambda = np.linalg.lstsq(tc.multipoleKernel,C)\n Lambda=Lambda[0]\n el = el-(np.dot(tc.multipoleKernel,Lambda))\n return el",
"def calibrate_decide(voltage, serial):\n # Based on the SONIC serial number, get the Krypton calibration coeffs\n if serial == 'Gill R2A 0043':\n coeffs = krypton_1199\n elif serial == 'Gill HS 000046':\n coeffs = krypton_1094\n\n # make a storage array\n rho = np.zeros_like(voltage)\n\n # see the percentage of wrong measurements\n num_corrupt_values = (voltage < 0).sum() / len(voltage)\n # after the original script: set negative voltages to nan\n voltage[voltage <= 0] = 0.01\n # if too many values are corrupt, fill all with nans and return\n if num_corrupt_values > 0.2:\n rho.fill(np.nan)\n return rho\n else:\n\n # get rho using full range coeffs\n XKw = coeffs['path_len'] * coeffs['Kwf']\n logV0 = np.log(coeffs['V0f'])\n rho_temp = (np.log(voltage) - logV0) / XKw\n\n # determine new coeffs based on the \"temporary\" values\n if np.mean(rho_temp) > 9:\n if verbose:\n print('high')\n XKw = coeffs['path_len'] * coeffs['Kwh']\n logV0 = np.log(coeffs['V0h'])\n else:\n if verbose:\n print('low')\n XKw = coeffs['path_len'] * coeffs['Kwl']\n logV0 = np.log(coeffs['V0l'])\n # re-calculate rho with these coefficients\n rho = (np.log(voltage) - logV0) / XKw\n\n return rho",
"def update_recruiting(self, rate):\n self.recruit = int(np.ceil(self.INITIAL_POPULATION*rate))",
"def __init__(self):\n\n\n self.dtype = np.dtype([\n ('fault_flags', np.uint32),\n ('raw_x', np.int16),\n ('raw_y', np.int16),\n ('raw_z', np.int16),\n ('accel_x', np.float32),\n ('accel_y', np.float32),\n ('accel_z', np.float32), \n ('pitch', np.float32),\n ('roll', np.float32), \n ])\n \n self._accel_indices = [0, 4, 5, 6]\n \n self.data = np.array([(0, 0.1, 12, 1234, 0.12345678901234, 0.12345678901234, 0.12345678901234, 0.12345678901234, 0.12345678901234)], dtype=self.dtype)\n self.data['fault_flags'] = 0\n self.data['raw_x'] = 0.1\n self.data['raw_y'] = 12\n self.data['raw_z'] = 31929\n self.data['accel_x'] = 0.12345678901234\n self.data['accel_y'] = 0.23456789012345\n self.data['accel_z'] = 0.34567890123456\n self.data['pitch'] = 0.1000\n self.data['roll'] = 0.2000\n\n\n #print len(self.data.tostring(order=\"C\"))",
"def test_RV():\n\n spec = IGRINSSpectrum(file=file)\n\n assert spec.uncertainty is not None\n assert hasattr(spec, \"barycentric_correct\")\n\n correction_velocity = spec.estimate_barycorr()\n\n assert isinstance(spec.RA, astropy.units.quantity.Quantity)\n assert isinstance(spec.DEC, astropy.units.quantity.Quantity)\n assert correction_velocity is not None\n assert isinstance(correction_velocity, astropy.units.quantity.Quantity)\n\n new_spec = spec.barycentric_correct()\n assert new_spec is not None\n assert isinstance(new_spec, Spectrum1D)",
"def test_get_engVoltage(self):\n for app_num, servo_type in app_nr.items():\n try:\n par = self.get_parameter(servo_type, app_num, ENG_VOLTAGE_IDX, ENG_VOLTAGE_SUB)\n param_obj = self.__dict__[servo_type]._get_engVoltage()\n acs_par, completion = param_obj.get_sync()\n if(completion.code):\n print \"\\nError code found in engVoltage...\"\n continue\n self.data_match(acs_par, par)\n except NackEx:\n continue",
"def case():\r\n #ppc = {\"version\": '2'}\r\n ppc = {}\r\n ##----- Power Flow Data -----##\r\n ## system MVA base\r\n ppc[\"baseMVA\"] = 100.0\r\n\r\n ## bus data\r\n # bus_i type Pd Qd Gs Bs area Vm Va baseKV zone Vmax Vmin\r\n ppc[\"bus\"] = array([\r\n [1, 3, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [2, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [3, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [4, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [5, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [6, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [7, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [8, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [9, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [10, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [11, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [12, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [13, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [14, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [15, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0],\r\n [16, 1, 0, 0, 0, 0, 1, 1, 0, 0.4, 1, 1.1, 0.9, 0,0, 0, 0]\r\n ])\r\n\r\n ## generator data\r\n # bus, Pg, Qg, Qmax, Qmin, Vg, mBase, status, Pmax, Pmin, Pc1, Pc2,\r\n # Qc1min, Qc1max, Qc2min, Qc2max, ramp_agc, ramp_10, ramp_30, ramp_q, apf\r\n ppc[\"gen\"] = array([\r\n [1,\t0,\t0,\t10,\t-10,\t1.0224,\t100,\t1,\t10,\t-10,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0,\t0, 0, 0,0, 0, 0],\r\n [3 ,0, 0, 50e-3, -50e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [5 , 0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [10 , 0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [13 ,0, 0, 10e-3, -10e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0],\r\n [15 , 0, 0, 50e-3, -50e-3, 1, 100, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,0, 0, 0]\r\n ])\r\n load_b = array([2, 4, 9, 12, 14])\r\n ppc[\"bus\"][load_b, 2] = multiply(array([-2.1125, -0.2231, -0.1664, -0.0719, -1.4633]).T, 0.03)\r\n ppc[\"bus\"][load_b, 3] = multiply(array([1.6492, 0.4054, 0.8599, 0.8845, 0.6778]).T, 0.03)\r\n ## branch data\r\n # fbus, tbus, r, x, b, rateA, rateB, rateC, ratio, angle, status, angmin, angmax\r\n ppc[\"branch\"] = array([\r\n [1, 2, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [1, 8, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [1, 15, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 3, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 6, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [2, 7, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [3, 4, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [4, 5, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 9, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 12, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [8, 13, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [9, 10, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [9, 14, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [10, 11, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 
1, -360, 360, 0,0, 0, 0, 0,0, 0, 0],\r\n [15, 16, 0.0, 0.0, 0.0, 250, 250, 250, 0, 0, 1, -360, 360, 0,0, 0, 0, 0,0, 0, 0]\r\n ])\r\n R1 = 0.43\r\n L1 = 0.4e-3\r\n RS1 = 0.32\r\n LS1 = 0.39e-3\r\n Zbase = (0.4*0.4/100)\r\n branch_phase =array([\r\n [1, 1, 2, 188, R1, L1],\r\n [2, 1 ,8, 346, R1, L1],\r\n [3 ,1 ,15,501, R1 ,L1],\r\n [4, 2, 3, 130, RS1,LS1],\r\n [5, 2, 6, 145, RS1,LS1],\r\n [6, 2 ,7, 157, RS1,LS1],\r\n [7, 3, 4, 185, RS1,LS1],\r\n [8, 4, 5, 1000,RS1,LS1],\r\n [9, 8 ,9, 416, RS1,LS1],\r\n [10,8 ,12,130, RS1,LS1],\r\n [11,8 ,13,121, RS1,LS1],\r\n [12,9 ,10,130, RS1,LS1],\r\n [13,9 ,14,127, RS1,LS1],\r\n [14,10,11,251, RS1,LS1],\r\n [15,15,16,345, RS1,LS1]\r\n ])\r\n ppc[\"branch\"][:, [2,3]] = multiply(array([branch_phase[:, 4]*branch_phase[:, 3], branch_phase[:, 4]*branch_phase[:, 4]*100*pi]).T,0.001/Zbase)\r\n\r\n ##----- OPF Data -----##\r\n ## area data\r\n # area refbus\r\n\r\n\r\n ## generator cost data\r\n # 1 startup shutdown n x1 y1 ... xn yn\r\n # 2 startup shutdown n c(n-1) ... c0\r\n\r\n\r\n return ppc",
"def gt_voltages(self, key):\r\n # If we already have the (model, G, T) key, we're done\r\n if key in self.cell_keys:\r\n return self.cell_keys[key]\r\n model, insolation, temperature = key # unpack the key\r\n index = len(self.cell_voltages)\r\n self.cell_keys[key] = index\r\n self.cell_voltages.append(\r\n model.voltage([(i, insolation, temperature) for i in self.currents]))\r\n logger.debug(f'[{index:04d}] CV {insolation:.1f} {temperature:.1f}{DEG}C')\r\n return index",
"def setValues(\n self,\n frameRate: int = None,\n timeScale: int = None,\n vpDecorations: Boolean = ON,\n vpBackground: Boolean = OFF,\n compass: Boolean = OFF,\n ):\n pass",
"def get_voltages(self):\n if self.v is None or self.dirty is True:\n v = self.simulator.get_voltages()\n n_compartments = self.neuron_collection.total_compartments()\n self.v = np.array(v).reshape([len(v) / n_compartments, n_compartments])\n\n self.dirty = False\n t = int(self.T / self.dt)\n return self.v[:t, :]"
] | [
"0.5384202",
"0.53538734",
"0.5352361",
"0.529449",
"0.5280764",
"0.5153001",
"0.51174885",
"0.5016881",
"0.4978907",
"0.494465",
"0.49282992",
"0.48579437",
"0.48414347",
"0.48238522",
"0.48228803",
"0.4811493",
"0.48023042",
"0.47906768",
"0.4784123",
"0.47702235",
"0.47668195",
"0.47523436",
"0.4744751",
"0.47311082",
"0.46993333",
"0.46937576",
"0.4684164",
"0.46839955",
"0.46802992",
"0.4666011"
] | 0.55513275 | 0 |
Given rows of normal vectors to line L, return points (rows) that are somewhere on each line. Just find the intersection with some basis line. | def points_on_lines(hyperplanes):
intersections = []
for row in hyperplanes:
intersections.append(an_intersection(row[:-1], -row[-1]))
return np.array(intersections) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_intersect_points(line1, line2):\n intersect_points = matrix.matrix_sol([line1, line2])\n return intersect_points",
"def intersection(line1, line2):\n p0, p1, p2, p3 = map(\n lambda tup : np.array(tup[:2]),\n [line1[0], line1[1], line2[0], line2[1]]\n )\n p1, p2, p3 = map(lambda x : x - p0, [p1, p2, p3])\n transform = np.zeros((2, 2))\n transform[:,0], transform[:,1] = p1, p2\n if np.linalg.det(transform) == 0: return\n inv = np.linalg.inv(transform)\n new_p3 = np.dot(inv, p3.reshape((2, 1)))\n #Where does line connecting (0, 1) to new_p3 hit x axis\n x_intercept = new_p3[0] / (1 - new_p3[1]) \n result = np.dot(transform, [[x_intercept], [0]])\n result = result.reshape((2,)) + p0\n return result",
"def get_intersection(l0, l1):\n # Source: https://en.wikipedia.org/wiki/Line–line_intersection\n\n denominator = (l0[0] - l0[1]) * (l1[2] - l1[3]) -\\\n (l0[2] - l0[3]) * (l1[0] - l1[1])\n\n x_nominator = (l0[0] * l0[3] - l0[2] * l0[1]) * (l1[0] - l1[1]) -\\\n (l1[0] * l1[3] - l1[2] * l1[1]) * (l0[0] - l0[1])\n y_nominator = (l0[0] * l0[3] - l0[2] * l0[1]) * (l1[2] - l1[3]) -\\\n (l1[0] * l1[3] - l1[2] * l1[1]) * (l0[2] - l0[3])\n\n return [x_nominator / denominator, y_nominator / denominator]",
"def _intersection(line_points_0, line_points_1):\n u,v = line_points_0,line_points_1\n (A,B),(C,D) = line_points_0,line_points_1\n h1 = _homogenous_line(A,B)\n h2 = _homogenous_line(C,D)\n P = _intersection_homogenous(h1, h2)\n return P",
"def get_line_intersects_line(self) -> List[List[Line]]:\n intersections = []\n\n for line_bin in self.line_bins.values():\n for connection_pair in itertools.combinations(line_bin, 2):\n line_segments = (\n connection_pair[0].line_segments + connection_pair[1].line_segments\n )\n\n for segment_pair in itertools.combinations(line_segments, 2):\n if check_cross(segment_pair[0], segment_pair[1]):\n intersections.append(connection_pair)\n # for line_bin in self.line_bins.values():\n # segments = []\n # line_idx_map = []\n # for line_1, line_2 in itertools.combinations(line_bin, 2):\n # for segment in line_1.line_segments:\n # if segment[0] != segment[1]:\n # line_idx_map.append(line_1)\n # segments.append(((segment[0].x, segment[0].y), (segment[1].x, segment[1].y)))\n # for segment in line_2.line_segments:\n # if segment[0] != segment[1]:\n # line_idx_map.append(line_2)\n # segments.append(((segment[0].x, segment[0].y), (segment[1].x, segment[1].y)))\n #\n # for collision_point in segments_intersections(segments).values():\n # for intersection in collision_point:\n # intersections.append([line_idx_map[i] for i in intersection])\n return intersections",
"def parallelogram_vertices_from_grouped_lines(lines):\n if len(lines) > 2:\n raise Exception(\"parallelogram finder \\\n called with too many lines\")\n c_1 = lines[0]\n c_2 = lines[1]\n intercepts = None\n for l1, l2 in list(zip(c_1, c_2)) + list(zip(c_1, c_2[::-1])):\n x = solve_for_intersection(np.array([l1, l2]))\n if intercepts is None:\n intercepts = np.array([x])\n else:\n intercepts = np.vstack((intercepts, x))\n return intercepts",
"def lineLineIntersectXY(l1,l2,inside=True,params=False):\n\n x1=l1[0][0]\n y1=l1[0][1]\n z1=l1[0][2]\n \n x2=l1[1][0]\n y2=l1[1][1]\n z2=l1[1][2]\n\n x3=l2[0][0]\n y3=l2[0][1]\n z3=l2[0][2]\n \n x4=l2[1][0]\n y4=l2[1][1]\n z4=l2[1][2]\n\n ## check for x,y planar consistency\n if abs(z2-z1) > epsilon or abs(z3-z1) > epsilon or abs(z4-z1) > epsilon:\n raise ValueError('lines not in same x-y plane')\n\n ## do lines intersect anywhere?\n denom=(x1-x2)*(y3-y4)-(y1-y2)*(x3-x4)\n if denom*denom < epsilon:\n return False\n\n ## the lines do intersect, so let's see if they intersect\n ## inside both line segments\n t = ((x1-x3)*(y3-y4) - (y1-y3)*(x3-x4))/denom\n u = -1 * ((x1-x2)*(y1-y3) - (y1-y2)*(x1-x3))/denom\n\n ## return the paramater space intersection\n if params:\n return [t,u]\n \n ## do we care about falling inside the line segments? if so,\n ## check that the intersection falls within\n if inside and ( t < 0.0 or t > 1.0 or u < 0.0 or u > 1.0):\n return False\n\n return [x1 + t*(x2-x1), y1+t*(y2-y1), z1, 1.0]",
"def find_line_intersection(self, point, vector, Ns=50):\n point = np.asarray(point, dtype=float)\n vector = np.asarray(vector, dtype=float)\n if point.size == 3:\n point = np.array([point[0], point[2]])\n if vector.size == 3:\n vector = np.array([vector[0], vector[2]])\n normal = np.array([-vector[1], vector[0]])\n normal /= norm(normal)\n with self.fix_evaluator():\n def f(t):\n t = clip(t, 0, np.pi)\n rel_vec = self(t) - point\n return normal.dot(rel_vec)\n f0 = f(0)\n if f0 == 0.0:\n return 0.0\n step = np.pi/Ns\n a = 0\n while f(a+step)*f0 > 0:\n if a == np.pi:\n raise RuntimeError(\"Line seems to not intersect curve.\")\n a = min(np.pi, a+step)\n return brentq(f, a=a, b=a+step)",
"def endpoints(line_points):\n neighbors = []\n for p in line_points:\n aux = 0\n for q in line_points:\n if np.linalg.norm(p-q) == 1:\n aux += 1\n neighbors.append(aux)\n e_points = np.where(np.array(neighbors)==1)\n return line_points[e_points]",
"def intersection(line1, line2):\r\n rho1, theta1 = line1[0]\r\n rho2, theta2 = line2[0]\r\n A = np.array([[np.cos(theta1), np.sin(theta1)], [np.cos(theta2), np.sin(theta2)]])\r\n b = np.array([[rho1], [rho2]])\r\n x0, y0 = np.linalg.solve(A, b)\r\n x0, y0 = int(np.round(x0)), int(np.round(y0))\r\n return [[x0, y0]]",
"def line_intersect(line1, line2):\n b1 = (line1[1][1] - line1[0][1]) / (line1[1][0] - line1[0][0])\n b2 = (line2[1][1] - line2[0][1]) / (line2[1][0] - line2[0][0])\n a1 = line1[0][1] - b1 * line1[0][0]\n a2 = line2[0][1] - b2 * line2[0][0]\n\n if a1 == a2 and b1 == b2:\n return line1\n\n xi = - (a1 - a2) / (b1 - b2)\n yi = a1 + b1 * xi\n if (line1[0][0] - xi) * (xi - line1[1][0]) >= 0\\\n and (line2[0][0] - xi) * (xi - line2[1][0]) >= 0\\\n and (line1[0][1] - yi) * (yi - line1[1][1]) >= 0\\\n and (line2[0][1] - yi) * (yi - line2[1][1]) >= 0:\n return xi, yi\n return None",
"def intersection(line1, line2):\n\n rho1, theta1 = line1[0]\n rho2, theta2 = line2[0]\n A = np.array([\n [np.cos(theta1), np.sin(theta1)],\n [np.cos(theta2), np.sin(theta2)]\n ])\n b = np.array([[rho1], [rho2]])\n x0, y0 = np.linalg.solve(A, b)\n x0, y0 = int(np.round(x0)), int(np.round(y0))\n\n return [x0, y0]",
"def intersection_line_line(ab, cd):\n a, b = ab\n c, d = cd\n\n line_vector_1 = vector_from_points(a, b)\n line_vector_2 = vector_from_points(c, d)\n d_vector = cross_vectors(line_vector_1, line_vector_2)\n\n normal_1 = cross_vectors(line_vector_1, d_vector)\n normal_2 = cross_vectors(line_vector_2, d_vector)\n plane_1 = (a, normal_1)\n plane_2 = (c, normal_2)\n\n intx_point_line_1 = intersection_line_plane(ab, plane_2)\n intx_point_line_2 = intersection_line_plane(cd, plane_1)\n\n return [intx_point_line_1, intx_point_line_2]",
"def intersection(line1, line2):\n rho1, theta1 = line1\n rho2, theta2 = line2\n A = np.array([\n [np.cos(theta1), np.sin(theta1)],\n [np.cos(theta2), np.sin(theta2)]\n ])\n b = np.array([[rho1], [rho2]])\n x0, y0 = np.linalg.solve(A, b)\n x0, y0 = int(np.round(x0)), int(np.round(y0))\n return [x0, y0]",
"def intersection( l1, l2):\n #coordonees de la lignes 1\n x1, y1, x2, y2 = l1.point\n #coordonees de la lignes 2\n x3, y3, x4, y4 = l2.point\n #\n a1 = y2 - y1\n b1 = x1 - x2\n a2 = y4 - y3\n b2 = x3 - x4\n #\n c1 = a1 * x1 + b1 * y1\n #\n c2 = a2 * x3 + b2 * y3\n #\n det = a1 * b2 - a2 * b1\n assert det, \"lines are parallel\"\n return (1. * (b2 * c1 - b1 * c2) / det, 1. * (a1 * c2 - a2 * c1) / det)",
"def getIntersection(line1, line2):\r\n\r\n rho1, theta1 = line1[0]\r\n rho2, theta2 = line2[0]\r\n\r\n a = np.array([\r\n [np.cos(theta1), np.sin(theta1)],\r\n [np.cos(theta2), np.sin(theta2)]\r\n ])\r\n\r\n b = np.array([[rho1], [rho2]])\r\n\r\n x, y = np.linalg.solve(a, b)\r\n\r\n x = int(x[0])\r\n y = int(y[0])\r\n\r\n return [np.round(y), np.round(x)]",
"def lineintersect(line1,line2):\n a1, a2, b1, b2=line1[0],line1[1],line2[0],line2[1]\n\n s = np.vstack([a1,a2,b1,b2]) # s for stacked\n h = np.hstack((s, np.ones((4, 1)))) # h for homogeneous\n l1 = np.cross(h[0], h[1]) # get first line\n l2 = np.cross(h[2], h[3]) # get second line\n x, y, z = np.cross(l1, l2) # point of intersection\n if z == 0: # lines are parallel\n return (float('inf'), float('inf'))\n return (x/z, y/z)",
"def intersectionOfTwoLines(p1, v1, p2, v2):\n # if we transform multiple points in one go\n if len(v1.shape) == 2:\n a1 = np.einsum('ij,ij->i', v1, v1)\n a2 = np.einsum('ij,ij->i', v1, v2)\n b1 = -np.einsum('ij,ij->i', v2, v1)\n b2 = -np.einsum('ij,ij->i', v2, v2)\n c1 = -np.einsum('ij,j->i', v1, p1 - p2)\n c2 = -np.einsum('ij,j->i', v2, p1 - p2)\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]).transpose(2, 0, 1), np.array([c1, c2]).T)\n res = res[:, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)\n else: # or just one point\n a1 = np.dot(v1, v1)\n a2 = np.dot(v1, v2)\n b1 = -np.dot(v2, v1)\n b2 = -np.dot(v2, v2)\n c1 = -np.dot(v1, p1 - p2)\n c2 = -np.dot(v2, p1 - p2)\n try:\n res = np.linalg.solve(np.array([[a1, b1], [a2, b2]]), np.array([c1, c2]))\n except np.linalg.LinAlgError:\n return np.ones(3)*np.nan\n res = res[None, None, :]\n return np.mean([p1 + res[..., 0] * v1, p2 + res[..., 1] * v2], axis=0)[0]",
"def get_line_circle_intersections(A, B, C, r):\n Lx = B[0] - A[0]\n Ly = B[1] - A[1]\n Lz = B[2] - A[2]\n\n # stranger things\n D = Lx**2 + Ly**2\n E = 2 * ( Lx * (A[0] - C[0]) + Ly * (A[1] - C[1]) )\n F = (\n (A[0] - C[0])**2\n + (A[1] - C[1])**2\n - r**2\n )\n det = E**2 - 4 * D * F\n \n # declare null vectors\n P1 = [0, 0, 0]\n P2 = [0, 0, 0]\n t1 = t2 = None\n eps = .00001\n if ( not (D <= eps) or (det < 0) ):\n if det == 0:\n print \"tangential intersection found\",\n t1 = t2 = -E / (2*D)\n else:\n print \"pass-through intersection found\",\n t1 = ( (-E + math.sqrt(det)) / (2 * D) )\n t2 = ( (-E - math.sqrt(det)) / (2 * D) )\n P1[0] = A[0] + t1 * Lx\n P1[1] = A[1] + t1 * Ly\n P1[2] = A[2] + t1 * Lz\n P2[0] = A[0] + t2 * Lx\n P2[1] = A[1] + t2 * Ly\n P2[2] = A[2] + t2 * Lz\n else:\n print \"no intersections are available\",\n\n return P1, P2",
"def point_of_intersection(l, pz=distance):\r\n # Must fix the error here. Right now, any vector can have a point in the plane.\r\n # Must make it so that only vectors pointing in the planes direction has a point there\r\n # Can be done by checking whether d is positive or not.\r\n # This is to prevent vectors that point away from the detector to be counted\r\n # The definitions below assume that the detector is centred in the origin and its length is oriented along the z-axis.\r\n p0 = np.array([0,0,pz]) # Point on the plane\r\n l0 = np.array([0,0,0]) # Point on the line\r\n n = np.array([0,0,1]) # Normal vector of the plane\r\n d = np.dot(p0-l0, n)/np.dot(l, n)\r\n point = [i*d for i in l]\r\n return point",
"def intersection(v1, v2):\n x = v1[0:2] + v2[0:2]\n y = v1[2:4] + v2[2:4]\n if( x[3] == 0 ): #To avoid a divide by zero, if x[3] is 0 then we just solve for where lineA equals x[2]\n t1 = (x[2] - x[0])/\\\n (x[1])\n return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]\n\n else: \n t1 = ( y[0] - y[2] + (y[3]/x[3])*(x[2] - x[0]) )/\\\n ( (y[3]*x[1])/x[3] - y[1] )\n return [ v1[0] + v1[1]*t1, v1[2] + v1[3]*t1 ]",
"def intersection(self, L):\n if self.slope() == L.slope():\n return None\n intpt_xcood = (self.c * L.b - L.c * self.b)/(self.a * L.b - L.a * self.b)\n intpt_ycood = (self.c * L.a - L.c * self.a)/(self.b * L.a - L.b * self.a)\n\n return (intpt_xcood, intpt_ycood)",
"def intersection(L1, L2):\n D = L1[0] * L2[1] - L1[1] * L2[0]\n Dx = L1[2] * L2[1] - L1[1] * L2[2]\n Dy = L1[0] * L2[2] - L1[2] * L2[0]\n if D != 0:\n x = Dx / D\n y = Dy / D\n return x, y\n else:\n return False",
"def linePointXY(l,p,inside=True,distance=False,params=False):\n a=l[0]\n b=l[1]\n # check for degenerate case of zero-length line\n abdist = dist(a,b)\n if abdist < epsilon:\n #raise ValueError('zero-length line passed to linePointXY')\n print('zero-length line passed to linePointXY')\n return False\n\n if distance and params:\n raise ValueError('incompatible distance and params parameters passed to linePointXY')\n\n x0=p[0]\n y0=p[1]\n z0=p[2]\n x1=a[0]\n y1=a[1]\n z1=a[2]\n x2=b[0]\n y2=b[1]\n z2=b[2]\n\n ## check to see if all three points lie in the same x,y plane\n if not isXYPlanar([p,a,b]):\n raise ValueError('non-XY points in linePointXY call')\n return false\n # if abs(z1-z0) > epsilon or abs(z2-z0) > epsilon:\n # return False\n\n linedist = abs( ((y2-y1)*x0 - (x2-x1)*y0 + x2*y1 - y2*x1)/abdist)\n\n ## this is the fast case:\n if not inside and distance:\n return linedist\n \n ## find out where the intersection between the original line and a\n ## line defined by the point and an orthogonal direction vector\n ## is. We do this by constructing two direction vectors\n ## orthogonal to the orgiginal line scaled by the line distance,\n ## and adding them to the point in question. Assuming that the\n ## line distance is not zero, only one of these constructed points\n ## will fall on the line\n\n ## compute unit direction vector for original line\n dir = sub(b,a)\n dir = scale3(dir,1.0/mag(dir))\n\n ## compute two orthogonal direction vectors of length linedist\n ordir1 = scale3(orthoXY(dir),linedist)\n ordir2 = scale3(ordir1, -1.0)\n \n ## there are two possible intersection points\n pi1 = add(p,ordir1)\n pi2 = add(p,ordir2)\n\n ## compute distances\n d1pa = dist(a,pi1)\n d1pb = dist(pi1,b)\n d1 = d1pa+d1pb # \"triangle\" with pi1\n\n d2pa = dist(a,pi2)\n d2pb = dist(pi2,b)\n d2 = d2pa+d2pb # \"triangle\" with pi2\n\n ## the shortest \"triangle\" distance will signal the point that\n ## is actually on the line, even if that point falls outside\n ## the a,b line interval\n \n if params or not inside: # if we don't care about being inside the\n # line segment\n if d1 <= d2:\n if distance:\n return d1\n elif params:\n return d1pb/abdist\n else:\n return pi1\n else:\n if distance:\n return d2\n elif params:\n return d2pb/abdist\n else:\n return pi2\n \n \n ## if the closest point on the line to point p lies between\n ## the endpoints of the line, then either d1 or d2 will equal\n ## abdist. IF neither do, then we know that the closest point lies\n ## outside the endpoints\n\n if abs(d1-abdist) < epsilon:\n if distance:\n return linedist\n else:\n return pi1\n\n if abs(d2-abdist) < epsilon:\n if distance:\n return linedist\n else:\n return pi2\n\n ## closest point is outside the interval. That means that the\n ## distance from point p to whichever endpoint is smaller is the\n ## closest distance\n\n d3 = dist(a,p)\n d4 = dist(b,p)\n\n if d3 < d4:\n if distance:\n return d3\n else:\n return a\n else:\n if distance:\n return d4\n else:\n return b",
"def find_intersections_line_line(line1: Line, line2: Line) -> {Point}:\n if line1.slope != line2.slope:\n if line1.slope is Infinity:\n # Line 1 is vertical, use its x value as the x value to evaluate line2\n x = line1.point1.x\n y = line2(x)\n elif line2.slope is Infinity:\n # Line 2 is vertical, use its x value as the x value to evaluate line1\n x = line2.point1.x\n y = line1(x)\n else:\n x = (line2.intercept - line1.intercept) / (line1.slope - line2.slope)\n y = line1(x)\n return {Point(x, y)}\n else:\n return {}",
"def closest_point_on_line(point, line):\n a, b = line\n ab = subtract_vectors(b, a)\n ap = subtract_vectors(point, a)\n c = vector_component(ap, ab)\n return add_vectors(a, c)",
"def intersect(nodeL, nodeR, city):\n PL = get_node_points(nodeL)\n P = get_city_points(city)\n PR = get_node_points(nodeR)\n\n equation1 = matrix.linear_eq((PL, P))\n equation2 = matrix.linear_eq((PR, P))\n\n for item in linear_list:\n lineP1 = get_node_points(item[0])\n lineP2 = get_node_points(item[1])\n temp = item[2][:]\n temp[2] = temp[2] * (-1)\n inter_points = get_intersect_points(equation1, temp)\n if inter_points == \"parallel\":\n check1 = False\n else:\n x = is_between(inter_points[0], PL[0], P[0])\n y = is_between(inter_points[0], lineP1[0], lineP2[0])\n check1 = x & y\n if check1 == True:\n return True\n\n for item in linear_list:\n lineP1 = get_node_points(item[0])\n lineP2 = get_node_points(item[1])\n temp = item[2][:]\n temp[2] = temp[2] * (-1)\n inter_points = get_intersect_points(equation2, temp)\n if inter_points == \"parallel\":\n check2 = False\n else:\n x = is_between(inter_points[0], PR[0], P[0])\n y = is_between(inter_points[0], lineP1[0], lineP2[0])\n check2 = x & y\n if check2 == True:\n return True\n return False",
"def lines_intersect_2d(line1_pt1, line1_pt2, line2_pt1, line2_pt2):\r\n return geometry.gmLinesIntersect(line1_pt1, line1_pt2, line2_pt1, line2_pt2)",
"def test_line_to_points(self):\n delta = 1\n # Create simple line\n L = numpy.array([[0, 0], [2, 0]])\n V = points_along_line(L, 1)\n\n expected_V = [[0, 0], [1, 0], [2, 0]]\n msg = ('Calculated points were %s, expected '\n '%s' % (V, expected_V))\n assert numpy.allclose(V, expected_V), msg\n\n # Not starting at zero\n # Create line\n L2 = numpy.array([[168, -2], [170, -2], [170, 0]])\n V2 = points_along_line(L2, delta)\n\n expected_V2 = [[168, -2], [169, -2], [170, -2],\n [170, -1], [170, 0]]\n msg = ('Calculated points were %s, expected '\n '%s' % (V2, expected_V2))\n assert numpy.allclose(V2, expected_V2), msg\n\n # Realistic polygon\n filename = '%s/%s' % (TESTDATA, 'indonesia_highway_sample.shp')\n layer = read_layer(filename)\n geometry = layer.get_geometry()\n\n P = geometry[0]\n C = points_along_line(P, delta)\n\n # Check against reference centroid\n expected_v = [[106.7168975, -6.15530081],\n [106.85224176, -6.15344678],\n [106.93660016, -6.21370279]]\n assert numpy.allclose(C, expected_v, rtol=1.0e-8)\n\n # Store points to file (to e.g. check with qgis)\n out_filename = unique_filename(prefix='test_points_along_line',\n suffix='.shp')\n V = Vector(data=None,\n projection=DEFAULT_PROJECTION,\n geometry=[C],\n name='Test points_along_line')\n V.write_to_file(out_filename)",
"def line_intersection(p0_x, p0_y, p1_x, p1_y, p2_x, p2_y, p3_x, p3_y):\n s10_x = p1_x - p0_x\n s10_y = p1_y - p0_y\n s32_x = p3_x - p2_x\n s32_y = p3_y - p2_y\n\n denom = s10_x * s32_y - s32_x * s10_y\n if denom == 0.0:\n return None # Collinear\n denomPositive = denom > 0\n\n s02_x = p0_x - p2_x\n s02_y = p0_y - p2_y\n s_numer = s10_x * s02_y - s10_y * s02_x\n if (s_numer < 0) == denomPositive:\n return None # No collision\n\n t_numer = s32_x * s02_y - s32_y * s02_x\n if (t_numer < 0) == denomPositive:\n return None # No collision\n\n if (s_numer > denom) == denomPositive or (t_numer > denom) == denomPositive:\n return 0 # No collision\n \n # Collision detected\n t = t_numer / denom\n i_x = p0_x + (t * s10_x)\n i_y = p0_y + (t * s10_y)\n\n return i_x, i_y"
] | [
"0.68824154",
"0.6712293",
"0.654148",
"0.64509463",
"0.64332",
"0.6398922",
"0.6371681",
"0.6350881",
"0.6311801",
"0.6303512",
"0.62909174",
"0.6267723",
"0.62216777",
"0.6182562",
"0.61743903",
"0.6087039",
"0.6078734",
"0.6011505",
"0.60104203",
"0.5943569",
"0.5912073",
"0.59040856",
"0.5883364",
"0.58811575",
"0.58544254",
"0.5848009",
"0.5833881",
"0.5815353",
"0.5814023",
"0.5809542"
] | 0.6902687 | 0 |
Initializes the scheduler to poll every five minutes and starts it | def _init_scheduler(self):
self._sched = BackgroundScheduler()
self._sched.add_job(self._check_rain, trigger='cron', minute='*/5')
self._sched.start() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def initialize_scheduler(self):\n scheduler = BackgroundScheduler()\n scheduler.add_job(self.do, 'interval', minutes=1)\n scheduler.start()\n self.do()",
"def initialize_scheduler(self):\n scheduler = BackgroundScheduler()\n scheduler.add_job(self.do, 'interval', minutes=1)\n scheduler.start()\n self.do()",
"def schedule_start(self):\n self.initialize_scheduler()",
"def schedule_start(self):\n print(\"Scheduler for monitoring request is running\")\n self.initialize_scheduler()",
"def __run_schedules():\n while True:\n __scheduler.run()",
"def initialize_scheduler():\n\n with SCHED_LOCK:\n\n # Check if scheduler should be started\n start_jobs = not len(SCHED.get_jobs())\n\n # Update check\n github_minutes = CONFIG.CHECK_GITHUB_INTERVAL if CONFIG.CHECK_GITHUB_INTERVAL and CONFIG.CHECK_GITHUB else 0\n\n schedule_job(versioncheck.checkGithub, 'Check GitHub for updates',\n hours=0, minutes=github_minutes, seconds=0)\n\n # Our interval should never be less than 30 seconds\n monitor_seconds = CONFIG.MONITORING_INTERVAL if CONFIG.MONITORING_INTERVAL >= 30 else 30\n\n if CONFIG.PMS_IP and CONFIG.PMS_TOKEN:\n schedule_job(plextv.get_real_pms_url, 'Refresh Plex server URLs',\n hours=12, minutes=0, seconds=0)\n schedule_job(pmsconnect.get_server_friendly_name, 'Refresh Plex server name',\n hours=12, minutes=0, seconds=0)\n\n schedule_job(activity_pinger.check_recently_added, 'Check for recently added items',\n hours=0, minutes=0, seconds=monitor_seconds * bool(CONFIG.NOTIFY_RECENTLY_ADDED))\n schedule_job(activity_pinger.check_server_response, 'Check for Plex remote access',\n hours=0, minutes=0, seconds=monitor_seconds * bool(CONFIG.MONITOR_REMOTE_ACCESS))\n schedule_job(activity_pinger.check_server_updates, 'Check for Plex updates',\n hours=12 * bool(CONFIG.MONITOR_PMS_UPDATES), minutes=0, seconds=0)\n\n # If we're not using websockets then fall back to polling\n if not CONFIG.MONITORING_USE_WEBSOCKET or POLLING_FAILOVER:\n schedule_job(activity_pinger.check_active_sessions, 'Check for active sessions',\n hours=0, minutes=0, seconds=monitor_seconds)\n\n # Refresh the users list and libraries list\n user_hours = CONFIG.REFRESH_USERS_INTERVAL if 1 <= CONFIG.REFRESH_USERS_INTERVAL <= 24 else 12\n library_hours = CONFIG.REFRESH_LIBRARIES_INTERVAL if 1 <= CONFIG.REFRESH_LIBRARIES_INTERVAL <= 24 else 12\n\n if CONFIG.PMS_TOKEN:\n schedule_job(plextv.refresh_users, 'Refresh users list',\n hours=user_hours, minutes=0, seconds=0)\n\n if CONFIG.PMS_IP and CONFIG.PMS_TOKEN:\n schedule_job(pmsconnect.refresh_libraries, 'Refresh libraries list',\n hours=library_hours, minutes=0, seconds=0)\n\n backup_hours = CONFIG.BACKUP_INTERVAL if 1 <= CONFIG.BACKUP_INTERVAL <= 24 else 6\n\n schedule_job(database.make_backup, 'Backup PlexPy database',\n hours=backup_hours, minutes=0, seconds=0, args=(True, True))\n schedule_job(config.make_backup, 'Backup PlexPy config',\n hours=backup_hours, minutes=0, seconds=0, args=(True, True))\n\n # Start scheduler\n if start_jobs and len(SCHED.get_jobs()):\n try:\n SCHED.start()\n except Exception as e:\n logger.info(e)\n\n # Debug\n #SCHED.print_jobs()",
"async def run_scheduler(self):\n while True:\n interval = 60\n for s in await self.get_service('data_svc').locate('schedules'):\n now = datetime.now().time()\n diff = datetime.combine(date.today(), now) - datetime.combine(date.today(), s.schedule)\n if interval > diff.total_seconds() > 0:\n self.log.debug('Pulling %s off the scheduler' % s.name)\n sop = copy.deepcopy(s.task)\n sop.set_start_details()\n await self._services.get('data_svc').store(sop)\n self.loop.create_task(self.run_operation(sop))\n await asyncio.sleep(interval)",
"def start(self):\n\n self.loadConf()\n self.loadDrivers()\n self.loadFeeds()\n self.runScheduler()\n self.scheduler.print_jobs()\n self.scheduler.start()\n self.printConf(\"test\")\n print(\"scheduler started\")",
"def AutonomousPeriodic(self):\n Scheduler.GetInstance().Run()",
"def setup_periodic_tasks(sender, **kwargs):\n sender.add_periodic_task(60, scheduled_task.s(), name='A scheduled task')",
"def start(self) -> None:\n self.bus.subscribe(\"cache:ready\", self.revive)\n self.bus.subscribe(\"scheduler:add\", self.add)\n self.bus.subscribe(\"scheduler:persist\", self.persist)\n self.bus.subscribe(\"scheduler:remove\", self.remove)\n self.bus.subscribe(\"scheduler:upcoming\", self.upcoming)\n self.scheduler = sched.scheduler(time.time, time.sleep)\n cherrypy.process.plugins.Monitor.start(self)",
"async def _start_cron_task(self):\n pass",
"def tasks_start(sender, **kwargs):\n sender.add_periodic_task(5.0, get_heartbeat.s())\n sender.add_periodic_task(5.0, monitor_resource_util.s())",
"def start_scheduler():\n from security_monkey import scheduler\n scheduler.setup_scheduler()\n scheduler.scheduler.start()",
"def run_scheduled_tasks(self) -> None:\n self.scheduler.run(False)",
"def scheduler(self):\n while True:\n if self.sch.empty():\n self.log.info(\"No scheduled jobs detected. Entering idle state\")\n bits = bitarray()\n # generate random 7B bitarrays\n for _ in range(pow(self.cube_dim,3)):\n bits.append(bool(random.getrandbits(1)))\n self.sch.enter(self.transmit_freq, 4, self.transmit, argument=(0, bits), kwargs={})\n else:\n try:\n self.log.info(\"Scheduled jobs detected. Serving through scheduler runner\")\n self.sch.run()\n except IOError as exc:\n self.log.exception(\"\"\"Scheduler runner encountered an error while executing the \n top level event: %s\"\"\", exc)\n sys.exit(1) # exit with status code 1",
"async def start_periodically_refresh_appointments(): # pylint: disable=invalid-name\n await asyncio.sleep(60)\n await app[\"snct_scrapper\"].refresh_appointments_every_minutes()",
"def __init__(self, scheduler_name, task, interval, delay=0):\n\n self.scheduler_name = scheduler_name\n self.task = task\n self.interval = interval\n self.delay = delay\n self.scheduler = sched.scheduler(time.time, time.sleep)\n self.__running = False\n super(Scheduler, self).__init__(name=self.scheduler_name)\n self.setDaemon(True)",
"async def test_manual_schedule(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # Declare manual interval schedule\n manual_schedule = ManualSchedule()\n manual_schedule.name = 'manual task'\n manual_schedule.process_name = 'sleep10'\n manual_schedule.repeat = datetime.timedelta(seconds=0)\n\n await scheduler.save_schedule(manual_schedule)\n manual_schedule = await scheduler.get_schedule(manual_schedule.schedule_id)\n\n await scheduler.queue_task(manual_schedule.schedule_id) # Added a task to the _scheduler queue\n await asyncio.sleep(5)\n\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n await self.stop_scheduler(scheduler)",
"def start_engine():\r\n traffic = TrafficCollector()\r\n weather = WeatherController()\r\n client = MongoClient()\r\n db = client.jam_forecaster\r\n\r\n scheduler = BlockingScheduler()\r\n scheduler.add_job(get_data, trigger='cron', hour='6-22', minute='*/5', second='0', max_instances=10, args=[traffic, weather, db])\r\n scheduler.start()",
"def startSchedule(self):\n DPxStartDinSched()",
"async def test_startup_schedule(self):\n await self.populate_test_data() # Populate data in foglamp.scheduled_processes\n\n scheduler = Scheduler(_address, _m_port)\n await scheduler.start()\n\n # Declare schedule startup, and execute\n startup_schedule = StartUpSchedule() # A scheduled process of the _scheduler\n startup_schedule.name = 'startup schedule'\n startup_schedule.process_name = 'sleep30'\n startup_schedule.repeat = datetime.timedelta(seconds=0) # set no repeat to startup\n\n await scheduler.save_schedule(startup_schedule)\n\n await asyncio.sleep(1)\n # Assert no tasks ar running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 0\n\n await scheduler.get_schedule(startup_schedule.schedule_id) # ID of the schedule startup\n\n await self.stop_scheduler(scheduler)\n\n scheduler = Scheduler()\n await scheduler.start()\n\n await asyncio.sleep(2)\n # Assert only 1 task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n scheduler.max_running_tasks = 0 # set that no tasks would run\n await scheduler.cancel_task(tasks[0].task_id)\n\n await asyncio.sleep(2)\n\n # Assert no tasks are running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 0\n\n scheduler.max_running_tasks = 1\n\n await asyncio.sleep(2)\n\n # Assert a single task is running\n tasks = await scheduler.get_running_tasks()\n assert len(tasks) == 1\n\n await self.stop_scheduler(scheduler)",
"def __init__(self, *args, time_frame=3, **kargs):\n super(Scheduler, self).__init__(*args, **kargs)\n self.time_frame = time_frame\n self.running_jobs = queue.Queue()\n self.scheduler_manager = []\n self.task_manager = None",
"def __init__(self, interval=1.0):\n\n super(VirtualTimeSyncScheduler, self).__init__()\n self.interval = interval",
"def run(self):\n self.timer.start()\n \n while not Status.is_final(self.status):\n if self.request:\n self.handle_request()\n \n if self.status == Status.RUNNING:\n # Clean up orphaned schedules and undead schedulers.\n # Schedule.objects.orphaned().update(scheduler=None)\n # CronSchedule.objects.orphaned().update(scheduler=None)\n \n cron = CronSchedule.objects.unclaimed()[:SCHEDULER_LIMIT]\n simple = Schedule.objects.unclaimed()[:SCHEDULER_LIMIT]\n for schedule in itertools.chain(cron, simple):\n self.log.info('Claiming %s.' % schedule)\n schedule.scheduler = self\n schedule.save()\n self.add(schedule)\n if not Status.is_final(self.status):\n self.wait()\n self.request = Scheduler.objects.get(pk=self.pk).request",
"def __init__(self):\n\n super(VirtualTimeScheduler, self).__init__()\n self.event_queue = Queue.PriorityQueue()",
"def runScheduler(self):\n\n for source in self.sources:\n intervals = [\n int(self.sources[source]['metrics'][x]['interval']) for x\n in range(0, len(self.sources[source]['metrics']))]\n sourceInterval = self.gcd(intervals)\n self.sources[source]['sourceInterval'] = sourceInterval\n self.logger.debug(self.sources[source]['metrics'])\n\n self.scheduler.add_job(\n self.getDriverData, 'interval', args=[\n self.sources[source]['metrics']],\n seconds=sourceInterval)",
"def _event_loop(self):\n while True:\n self.scheduler.run(blocking=True)\n time.sleep(1)",
"def _configure_scheduler(self, scheduler: Scheduler, callback: Callable[[], None]) -> None:\n if self.is_cron:\n # Scheduler always executes at the exact minute to check for cron triggering\n scheduler.every().minute.at(\":00\").do(callback)\n else:\n # Only activate when an interval is specified\n # If not the only way is to trigger the poll by the api `trigger` endpoint\n if self._poll_interval:\n # Scheduler executes every interval seconds to execute the poll\n scheduler.every(self._poll_interval).seconds.do(callback)",
"def start_updater(self, interval, clbk):\n self._scheduler = BlockingScheduler(executors={\n 'default': {'type': 'threadpool', 'max_workers': 1}\n })\n\n def job():\n clbk(self.check_feeds())\n\n self._scheduler.add_job(job, trigger='interval', minutes=interval)\n self._scheduler.start()"
] | [
"0.7595585",
"0.7595585",
"0.7579649",
"0.749416",
"0.7138583",
"0.7092702",
"0.67534935",
"0.6750422",
"0.67337924",
"0.6687412",
"0.65673363",
"0.65599716",
"0.6548511",
"0.652253",
"0.6520828",
"0.64790255",
"0.6437538",
"0.6316568",
"0.631423",
"0.6258985",
"0.62449056",
"0.6239972",
"0.6224071",
"0.6185637",
"0.61843276",
"0.6071042",
"0.6060284",
"0.60583925",
"0.60531706",
"0.6035131"
] | 0.775051 | 0 |
Calculate the maximum amount of rain between now and now+minute. Remote procedure to be called by the core of Domos. | def rain_max(self, key=None, name=None, lat=None, lon=None, minute=0):
self.logger.info("added sensor for rain max %s : %s for %s minutes" % (lat, lon, minute))
if key and lat and lon and minute:
try:
minute = int(minute)
except:
return False
new_rain = Rain(key, lat, lon, minute, self._max)
self._rain.append(new_rain)
return True
else:
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def rain_rate(self, value):\n if not value:\n return 0\n return await self.rain(value * 60)",
"def max_humidity(self):\n return 60",
"def find_tim(self):\n start_max = 0\n finish_max = 0\n op_mode = self.op_number + ',' + self.mode_number\n for resource in self.resources:\n end_time = resource.usage[op_mode][\"start_time\"] + resource.usage[op_mode][\"duration\"]\n if end_time > finish_max:\n finish_max = end_time\n start_max = resource.usage[op_mode][\"start_time\"]\n self.tim = finish_max\n self.sim = start_max",
"def getRemainingRunTime(self) -> int:\n if not self.debug:\n self.myFieldFox.write(\"SYST:BATT:ARTT?\")\n ret = self.myFieldFox.read()\n else:\n ret = 60\n return ret",
"def max(self):\n\n return time_stat(self, stat=\"max\")",
"def evaluate(self, time) -> float:\n ...",
"def minutesSinceLastUpdate(self):\n if self.seenTimes == []:\n return 0\n latestTime = max(self.seenTimes)\n return int(self.timeCode())-int(latestTime)",
"def end_time(self) -> float:\r\n ...",
"def wall_time(self):",
"def normalized_total_time(p, max_time=3600000):\n if \"cdgp.wasTimeout\" in p and p[\"cdgp.wasTimeout\"] == \"true\":\n v = 3600000\n else:\n v = int(float(p[\"result.totalTimeSystem\"]))\n return max_time if v > max_time else v",
"def v(self):\n\n # TODO This translation formula works, but needs simplified.\n\n # PWM duration can go from 0 to 4095 with 4095 representing max rpm\n# print(\"MuleBot.v MuleBot.dcMotorPWMDurationLeft:\", MuleBot.dcMotorPWMDurationLeft)\n speed_percentage = float(MuleBot.dcMotorPWMDurationLeft) / 4095.0\n# print(\"speed_percentage: \", speed_percentage)\n\n rpm = speed_percentage * self.motorMaxRPM\n# print(\"rpm: \", rpm)\n\n secondsPerMinute = 60\n revs_per_second = rpm / secondsPerMinute\n# print(\"--revs_per_second\", revs_per_second)\n\n inches_per_rev = 2.0 * math.pi * MuleBot.WHEEL_RADIUS\n INCHES_PER_METER = 39.3701\n meters_per_rev = inches_per_rev / INCHES_PER_METER\n# print(\"--meters_per_rev\", meters_per_rev)\n\n meters_per_second = meters_per_rev * revs_per_second\n\n# print(\"--meters_per_second: \", meters_per_second)\n return meters_per_second",
"def remaining_ms():",
"def event_based_r_factor(self):\n # assign variables\n rain_energy = 'rain_energy'\n rain_volume = 'rain_volume'\n erosivity = 'erosivity'\n r_factor = 'r_factor'\n\n # derive rainfall energy (MJ ha^-1 mm^-1)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_energy}\"\n \"=0.29*(1.-(0.72*exp(-0.05*{rain_intensity})))\".format(\n rain_energy=rain_energy,\n rain_intensity=self.rain_intensity),\n overwrite=True)\n\n # derive rainfall volume\n \"\"\"\n rainfall volume (mm)\n = rainfall intensity (mm/hr)\n * (rainfall interval (min)\n * (1 hr / 60 min))\n \"\"\"\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_volume}\"\n \"= {rain_intensity}\"\n \"*({rain_interval}\"\n \"/60.)\".format(\n rain_volume=rain_volume,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # derive event erosivity index (MJ mm ha^-1 hr^-1)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{erosivity}\"\n \"=({rain_energy}\"\n \"*{rain_volume})\"\n \"*{rain_intensity}\"\n \"*1.\".format(\n erosivity=erosivity,\n rain_energy=rain_energy,\n rain_volume=rain_volume,\n rain_intensity=self.rain_intensity),\n overwrite=True)\n\n # derive R factor (MJ mm ha^-1 hr^-1 yr^1)\n \"\"\"\n R factor (MJ mm ha^-1 hr^-1 yr^1)\n = EI (MJ mm ha^-1 hr^-1)\n / (rainfall interval (min)\n * (1 yr / 525600 min))\n \"\"\"\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{r_factor}\"\n \"={erosivity}\"\n \"/({rain_interval}\"\n \"/525600.)\".format(\n r_factor=r_factor,\n erosivity=erosivity,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_energy',\n 'rain_volume',\n 'erosivity'],\n flags='f')\n\n return r_factor",
"def workflow(now, realtime):\n szx = 7000\n szy = 3500\n # Create the image data\n imgdata = np.zeros((szy, szx), 'u1')\n sts = now - datetime.timedelta(minutes=2)\n metadata = {'start_valid': sts.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n 'end_valid': now.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n 'product': 'a2m',\n 'units': '0.02 mm'}\n\n gribfn = mrms.fetch('PrecipRate', now)\n if gribfn is None:\n print((\"mrms_rainrate_comp.py NODATA for PrecipRate: %s\"\n ) % (now.strftime(\"%Y-%m-%dT%H:%MZ\"),))\n return\n\n # http://www.nssl.noaa.gov/projects/mrms/operational/tables.php\n # Says units are mm/hr\n fp = gzip.GzipFile(gribfn, 'rb')\n (_, tmpfn) = tempfile.mkstemp()\n tmpfp = open(tmpfn, 'wb')\n tmpfp.write(fp.read())\n tmpfp.close()\n grbs = pygrib.open(tmpfn)\n grb = grbs[1]\n os.unlink(tmpfn)\n os.unlink(gribfn)\n\n val = grb['values']\n # Convert into units of 0.1 mm accumulation\n val = val / 60.0 * 2.0 * 50.0\n val = np.where(val < 0., 255., val)\n imgdata[:, :] = np.flipud(val.astype('int'))\n\n (tmpfp, tmpfn) = tempfile.mkstemp()\n\n # Create Image\n png = Image.fromarray(np.flipud(imgdata))\n png.putpalette(mrms.make_colorramp())\n png.save('%s.png' % (tmpfn,))\n\n mrms.write_worldfile('%s.wld' % (tmpfn,))\n # Inject WLD file\n routes = \"c\" if realtime else \"\"\n prefix = 'a2m'\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot a%s %s \"\n \"gis/images/4326/mrms/%s.wld GIS/mrms/%s_%s.wld wld' %s.wld\"\n \"\") % (routes, now.strftime(\"%Y%m%d%H%M\"), prefix, prefix,\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n # Now we inject into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot a%s %s \"\n \"gis/images/4326/mrms/%s.png GIS/mrms/%s_%s.png png' %s.png\"\n \"\") % (routes, now.strftime(\"%Y%m%d%H%M\"), prefix, prefix,\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n\n if realtime:\n # Create 900913 image\n cmd = (\"gdalwarp -s_srs EPSG:4326 -t_srs EPSG:3857 -q -of GTiff \"\n \"-tr 1000.0 1000.0 %s.png %s.tif\") % (tmpfn, tmpfn)\n subprocess.call(cmd, shell=True)\n # Insert into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot c %s \"\n \"gis/images/900913/mrms/%s.tif GIS/mrms/%s_%s.tif tif' %s.tif\"\n \"\") % (now.strftime(\"%Y%m%d%H%M\"), prefix, prefix,\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n\n j = open(\"%s.json\" % (tmpfn,), 'w')\n j.write(json.dumps(dict(meta=metadata)))\n j.close()\n # Insert into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot c %s \"\n \"gis/images/4326/mrms/%s.json GIS/mrms/%s_%s.json json' \"\n \"%s.json\") % (now.strftime(\"%Y%m%d%H%M\"), prefix, prefix,\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n for suffix in ['tif', 'json', 'png', 'wld']:\n if os.path.isfile(\"%s.%s\" % (tmpfn, suffix)):\n os.unlink('%s.%s' % (tmpfn, suffix))\n\n os.close(tmpfp)\n os.unlink(tmpfn)",
"def rainfall_event(self):\n\n # assign local variables\n datatype = 'strds'\n increment = str(self.rain_interval)+' minutes'\n raster = 'raster'\n iterations = int(self.rain_duration)/int(self.rain_interval)\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n\n # create raster space time datasets\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n rain_duration=self.rain_duration,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # determine mode and run model\n if self.mode == 'simwe_mode':\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n 
overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model\n # as a series of rainfall intervals in a rainfall event\n i = 1\n while i < iterations:\n\n # update the elevation\n evol.elevation = evolved_elevation\n print evol.elevation\n\n # update time\n evol.start = time\n print evol.start\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=self.rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n i = i+1\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"={evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n 
elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)",
"def max_time(self):\n return self.time[np.argmax(self.flux)]",
"def get_heater_rod_status(self, simulation, el_load_file, actual_time, start_datetime, start_sim_inh, end_sim_inh):\n if simulation:\n # file based simulation - values are read from the file\n # file based simulation - values are read from the file\n # hour_of_year = 1\n simtime = int(math.floor(((actual_time - start_datetime).seconds / (60.0 * 15.0)) + start_sim_inh * 60.0 / 15.0)) # simulationstime in quarters = 15 minutes slots\n if (simtime >= 35040): # actual time exceeds the first year (there are 35 040 slots of 15 minutes in a year)\n simtime = simtime - math.floor(simtime / 35040) * 35040\n line1 = utils.get_significant_parts(el_load_file[simtime].rstrip().split(\" \"))\n y1 = float(utils.get_ith_column(2, line1))\n return y1 # as load from 0 to 1\n else:\n # real time calculation - values are received via MQTT? - dead for now\n return 0",
"def get_max_temp(self):\n self.max_temp = self.domain[1] * 2",
"def _get_time_interval_in_minutes(self):\n return self.visa.get_request_interval_in_minutes()",
"def rainfall_series(self):\n\n # assign local temporal variables\n datatype = 'strds'\n increment = str(self.rain_interval)+\" minutes\"\n raster = 'raster'\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n #iterations = sum(1 for row in precip)\n\n # create a raster space time dataset\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(\n elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # open txt file with precipitation data\n with open(evol.precipitation) as csvfile:\n\n # check for header\n has_header = csv.Sniffer().has_header(csvfile.read(1024))\n\n # rewind\n csvfile.seek(0)\n\n # skip header\n if has_header:\n next(csvfile)\n\n # parse time and precipitation\n precip = csv.reader(csvfile, delimiter=',', skipinitialspace=True)\n\n # initial run\n initial = next(precip)\n evol.start = initial[0]\n evol.rain_intensity = 'rain_intensity'\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=evol.rain_intensity,\n rain_observation=float(initial[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif 
self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model for each rainfall record\n for row in precip:\n\n # update the elevation\n evol.elevation=evolved_elevation\n\n # update time\n evol.start=row[0]\n\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=rain_intensity,\n rain_observation=float(row[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n 
overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"= {evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)",
"def solar(self, _mask, _target, _args):\n return self.get_sensor(\"pv_yield_now\")",
"def _psp_max_time(rise, decay, rise_power):\n return rise * np.log(1 + (decay * rise_power / rise))",
"def last_5_mins(conn,from_time):\n durTot = 0\n time = '{}'.format(from_time)\n query = ''' SELECT sum(duration) FROM events WHERE event_type = 'Cycle End' AND unix_time > ?'''\n c = conn.cursor()\n c.execute(query,(time,))\n (data, ) = c.fetchone()\n try:\n \t durTot = round(data,2)\n except:\n\tpass\n return durTot",
"def endTime(self) -> float:\n try: return self.times[-1]\n except IndexError: return 0.0",
"def max_time(self):\n #{{{ function to return time of last sample\n\n if self.maxtime == -1:\n return stock.now()\n\n return self.maxtime",
"def get_current_simulated_time(self):\n\n query = \"SELECT MAX(time) FROM patient_signal_values\"\n\n return self.mysql_obj.fetch_value(query)",
"def _calculate_monomer(self, raw=False):\n ta = self.TimeAxis\n # transition frequency\n om = self.system.elenergies[1]-self.system.elenergies[0]\n # transition dipole moment\n dm = self.system.dmoments[0,1,:]\n # dipole^2\n dd = numpy.dot(dm,dm)\n # natural life-time from the dipole moment\n gama = [0.0] #[-1.0/self.system.get_electronic_natural_lifetime(1)]\n sbi = self.system.get_SystemBathInteraction(self.TimeAxis)\n reorg = sbi.CC.get_reorganization_energy(0,0)\n \n if self.system._has_system_bath_coupling:\n # correlation function\n ct = self.system.get_egcf((0,1)) \n gt = self._c2g(ta,ct.data)\n tr = {\"ta\":ta,\"dd\":dd,\"om\":om-self.rwa,\"ct\":ct,\"gt\":gt,\"gg\":gama,\"fwhm\":0.0}\n else:\n tr = {\"ta\":ta,\"dd\":dd,\"om\":om-self.rwa,\"gg\":gama,\"fwhm\":0.0}\n \n if self._gauss_broad:\n tr[\"fwhm\"] = self.gauss\n\n tr[\"re\"] = reorg\n\n if self._gauss_broad:\n tr[\"fwhm\"] = self.gauss\n\n # calculates the one transition of the monomer \n data = numpy.real(self.one_transition_spectrum_abs(tr))\n data_fl = numpy.real(self.one_transition_spectrum_fluor(tr))\n\n \n for ii in range(2,self.system.Nb[1]+1):\n \n # transition frequency\n om = self.system.elenergies[ii]-self.system.elenergies[0]\n # transition dipole moment\n dm = self.system.dmoments[0,ii,:]\n # dipole^2\n dd = numpy.dot(dm,dm)\n # natural life-time from the dipole moment\n gama = [0.0] #[-1.0/self.system.get_electronic_natural_lifetime(ii)]\n \n if self.system._has_system_bath_coupling:\n # correlation function\n ct = self.system.get_egcf((0,ii)) \n gt = self._c2g(ta,ct.data)\n tr = {\"ta\":ta,\"dd\":dd,\"om\":om-self.rwa,\"ct\":ct,\"gt\":gt,\"gg\":gama,\"fwhm\":0.0}\n else:\n tr = {\"ta\":ta,\"dd\":dd,\"om\":om-self.rwa,\"gg\":gama,\"fwhm\":0.0}\n\n if self._gauss_broad: \n tr[\"fwhm\"] = self.gauss\n \n if self._gauss_broad:\n tr[\"fwhm\"] = self.gauss\n \n data += numpy.real(self.one_transition_spectrum_abs(tr))\n\n # we only want to retain the upper half of the spectrum\n Nt = len(self.frequencyAxis.data)//2 \n do = self.frequencyAxis.data[1]-self.frequencyAxis.data[0]\n st = self.frequencyAxis.data[Nt//2]\n # we represent the Frequency axis anew\n axis = FrequencyAxis(st,Nt,do)\n\n # multiply the spectrum by frequency (compulsory prefactor)\n if not raw:\n data = axis.data*data\n data_fl = (axis.data**3)*data_fl\n\n \n spect_abs = LinSpectrum(axis=axis, data=data)\n fluor_spect = LinSpectrum(axis=axis, data=data_fl)\n \n return {\"abs\": spect_abs, \"fluor\": fluor_spect}",
"def traffic_restoration_time_to_healed_or_new_endpoints_in_minutes(self) -> Optional[int]:\n return pulumi.get(self, \"traffic_restoration_time_to_healed_or_new_endpoints_in_minutes\")",
"def callback_max_wall_time_reached(self, event):\n self.perform_final_actions()\n self._max_wall_time_reached = True",
"def get_utilization(self, current_time):\n\n # If the server is not serving, not online, and was not serving this\n # time period, move the anchor.\n if (not self.is_serving) and \\\n (not self.online) and \\\n (self.utilization == 0) and \\\n len(self.queue) == 0:\n self.utilization_anchor = current_time\n\n # If the server is serving or has people waiting...\n elif self.is_serving or len(self.queue) != 0:\n if current_time == self.utilization_anchor:\n self.utilization = 1\n else:\n self.utilization = self.utilization + (\n (1-self.utilization) /\n ((current_time-self.utilization_anchor)*1.0))\n\n # If the server is online but is not doing anything...\n elif self.online and \\\n (not self.is_serving) and \\\n len(self.queue) == 0:\n if current_time == self.utilization_anchor:\n self.utilization = 0\n else:\n self.utilization = self.utilization + (\n (0-self.utilization) /\n ((current_time-self.utilization_anchor)*1.0))\n\n # If we are on the hour and the server has been online,\n # we flush the results and reset the utilization.\n if current_time != 0 and \\\n (current_time + 1) % _get_sec(\"01:00:00\", spd_factor) == 0 and \\\n self.online:\n self.utilization_series[_get_ttime(\n current_time + 1 - _get_sec(\"01:00:00\", spd_factor), \n spd_factor)] = self.utilization\n\n\n #self.output_queue.server_statistics.append(\n # [self.id,\n # self.utilization,\n # _get_ttime(current_time, spd_factor)])\n\n self.utilization = 0\n self.utilization_anchor = current_time + 1"
] | [
"0.59764725",
"0.5706513",
"0.5661375",
"0.5645471",
"0.5608999",
"0.56051016",
"0.55437905",
"0.55279684",
"0.5525821",
"0.5406212",
"0.53948325",
"0.5393251",
"0.5372555",
"0.5347693",
"0.5315714",
"0.5262582",
"0.5261224",
"0.5259729",
"0.52450776",
"0.524259",
"0.52310866",
"0.52239656",
"0.52212614",
"0.5218735",
"0.5208346",
"0.52048355",
"0.5193882",
"0.51810914",
"0.5163793",
"0.5154705"
] | 0.5800529 | 1 |
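The record ending above exercises GRASS GIS's temporal framework. As a hedged illustration only (the dataset/map names, the 3-minute increment, and the helper name below are invented for the sketch, not taken from the record), the t.register call pattern the record repeats can be wrapped like this:

import grass.script as gscript

def register_evolved_map(strds_name, raster_map, start, rain_interval):
    # Register one evolved raster in a space-time raster dataset with an
    # interval-based (flags='i') timestamp, mirroring the calls in the record.
    gscript.run_command(
        't.register',
        type='raster',
        input=strds_name,
        maps=raster_map,
        start=start,
        increment="{} minutes".format(rain_interval),
        flags='i',
        overwrite=True)

# hypothetical usage inside an active GRASS session
register_evolved_map('elevation_timeseries', 'elevation_step_001',
                     '2016-01-01 00:00:00', 3)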
Returns all the session names for a participant | def filter_by_participant (self, participant):
sparql_results = self.query ("""
select distinct ?rs ?session ?name ?number ?pid ?sitename
where {
BIND (<%s> AS ?participant)
?rs rdf:type austalk:RecordedSession .
?rs olac:speaker ?participant .
?participant austalk:id ?pid .
?participant austalk:recording_site ?site .
?site rdfs:label ?sitename .
?rs austalk:prototype ?session .
?session austalk:name ?name .
?session austalk:id ?number .
}
ORDER BY ?name""" % participant.identifier)
results = []
for result in sparql_results["results"]["bindings"]:
results.append (Session (
client = self.client,
identifier = result["rs"]["value"],
prototype = result["session"]["value"],
name = result["name"]["value"],
number = result["number"]["value"],
site = result["sitename"]["value"],
participantId = result["pid"]["value"]))
return results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filtered_session_names(self):\n return list(self.stage.filtered_sessions.keys())",
"def search_sessions(name: str, provider: Optional[str] = None) -> List[str]:\n sessions = session_list(provider=provider).sessions\n name = name.lower()\n return [s.id for s in sessions if s.id.lower().startswith(name)]",
"def teammates_player_names(self):\n return [p.name for p in self.teammates]",
"def name_list(qbo_session):\n\n return qbo_session.name_list()",
"def get_player_names(self):\n names = [user['name'] for user in self.server.status().raw['players']['sample']]\n return names",
"def all (self):\n sparql_results = self.query (\"\"\"\n select distinct ?rs ?session ?name ?number ?pid ?sitename\n where {\n \n ?rs rdf:type austalk:RecordedSession .\n ?rs olac:speaker ?participant .\n \n ?participant austalk:id ?pid .\n ?participant austalk:recording_site ?site .\n ?site rdfs:label ?sitename .\n \n ?rs austalk:prototype ?session .\n ?session austalk:name ?name .\n ?session austalk:id ?number .\n }\n ORDER BY ?name\"\"\")\n\n results = []\n\n for result in sparql_results[\"results\"][\"bindings\"]:\n\n results.append (Session (\n client = self.client,\n identifier = result[\"rs\"][\"value\"],\n prototype = result[\"session\"][\"value\"],\n name = result[\"name\"][\"value\"],\n number = result[\"number\"][\"value\"],\n site = result[\"sitename\"][\"value\"],\n participantId = result[\"pid\"][\"value\"]))\n\n return results",
"def getSessionByUsername(self, username):\n match = []\n for session in self.sessions:\n if (session.identifier[1] == username):\n match.append(session)\n return match",
"def get_speaker_sessions(self, request):\n return self.session_service.get_speaker_sessions(\n request.websafeSpeakerKey)",
"def get_users_name(self, session) -> Tuple[int, str, str]:\n users = (\n session.query(User.chat_id, User.first_name, User.last_name)\n .filter(User.is_admin==False)\n .all()\n )\n return users",
"def list(self):\n return {str(k): v for k, v in self.rpc.call(MsfRpcMethod.SessionList).items()} # Convert int id to str",
"def _getSessionsBySpeaker(self, request):\n # Ensure that the speaker key is valid and that the speaker exists\n speaker = _getEntityByWebsafeKey(request.websafeSpeakerKey, 'Speaker')\n # Return all of the speaker's sessions\n return ndb.get_multi(speaker.sessions)",
"def speaker_list(self):\n return \", \".join(str(speaker.person) for speaker in self.speakers.all())",
"def session_list(self, endpoint_name=None):\n if endpoint_name is None:\n _, body = self.request('/v1.1/endpoint/sessions', 'GET')\n else:\n _, body = self.request('/v1.1/endpoints/%s/sessions' % endpoint_name, 'GET')\n return body",
"def sessions(self):\n return utils.listItems(self, '/status/sessions')",
"async def list(self):\n all = (await self.get(self.profiles_list))['results']\n log(\"retrieved participant metadata.\")\n return all or []",
"def get_names(self):\r\n names = []\r\n for p in self.people:\r\n names.append(p.get_name())\r\n return names",
"def get_active_sessions():\n\n # The output changes based on locales, force it to be YY-MM-DD\n # for the benefit of split()\n os.environ['LANG'] = 'en_GB.utf8'\n try:\n output = subprocess.check_output(['who']).rstrip()\n except subprocess.CalledProcessError:\n print 'UNKNOWN: unable to invoke who'\n sys.exit(NAGIOS_UNKNOWN)\n\n # Nothing to process\n if not output:\n return {}\n\n sessions = {}\n for line in output.split(\"\\n\"):\n fields = line.split()\n sessions[fields[1]] = {\n 'user': fields[0],\n 'date': fields[2],\n 'time': fields[3],\n 'source': fields[4][1:-1] if len(fields) >= 5 else None,\n }\n\n return sessions",
"def find_sessions(sfe):\n print(\"-\" * 20 + \" find_sessions started\")\n isessions = sfe.list_iscsisessions()\n json_isessions = isessions.to_json()\n return json_isessions",
"def get_exercise_recording_full_names(self):\n full_names = set()\n for er in self.exercise_recordings:\n full_names.add(er.full_name)\n return full_names",
"def filter_by_session (self, site_id, participant_id, session_id):\n\n query = \"\"\"\n select distinct * where {\n\n BIND (\"%s\" AS ?pid)\n BIND (\"%s\" as ?sessionid)\n \n ?participant austalk:id ?pid .\n ?rc rdf:type austalk:RecordedComponent .\n ?rc olac:speaker ?participant .\n ?rc austalk:session ?sessionid .\n\n ?rc austalk:prototype ?prototype .\n ?prototype austalk:name ?name .\n ?prototype austalk:shortname ?shortname .\n \n optional { ?rc austalk:audiorating ?audiorating .}\n optional { ?rc austalk:videorating ?videorating .}\n optional { ?rc austalk:comment ?comment .}\n \n }\n \"\"\" % (participant_id, session_id)\n\n\n sparql_results = self.query (query)\n results = []\n for result in sparql_results[\"results\"][\"bindings\"]:\n\n for field in ['audiorating', 'videorating', 'comment']:\n if field not in result:\n result[field] = {'value': ''}\n\n comp = Component (\n client = self.client,\n identifier = result[\"rc\"][\"value\"],\n participantId = result[\"pid\"][\"value\"],\n sessionId = result[\"sessionid\"][\"value\"],\n prototype = result[\"prototype\"][\"value\"],\n audiorating = result[\"audiorating\"][\"value\"],\n videorating = result[\"videorating\"][\"value\"],\n comment = result[\"comment\"][\"value\"],\n name = result[\"name\"][\"value\"],\n componentId = result[\"shortname\"][\"value\"],\n site = site_id,\n )\n comp.details()\n results.append(comp)\n return results",
"def sessions(self):\n return list(Session.get_sessions(self))",
"def search_session_providers(name: str) -> List[str]:\n from renku.core.plugin.session import get_supported_session_providers\n\n name = name.lower()\n return [p.name for p in get_supported_session_providers() if p.name.lower().startswith(name)]",
"def get_conference_sessions(self, request):\n return self.session_service.get_conference_sessions(\n request.websafeConferenceKey)",
"def all_participants_data(study_name: str):\n # get all participants' name-ids\n participants = CC_driver.get_all_participants(study_name)\n\n if len(participants) > 0:\n participants_rdd = CC_driver.sc.parallelize(participants)\n results = participants_rdd.map(\n lambda participant: diagnose_pipeline(participant[\"identifier\"], CC_worker, config))\n results.count()\n else:\n print(study_name, \"- Study contains no participant.\")",
"def findSessions(self, channel):\n found = []\n for ss in self.sessions:\n try:\n _channel = channel.decode(ss.encoding)\n if _channel == ss.name:\n found.append(ss)\n if ss.matchNick(_channel):\n found.append(ss)\n except UnicodeDecodeError:\n continue\n if found == []:\n found = [self.defaultSession]\n return found",
"def get_sessions(self):\n\n return self.all_sessions",
"def getNames(self) -> List[unicode]:\n ...",
"def participants_group_name(self):\n return self.short_name+\"_participants\"",
"def get_sessions_list():\n sessions = Session.query.all()\n result = sessions_schema.dump(sessions).data\n return jsonify({'status': 'success', 'message': None, 'data': result}), 200",
"def get_sessions(sessions, time_feat_dict):\n filt = Session.filter_time_func(time_feat_dict)\n return [s for s in sessions if filt(shortstr2time(s['start']))]"
] | [
"0.6723869",
"0.63031816",
"0.62869734",
"0.62178284",
"0.6213438",
"0.6011497",
"0.60053223",
"0.59663093",
"0.5954626",
"0.5935582",
"0.5870372",
"0.5859776",
"0.5857662",
"0.5844726",
"0.57861197",
"0.57147163",
"0.5673998",
"0.56739765",
"0.56506336",
"0.5621848",
"0.5617745",
"0.5616195",
"0.55978554",
"0.55789727",
"0.5577217",
"0.557043",
"0.5566993",
"0.5564511",
"0.5552853",
"0.5548814"
] | 0.67719805 | 0 |
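For readers unfamiliar with the query pattern in the record above (a string-substituted SPARQL query, then iteration over the JSON result bindings), here is a minimal, hedged sketch using the SPARQLWrapper library; the endpoint URL, the example.org prefix, and the function name are illustrative assumptions, not part of the dataset.

from SPARQLWrapper import SPARQLWrapper, JSON

def sessions_for_participant(endpoint_url, participant_uri):
    # Bind the participant URI into the query, run it, and unpack the JSON
    # bindings into (name, number) tuples, one per recorded session.
    sparql = SPARQLWrapper(endpoint_url)
    sparql.setQuery("""
        PREFIX ex: <http://example.org/schema#>
        SELECT DISTINCT ?rs ?name ?number
        WHERE {
            BIND (<%s> AS ?participant)
            ?rs a ex:RecordedSession ;
                ex:speaker ?participant ;
                ex:prototype ?session .
            ?session ex:name ?name ;
                     ex:id ?number .
        }
        ORDER BY ?name""" % participant_uri)
    sparql.setReturnFormat(JSON)
    bindings = sparql.query().convert()["results"]["bindings"]
    return [(b["name"]["value"], b["number"]["value"]) for b in bindings]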
Returns all the session names for a site identified by site label | def filter_by_site (self, label):
sparql_results = self.query ("""
select distinct ?rs ?session ?name ?number ?pid
WHERE {
?rs rdf:type austalk:RecordedSession .
?rs olac:speaker ?participant .
?participant austalk:id ?pid .
?participant austalk:recording_site ?site .
?site rdfs:label "%s" .
?rs austalk:prototype ?session .
?session austalk:name ?name .
?session austalk:id ?number .
}
ORDER BY ?name""" % label)
results = []
for result in sparql_results["results"]["bindings"]:
results.append (Session (
client = self.client,
identifier = result["rs"]["value"],
prototype = result["session"]["value"],
name = result["name"]["value"],
number = result["number"]["value"],
# site = result["sitename"]["value"],
participantId = result["pid"]["value"]))
return results | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def filtered_session_names(self):\n return list(self.stage.filtered_sessions.keys())",
"def search_sessions(name: str, provider: Optional[str] = None) -> List[str]:\n sessions = session_list(provider=provider).sessions\n name = name.lower()\n return [s.id for s in sessions if s.id.lower().startswith(name)]",
"def sessions(self):\n return utils.listItems(self, '/status/sessions')",
"def name_list(qbo_session):\n\n return qbo_session.name_list()",
"def find_sessions(sfe):\n print(\"-\" * 20 + \" find_sessions started\")\n isessions = sfe.list_iscsisessions()\n json_isessions = isessions.to_json()\n return json_isessions",
"def getSessionByUsername(self, username):\n match = []\n for session in self.sessions:\n if (session.identifier[1] == username):\n match.append(session)\n return match",
"def list_sites():\n result = []\n querystring = 'select sitename from {};'.format(TABLES[0]))\n res = execute_query(querystring)\n if res:\n result = [x[0] for x in res]\n return result",
"def describe_sessions(StackName=None, FleetName=None, UserId=None, NextToken=None, Limit=None, AuthenticationType=None):\n pass",
"def get_site_names(self, include = ['*'], exclude = []):\n \n raise NotImplementedError('get_site_names')",
"def get_all_site_names(_current_parser=None):\n parser = _get_parser(_current_parser)\n return [site for site in parser if site != \"DEFAULT\"]",
"def getSEsForSite( siteName ):\n result = getSiteSEMapping()\n if not result['OK']:\n return result\n\n mapping = result['Value']\n if siteName in mapping:\n return S_OK( mapping[siteName] )\n\n return S_OK( [] )",
"def list(self):\n return {str(k): v for k, v in self.rpc.call(MsfRpcMethod.SessionList).items()} # Convert int id to str",
"async def fetch_site_devices(ipf: IPFabricClient, site: str) -> List:\n request = {\n TableFields.snapshot: ipf.active_snapshot,\n TableFields.columns: [\"hostname\"],\n TableFields.filters: ipf.parse_filter(f\"siteName = {site}\"),\n }\n res = await ipf.api.post(url=URIs.devices, json=request)\n res.raise_for_status()\n return [rec[\"hostname\"] for rec in res.json()[\"data\"]]",
"def getSessionCount(self):\n logger.debug('Getting the number of sessions discovered...')\n return get_text(get_element_by_css(\"span[data-nsmodule='sessionsdiscovered']\"))",
"def sessions(self):\n logger.debug(\"Get sessions\")\n return self._raw_api.sessions.get()",
"def search_session_providers(name: str) -> List[str]:\n from renku.core.plugin.session import get_supported_session_providers\n\n name = name.lower()\n return [p.name for p in get_supported_session_providers() if p.name.lower().startswith(name)]",
"def sessions(self):\n\n return File.session_choices",
"def sessions(self):\n return list(Session.get_sessions(self))",
"def get_sites():\n sites = [ x.get('siteid') for x in Schedconfig.objects.values('siteid').distinct() ]\n locale.setlocale(locale.LC_ALL, '')\n sites = sorted(sites, key=locale.strxfrm)\n return sites",
"def sessions(self, *args, **kwargs):\r\n return self._get('Sessions', *args, **kwargs)",
"def get_active_sessions():\n\n # The output changes based on locales, force it to be YY-MM-DD\n # for the benefit of split()\n os.environ['LANG'] = 'en_GB.utf8'\n try:\n output = subprocess.check_output(['who']).rstrip()\n except subprocess.CalledProcessError:\n print 'UNKNOWN: unable to invoke who'\n sys.exit(NAGIOS_UNKNOWN)\n\n # Nothing to process\n if not output:\n return {}\n\n sessions = {}\n for line in output.split(\"\\n\"):\n fields = line.split()\n sessions[fields[1]] = {\n 'user': fields[0],\n 'date': fields[2],\n 'time': fields[3],\n 'source': fields[4][1:-1] if len(fields) >= 5 else None,\n }\n\n return sessions",
"def tracker_list():\n trackers = db.execute(\"SELECT DISTINCT name FROM trackers\")\n names = [tup[0] for tup in trackers.fetchall()]\n return names",
"def list_secgroups(self, name=None):",
"def get_namespaces(self, label_selector=None):\n return self.core_client.list_namespace(label_selector=label_selector)",
"def get_ls_session_dates(soup):\n ls_session_dates = soup.find(\n \"select\", attrs={\n \"id\": \"ContentPlaceHolder1_ddlSession\"}).find_all(\"option\")\n return [ls.text for ls in ls_session_dates]",
"def getTrackingPluginNames(context):\n\n gsm = getGlobalSiteManager()\n global_plugins = set([p.name for p in gsm.registeredAdapters()\n if p.provided == IAnalyticsTrackingPlugin])\n\n lsm = getSite().getSiteManager()\n local_plugins = set([p.name for p in lsm.registeredAdapters()\n if p.provided == IAnalyticsTrackingPlugin])\n\n values = sorted(list(global_plugins | local_plugins))\n return SimpleVocabulary.fromValues(values)",
"def get_sessions(self):\n\n return self.all_sessions",
"def findSessions(self, channel):\n found = []\n for ss in self.sessions:\n try:\n _channel = channel.decode(ss.encoding)\n if _channel == ss.name:\n found.append(ss)\n if ss.matchNick(_channel):\n found.append(ss)\n except UnicodeDecodeError:\n continue\n if found == []:\n found = [self.defaultSession]\n return found",
"def get_list_hub(showOnly=False):\n # start Requests session\n sc = requests.Session()\n\n # import cookies from Firefox\n sc.cookies.update(get_cookies('imhsc.imhadmin.net'))\n\n # send request\n vpx = sc.post('https://imhsc.imhadmin.net/index.php?v=Hub')\n\n # check if login failed\n check_sc_login(vpx.text)\n\n # parse with BS4\n bs = BeautifulSoup(vpx.text, \"xml\")\n\n # server=0 net=2\n slist = []\n for trr in bs.tbody.find_all('tr'):\n try:\n tsrv = trr.find_all('td')[0].text.strip()\n except:\n continue\n slist.append(tsrv)\n if not showOnly:\n print(tsrv)\n\n return slist",
"def getNames():\r\n return [\"Server1\", \"Server2\", \"Client1\", \"Client2\"]"
] | [
"0.6083844",
"0.5573907",
"0.5564926",
"0.54466736",
"0.5325585",
"0.53207505",
"0.5316571",
"0.527415",
"0.5223453",
"0.5201878",
"0.5165152",
"0.514091",
"0.5095732",
"0.50864303",
"0.5076436",
"0.50540745",
"0.50449497",
"0.5036865",
"0.50282997",
"0.50101763",
"0.49757937",
"0.49587727",
"0.4914938",
"0.49044168",
"0.4901336",
"0.48946995",
"0.4886746",
"0.4883055",
"0.48687926",
"0.4866625"
] | 0.71040463 | 0 |
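The record above splices a user-supplied site label straight into the query with "%s" % label. As a hedged aside (the helper below is not from the dataset, only a sketch), escaping the literal first keeps quotes or backslashes in a label from breaking the SPARQL string:

def sparql_string_literal(value):
    # Escape backslashes, double quotes and newlines so the value can be
    # embedded safely as a quoted SPARQL string literal.
    escaped = (value.replace("\\", "\\\\")
                    .replace('"', '\\"')
                    .replace("\n", "\\n"))
    return '"%s"' % escaped

query_template = """
    SELECT DISTINCT ?rs WHERE {
        ?site rdfs:label %s .
        ?participant austalk:recording_site ?site .
    }"""
query = query_template % sparql_string_literal('Sydney "Lab" Site')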
Delete one or more keys specified by ``keys`` | async def delete(self, *keys, **kwargs):
def gen_keys(keys):
all_keys = []
for key in keys:
if isinstance(key, list):
all_keys += gen_keys(keys=key)
else:
all_keys.append(key)
return all_keys
all_keys = gen_keys(keys)
for key in all_keys:
await self._client_conn.hdel(key=self.name, field=key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete_many(self, keys):\n raise NotImplementedError()",
"def delete_many(self, keys):\n try:\n if keys:\n self._cache.delete(*map(self.prepare_key, keys))\n except Exception as err:\n return self.warn_or_error(err)",
"def delete(cls, *keys):\n todelete = []\n namespace, kind, member = Schema.Get(cls)\n for key in keys:\n assert isinstance(key, str)\n todelete.append(Key(namespace, kind, key)) \n Lisa.delete(*todelete)",
"def delete_many(self, keys):\n return self.delete_many_values(keys)",
"def delete_many(self, *keys):\n self.collection.remove({'_id': {'$in': keys}})\n return True",
"def Delete(keys):\n keys, multiple = NormalizeAndTypeCheckKeys(keys)\n\n if multiple and not keys:\n return\n\n req = datastore_pb.DeleteRequest()\n req.key_list().extend([key._Key__reference for key in keys])\n\n tx = _MaybeSetupTransaction(req, keys)\n\n resp = datastore_pb.DeleteResponse()\n try:\n apiproxy_stub_map.MakeSyncCall('datastore_v3', 'Delete', req, resp)\n except apiproxy_errors.ApplicationError, err:\n raise _ToDatastoreError(err)",
"def remove_keys(data: dict, keys: list[str]) -> None:\n for k in keys:\n _ = data.pop(k, None)",
"def delete(cls, keys, pipe=None):\n with cls._pipe(pipe) as pipe:\n core = cls.core(pipe)\n core.delete(*keys)",
"def delete_many(self, keys, version=None, client=None):\r\n\r\n if client is None:\r\n client = self.get_client(write=True)\r\n\r\n if not keys:\r\n return\r\n\r\n keys = [self.make_key(k, version=version) for k in keys]\r\n try:\r\n return client.delete(*keys)\r\n except ConnectionError:\r\n raise ConnectionInterrupted(connection=client)",
"def remove_keys(_dict, _keys):\n if isinstance(_keys, str):\n if _keys in _dict:\n del _dict[_keys]\n else:\n for _key in _keys:\n _dict = remove_keys(_dict, _key)\n return _dict",
"def delete(self, *keys: KeyT) -> ResponseT:\n return self._split_command_across_slots(\"DEL\", *keys)",
"def delete(self, keys: List[K]) -> List[bool]:\n raise NotImplementedError('delete must be reimplemented in concrete implementation')",
"def delete(\n self, keys: Optional[Iterable[Text]] = None\n ) -> Dict[Text, StateDictInterface]:\n records = self.execute()\n assert isinstance(records, dict)\n\n self.store.delete_many(records.values(), keys=keys)\n return records",
"def del_quiet(dic, *keys):\n for key in keys:\n try:\n del dic[key]\n except KeyError:\n pass",
"def deleteTable(*keys):\r\n\treturn getGameData().delTable(*keys)",
"def remove_keys(d, keys):\n pp = deepcopy(d)\n if isinstance(keys, (list, tuple)):\n for k in keys:\n pp.pop(k, None)\n else:\n pp.pop(keys, None)\n return pp",
"def del_dict_keys(dict_in, keys):\n for key in keys:\n if key in dict_in:\n del dict_in[key]\n return dict_in",
"def delete_keys_from_dict(d, keys):\n if isinstance(d, dict):\n for field in d.keys():\n if field in keys:\n del d[field]\n elif isinstance(d[field], dict) or isinstance(d[field], list) or isinstance(d[field], set):\n delete_keys_from_dict(d[field], keys)\n elif isinstance(d, dict) or isinstance(d, list) or isinstance(d, set):\n for i in d:\n delete_keys_from_dict(i, keys)",
"def multi_del(self, keys, no_update_log=False):\n # TODO: write better documentation: why would user need the no_update_log param?\n opts = (no_update_log and TyrantProtocol.RDBMONOULOG or 0)\n if not isinstance(keys, (list, tuple)):\n keys = list(keys)\n\n wait(self.proto.misc(\"outlist\", keys, opts))",
"def delete_keys_from_dict(dictionary, list_keys):\n for k in list_keys:\n try:\n del dictionary[k]\n except KeyError:\n pass\n for v in dictionary.values():\n if isinstance(v, dict):\n delete_keys_from_dict(v, list_keys)\n\n return dictionary",
"def del_seqs(self, keys):\n for j in range(len(keys)):\n del self._d_seqs[keys[j]]\n self._num_seqs = int(len(self._d_seqs))\n self._d_seqs = self._d_seqs\n self._seqs = list(self._d_seqs)",
"def delete_many(self, keys, version=None):\r\n res = 0\r\n for key in [self.make_key(k, version=version) for k in keys]:\r\n client = self.get_server(key)\r\n res += self.delete(key, client=client)\r\n return res",
"def except_keys(dic, *keys):\n ret = dic.copy()\n for key in keys:\n try:\n del ret[key]\n except KeyError:\n pass\n return ret",
"def rem(self, keys: Union[str, Iterable]):\n return(self.db.delVal(db=self.sdb, key=self._tokey(keys)))",
"def rem(self, keys: Union[str, Iterable]):\n return(self.db.delVal(db=self.sdb, key=self._tokey(keys)))",
"def rem(self, keys: Union[str, Iterable]):\n return(self.db.delVal(db=self.sdb, key=self._tokey(keys)))",
"def remove_keys(_dict, keys):\n if not _dict:\n return None\n new = dict(_dict)\n for key in keys:\n new.pop(key, None)\n return new",
"def try_del(d, keys):\n for key in keys:\n try:\n del d[key]\n except KeyError:\n pass",
"def remove_keys_from_dict(dictionary, keys):\n\n # Copy dictionary\n dictionary_updated = dictionary.copy()\n try:\n [dictionary_updated.pop(key) for key in keys]\n except:\n print(\"Error: No ratio and sampling strategy parameters\")\n return dictionary_updated",
"def keep_in_dictionary(self,dictionary,*keys):\r\n remove_keys = [k for k in dictionary if k not in keys]\r\n self.remove_from_dictionary(dictionary,*remove_keys)"
] | [
"0.8140219",
"0.8091643",
"0.80413496",
"0.8027012",
"0.7771675",
"0.7751918",
"0.7727257",
"0.7682047",
"0.7680383",
"0.7599218",
"0.7451378",
"0.7443246",
"0.73895985",
"0.7378481",
"0.7349482",
"0.7295384",
"0.72044706",
"0.7172078",
"0.71646804",
"0.711923",
"0.7093819",
"0.70718133",
"0.7067456",
"0.70660895",
"0.70660895",
"0.70660895",
"0.7063682",
"0.7059187",
"0.69828",
"0.6960323"
] | 0.8353874 | 0 |
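The record above flattens possibly nested key lists and issues one HDEL per field. As a hedged sketch with the real redis-py asyncio client (the hash name, keys, and connection details are placeholders), the flattened fields can also be removed in a single HDEL call, since HDEL accepts multiple fields:

import asyncio
import redis.asyncio as aioredis

def flatten(keys):
    # Recursively flatten nested lists/tuples of field names.
    out = []
    for key in keys:
        if isinstance(key, (list, tuple)):
            out.extend(flatten(key))
        else:
            out.append(key)
    return out

async def delete_fields(hash_name, *keys):
    client = aioredis.Redis()  # assumes a Redis server on localhost:6379
    fields = flatten(keys)
    if fields:
        # One round trip removes every field at once.
        await client.hdel(hash_name, *fields)
    await client.aclose()  # redis-py 5+; older releases use close()

asyncio.run(delete_fields("session:42", "a", ["b", ["c", "d"]]))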
It should create color shapes in the given data directory. | def test_create_shapes(data_dir):
dataset.create_shapes(10, 10, 1, data_dir=data_dir)
img_path = os.path.join(data_dir, "ellipse/0.png")
assert os.path.exists(img_path)
img = imageio.imread(img_path)
assert img.shape == (10, 10, 4) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_shapes_grayscale(data_dir):\n dataset.create_shapes(10, 10, 1, channels=1, data_dir=data_dir)\n img_path = os.path.join(data_dir, \"ellipse/0.png\")\n assert os.path.exists(img_path)\n img = imageio.imread(img_path)\n assert img.shape == (10, 10)",
"def load_shapes(self,count,img_folder,mask_folder,imglist,dataset_root_path):\n self.add_class(\"shapes\",1,\"red_s\")\n self.add_class(\"shapes\",2,\"red_m\")\n self.add_class(\"shapes\",3,\"red_l\")\n self.add_class(\"shapes\",4,\"yellow_s\")\n self.add_class(\"shapes\",5,\"yellow_m\")\n self.add_class(\"shapes\",6,\"yellow_l\")\n self.add_class(\"shapes\",7,\"green_s\")\n self.add_class(\"shapes\",8,\"green_m\")\n self.add_class(\"shapes\",9,\"green_l\")\n self.add_class(\"shapes\",10,\"blue_s\")\n self.add_class(\"shapes\",11,\"blue_m\")\n self.add_class(\"shapes\",12,\"blue_l\")\n self.add_class(\"shapes\",13,\"orange_s\")\n self.add_class(\"shapes\",14,\"orange_m\")\n self.add_class(\"shapes\",15,\"orange_l\")\n\n for i in range(count):\n filestr = imglist[i].split(\".\")[0]\n package_id = (int(filestr)-1)//30 + 1\n package_path = \"package%s\" % package_id\n # print(filestr)\n if mask_folder == 'mask/training_data/':\n mask_path = mask_folder+package_path +\"/image%s\" % filestr\n # print('====>',mask_path)\n csv_path_str = \"training_data/\"+package_path\n path_to_img = img_folder+'/'+package_path+ \"/%s.png\" % filestr\n else:\n mask_path = mask_folder + \"/image%s\" % filestr\n csv_path_str = img_folder\n path_to_img = img_folder+ \"/%s.png\" % filestr\n label_index = filestr\n # path_to_img = img_folder+ \"/%s.png\" % filestr\n # print(path_to_img)\n cv_img = cv2.imread(path_to_img)\n # print(cv_img)\n # resize_img = cv2.resize(cv_img,(384,384),interpolation = cv2.INTER_AREA)\n self.add_image(\"shapes\",image_id=i, path=path_to_img, csv_path=csv_path_str, width=cv_img.shape[1], height=cv_img.shape[0], mask_path=mask_path, label_index=label_index)",
"def makeDataset(numberOfTrials, data_type):\n\n\tdata_folder = data_type + \"_images\"\n\tlabel_file = os.path.join(dataset_params.data_path, data_type + \"_lables.csv\")\n\n\tutils.create_directory(dataset_params.data_path)\n\tutils.create_directory(os.path.join(dataset_params.data_path, data_folder))\n\n\tallowedRadius = utils.defineShapePerimeter()\n\tcolorsRGB = utils.defineColorValues()\n\tshapeDict = utils.defineShapeSides()\n\tpadding = dataset_params.padding\n\n\tnum = 0\n\toutput_images = [[\"figNum\", \"shape\", \"color\", \"size\", \"background\", \"quadrant\", \"radius\"]]\n\tfor c in dataset_params.colors: # for all 7 foreground colors \n\t\tfor q in dataset_params.quadrants: # for all 4 quadratns \n\t\t\tfor s in dataset_params.shapes: # for all 5 shapes\n\t\t\t\tfor k in dataset_params.sizes: # for all 3 sizes\n\t\t\t\t\tfor b in dataset_params.backgrounds: # for all 3 background colors\n\t\t\t\t\t\tfor i in range(numberOfTrials):\n\t\t\t\t\t\t\tfileName = os.path.join(dataset_params.data_path, data_folder, str(num) + \".png\")\n\t\t\t\t\t\t\tpresentQuadrant = dataset_params.quadrants[q]\n\t\t\t\t\t\t\tradius = random.randint(allowedRadius[s][k][0],allowedRadius[s][k][1])\n\n\t\t\t\t\t\t\tif(presentQuadrant == 3):\n\t\t\t\t\t\t\t\txMin = 128 + padding\n\t\t\t\t\t\t\t\txMax = 255 - radius\n\t\t\t\t\t\t\t\tyMin = 128 + padding\n\t\t\t\t\t\t\t\tyMax = 255 - radius\n\n\t\t\t\t\t\t\telif(presentQuadrant == 2):\n\t\t\t\t\t\t\t\txMin = 0 + radius\n\t\t\t\t\t\t\t\txMax = 128 - padding\n\t\t\t\t\t\t\t\tyMin = 128 + padding\n\t\t\t\t\t\t\t\tyMax = 255 - radius\n\n\t\t\t\t\t\t\telif(presentQuadrant == 1):\n\t\t\t\t\t\t\t\txMin = 0 + radius\n\t\t\t\t\t\t\t\txMax = 128 - padding\n\t\t\t\t\t\t\t\tyMin = 0 + radius\n\t\t\t\t\t\t\t\tyMax = 128 - padding\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\txMin = 128 + padding\n\t\t\t\t\t\t\t\txMax = 255 - radius\n\t\t\t\t\t\t\t\tyMin = 0 + radius\n\t\t\t\t\t\t\t\tyMax = 128 - padding\n\n\t\t\t\t\t\t\txCenter = random.randint(xMin, xMax)\n\t\t\t\t\t\t\tyCenter = random.randint(yMin, yMax)\n\t\t\t\t\t\t\tcenter = [xCenter, yCenter]\n\n\t\t\t\t\t\t\tif(s == \"circle\"):\n\t\t\t\t\t\t\t\toutput_images.append([num, \"circle\", c, k, b, presentQuadrant, radius])\n\t\t\t\t\t\t\t\timg = makeCircle(c, radius, center, b, colorsRGB)\n\t\t\t\t\t\t\t\timg = img[:,:,::-1]\n\t\t\t\t\t\t\t\tcv2.imwrite(fileName, img)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tn = shapeDict[s]\n\t\t\t\t\t\t\t\timg = makePolygon(center, n, radius, b, c, colorsRGB)\n\t\t\t\t\t\t\t\timg = img[:,:,::-1]\n\t\t\t\t\t\t\t\tcv2.imwrite(fileName, img)\n\t\t\t\t\t\t\t\toutput_images.append([num, s, c, k, b, presentQuadrant, radius])\n\t\t\t\t\t\t\tnum += 1\n\t\n\tprint(\"Number of image generated\", num)\n\n\tprint(\"Saving \" + data_type + \" data meta information to CSV ......\")\n\tdf = pd.DataFrame(output_images[1:], columns=output_images[0])\n\tdf.to_csv(label_file, index=False)\n\tprint(\"Saved \" + data_type + \" data meta information: \" + data_folder)\n\t\n\n\tprint(\"Saving \" + data_type + \" images data to npz(numpy) compressed file ......\")\n\tmake_npz_file(data_type)\n\tprint(\"Saved \" + data_type + \" images data to npz(numpy) compressed file!\")\n\t\n\treturn None",
"def make_props_files(labels, label_list, dir_path, data,\r\n background_color, label_color, prefs):\r\n cat_connected_num = 0\r\n mapping = data['map']\r\n groups_and_colors = iter_color_groups(mapping, prefs)\r\n for params in groups_and_colors:\r\n l = params[0]\r\n if l == \"SampleID\" or l == \"Description\":\r\n continue\r\n m = params[2]\r\n c = params[3]\r\n output = open(os.path.join(dir_path, \"props/custom.%s.props\" % l), 'w')\r\n props_str_list = [l] * 5\r\n props_str_list.append(','.join(map(str, label_color.toRGB())))\r\n props_str_list.extend([l] * 22)\r\n props_str_list.append(','.join(map(str, label_color.toRGB())))\r\n props_str_list.extend([l] * 16)\r\n props_str_list.append(props_edge % (l, l))\r\n props_str_list.append(l)\r\n props_str_list.append(\r\n '\\n'.join([props_edge_meta % (l, s, ','.join(map(str, c[n].toRGB())))\r\n for s, n in m.items()]))\r\n props_str_list.extend([l] * 109)\r\n props_str_list.append(props_node % (l, l))\r\n props_str_list.append(l)\r\n props_str_list.append(\r\n '\\n'.join([props_node_meta % (l, s, ','.join(map(str, c[n].toRGB())))\r\n for s, n in m.items()]))\r\n props_str_list.extend([l] * 48)\r\n props_str_list[98] = ','.join(map(str, background_color.toRGB()))\r\n props_str_list[109] = ','.join(map(str, label_color.toRGB()))\r\n props_str_list[132] = ','.join(map(str, label_color.toRGB()))\r\n output.write(props_file_str % tuple(props_str_list))\r\n output.close()",
"def create_data(data_size,heme, nucleotide, control, steroid,data_total,path_to_data):\n\n os.chdir(path_to_data)\n\n x_array = np.zeros(shape = (data_size,14,32,32,32))\n\n y_array = np.zeros(shape = data_size)\n\n print(\"data size = \", data_size)\n\n #training set :\n\n file_count = 0\n\n for file in data_total:\n\n y_array[file_count]= find_class(str(file), heme, nucleotide, control, steroid)\n\n x_array[file_count] = np.load(str(file+\".npy\"))\n\n file_count+=1\n\n\n return (x_array, y_array)",
"def make_all_charts(data, dir_path, filename, num_categories, colorby, args,\r\n color_data, prefs, background_color, label_color,\r\n chart_type, generate_image_type, plot_width, plot_height,\r\n bar_width, dpi, resize_nth_label, label_type,\r\n include_html_legend, include_html_counts):\r\n\r\n # iterate over the preferences and assign colors according to taxonomy\r\n img_data = []\r\n for label, f_name in data:\r\n raw_fpath = os.path.join(\r\n dir_path,\r\n 'raw_data',\r\n os.path.split(f_name)[-1])\r\n # move raw file to output directory\r\n shutil.copyfile(f_name, raw_fpath)\r\n\r\n f = color_data['counts'][f_name]\r\n level = max([len(t.split(';')) - 1 for t in f[1]])\r\n\r\n for key in prefs.keys():\r\n if prefs[key]['column'] != str(level):\r\n continue\r\n col_name = 'Taxon'\r\n mapping = [['Taxon']]\r\n mapping.extend([[m] for m in f[1]])\r\n if 'colors' in prefs[key]:\r\n if isinstance(prefs[key]['colors'], dict):\r\n pref_colors = prefs[key]['colors'].copy()\r\n # copy so we can mutate\r\n else:\r\n pref_colors = prefs[key]['colors'][:]\r\n else:\r\n pref_colors = {}\r\n labelname = prefs[key]['column']\r\n\r\n # Define groups and associate appropriate colors to each group\r\n groups = group_by_field(mapping, col_name)\r\n pref_colors, data_colors, data_color_order = \\\r\n get_group_colors(groups, pref_colors)\r\n\r\n updated_pref_colors = {}\r\n\r\n if chart_type == 'area' and len(f[0]) == 1:\r\n raise ValueError(\r\n 'When generating area charts, the number of samples (or category values) must be greater than 1. However, you can still produce a pie chart or bar chart with only 1 sample (or category value), but you must remove the area chart value from the input arguments.')\r\n\r\n for key in pref_colors:\r\n updated_pref_colors[key.replace('\"', '')] = pref_colors[key]\r\n\r\n for i, val in enumerate(f[1]):\r\n f[1][i] = val.replace('\"', '')\r\n\r\n # parse the counts and continue processing\r\n img_data.extend(get_counts(label.strip(), colorby, num_categories,\r\n dir_path, level, f, prefs, updated_pref_colors,\r\n background_color,\r\n label_color, chart_type, generate_image_type,\r\n plot_width, plot_height, bar_width, dpi, raw_fpath,\r\n resize_nth_label, label_type, include_html_legend,\r\n include_html_counts))\r\n\r\n # generate html filepath\r\n outpath = os.path.join(dir_path, '%s_charts.html' % chart_type)\r\n out_table = ''.join(img_data)\r\n # write out html file\r\n write_html_file(out_table, outpath)",
"def create_dataset(data_folder: str, dataset_file: str, targets_file: str = os.path.join('data', 'targets.pkl')):\n files = sorted(glob.glob(os.path.join(data_folder, '**/*.jpg'), recursive=True))\n images = []\n crop_sizes = []\n crop_centers = []\n targets = []\n for image in tqdm(files, desc='creating dataset', total=len(files)):\n img = Image.open(image)\n # quadruple dataset by vertical and horizontal flipping\n for i in range(4):\n if i == 1 or i == 3:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n if i == 2:\n img = img.transpose(Image.FLIP_TOP_BOTTOM)\n x, y, w, h, cx, cy = get_random_image_values()\n resized = img.resize((y, x), Image.LANCZOS) # mind thee: x and y swapped\n arr = np.array(resized, dtype=np.float32)\n arr, target_array = create_cropped_data(np.copy(arr), (w, h), (cx, cy), crop_only=False)\n images.append(arr)\n crop_sizes.append((w, h))\n crop_centers.append((cx, cy))\n targets.append(target_array)\n data = {'images': images, 'crop_sizes': crop_sizes, 'crop_centers': crop_centers}\n # persist on harddrive\n with open(dataset_file, 'wb') as f:\n pickle.dump(data, f)\n with open(targets_file, 'wb') as f:\n pickle.dump(targets, f)\n print(f'created datset and saved it to {dataset_file} and targets to {targets_file}')",
"def pyplot_colourise(self, folder_name):\n colourised_folder_name = folder_name + '_colourised'\n\n try:\n print(\"Making dir \" + str(colourised_folder_name) + \" for colourisation\")\n os.mkdir(colourised_folder_name)\n except OSError:\n print(\"Folder exists, have you already done this colourisation??\")\n return\n\n photo_list = self.get_photo_list(folder_name)\n\n for i, name in enumerate(photo_list):\n fig, ax = plt.subplots(figsize=(32, 16))\n file_name = folder_name + '/' + name\n colourised_image_name = colourised_folder_name + '/' + name\n image = cv2.imread(file_name, cv2.IMREAD_ANYDEPTH).astype(np.float32)\n go = ax.imshow(image, cmap='jet')\n fig.colorbar(go)\n fig.savefig(colourised_image_name)\n plt.close()",
"def load_shapes(self, count, img_floder, mask_floder, imglist, creatnpzfile:bool=True):\n # Add classes\n \n self.add_class(\"shapes\", 1, \"grasper\")\n self.add_class(\"shapes\", 2, \"grasper2\")\n self.add_class(\"shapes\", 3, \"grasper3\")\n self.add_class(\"shapes\", 4, \"irrigator\")\n self.add_class(\"shapes\", 5, \"hook\")\n self.add_class(\"shapes\", 6, \"clipper\")\n\n # Add images\n # Generate random specifications of images (i.e. color and\n # list of shapes sizes and locations). This is more compact than\n # actual images. Images are generated on the fly in load_image().\n for i in range(count):\n img = imglist[i]\n if img.endswith(\".jpg\"):\n img_name = img.split(\".\")[0]\n img_path = os.path.join(img_floder,img)\n mask_path = os.path.join(mask_floder,img_name+\".png\")\n #save the mask infomation with numpy\n mask_info = None\n \n if not os.path.exists(os.path.join(mask_infofloder,\"{}.npz\".format(img_name))):\n mask_info = self.load_mask_pre(i,mask_path)\n np.savez(os.path.join(mask_infofloder,img_name),mask_ = mask_info[0], id_=mask_info[1])\n else:\n data = np.load(os.path.join(mask_infofloder,\"{}.npz\".format(img_name)))\n mask_info = data['mask_'],data['id_']\n\n self.add_image(\"shapes\", image_id=i, path=img_path, name=img_name, mask_path=mask_path, mask_info=mask_info)\n sys.stdout.write('-------creating the np file:--%s-------------pross:--%.4f%%--'%(os.path.join(mask_infofloder,\"{}.npz\".format(img_name)),\n (i+1)/float(count)*100))\n sys.stdout.write('\\r')\n sys.stdout.flush()",
"def test_gen_colors(self):\n result = magic.gen_colors(\"tests/test_files/test.jpg\")\n self.assertEqual(result[0], \"#0F191A\")",
"def prepare_data(data_path, val_data_path, patch_size,stride,scales = [1, 0.9, 0.8, 0.7],\n max_num_patches=None, aug_times=1,random_aug=False, gray_mode=False):\n # training database\n print('> Training database')\n types = ('*.bmp', '*.png')\n files = []\n for tp in types:\n files.extend(glob.glob(os.path.join(data_path, tp)))\n files.sort()\n\n if gray_mode:\n traindbf = './data/set400_p64.h5'\n valdbf = './data/set12.h5'\n else:\n traindbf = './data/train_rgb.h5'\n valdbf = './data/val_rgb.h5'\n\n if max_num_patches is None:\n max_num_patches = 5000000\n print(\"\\tMaximum number of patches not set\")\n else:\n print(\"\\tMaximum number of patches set to {}\".format(max_num_patches))\n train_num = 0\n i = 0\n with h5py.File(traindbf, 'w') as h5f:\n while i < len(files) and train_num < max_num_patches:\n imgor = cv2.imread(files[i])\n # h, w, c = img.shape\n for sca in scales:\n img = cv2.resize(imgor, (0, 0), fx=sca, fy=sca, \\\n interpolation=cv2.INTER_CUBIC)\n if not gray_mode:\n # CxHxW RGB image\n img = (cv2.cvtColor(img, cv2.COLOR_BGR2RGB)).transpose(2, 0, 1)\n else:\n # CxHxW grayscale image (C=1)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = np.expand_dims(img, 0)\n img = normalize(img)\n patches = img_to_patches(img, win=patch_size, stride=stride)\n print(\"\\tfile: %s scale %.1f # samples: %d\" % \\\n (files[i], sca, patches.shape[3] * 8))\n for nx in range(patches.shape[3]):\n if random_aug == False:\n for j in range(aug_times):\n data = data_augmentation(patches[:, :, :, nx].copy(), j)\n h5f.create_dataset(str(train_num), data=data)\n train_num += 1\n else:\n for j in range(aug_times):\n data = data_augmentation(patches[:, :, :, nx].copy(), random.randint(0, 7))\n h5f.create_dataset(str(train_num), data=data)\n train_num += 1\n i += 1\n # validation database\n print('\\n> Validation database')\n files = []\n for tp in types:\n files.extend(glob.glob(os.path.join(val_data_path, tp)))\n files.sort()\n h5f = h5py.File(valdbf, 'w')\n val_num = 0\n for i, item in enumerate(files):\n print(\"\\tfile: %s\" % item)\n img = cv2.imread(item)\n if not gray_mode:\n # C. H. W, RGB image\n img = (cv2.cvtColor(img, cv2.COLOR_BGR2RGB)).transpose(2, 0, 1)\n else:\n # C, H, W grayscale image (C=1)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = np.expand_dims(img, 0)\n\n C,H,W=img.shape\n\n # if H % 2 == 1:\n # \timg = img[:, :-1, :]\n # if W % 2 == 1:\n # \timg = img[:, :, :-1]\n\n img = normalize(img)\n h5f.create_dataset(str(val_num), data=img)\n val_num += 1\n h5f.close()\n\n print('\\n> Total')\n print('\\ttraining set, # samples %d' % train_num)\n print('\\tvalidation set, # samples %d\\n' % val_num)",
"def create_data_folders() -> None:\n if not os.path.exists(\"data/save\"):\n os.mkdir(\"./data\")\n os.mkdir(\"./data/save\")\n if not os.path.exists(\"data/critics\"):\n os.mkdir(\"./data/critics\")\n if not os.path.exists('data/policies/'):\n os.mkdir('data/policies/')\n if not os.path.exists('data/results/'):\n os.mkdir('data/results/')",
"def init_color_space(color_path):\n # type: (str) -> None\n color_space = np.zeros((256, 256, 256), dtype=np.uint8)\n if color_path.endswith('.yaml'):\n with open(color_path, 'r') as stream:\n try:\n color_values = yaml.safe_load(stream)\n except yaml.YAMLError as exc:\n # TODO: what now??? Handle the error?\n pass\n # pickle-file is stored as '.txt'\n elif color_path.endswith('.txt'):\n try:\n with open(color_path, 'rb') as f:\n color_values = pickle.load(f)\n except pickle.PickleError as exc:\n pass\n \n # compatibility with colorpicker\n if 'color_values' in color_values.keys():\n color_values = color_values['color_values']['greenField']\n length = len(color_values['red'])\n if length == len(color_values['green']) and \\\n length == len(color_values['blue']):\n # setting colors from yaml file to True in color space\n for x in range(length):\n color_space[color_values['blue'][x],\n color_values['green'][x],\n color_values['red'][x]] = 1\n print(\"Imported color space\")\n return color_space",
"def setUp(self):\r\n\r\n self.data = {}\r\n self.data['xaxis'] = [10.0]\r\n self.sample_dict = {'Sample1': {10.00: [1.3276140000000001]}}\r\n self.data['yvals'] = {'Sample1': [1.3276140000000001]}\r\n self.data['err'] = {'Sample1': [.1]}\r\n self.xmax = 140\r\n self.ymax = 20\r\n self.std_type = 'stddev'\r\n self.ops = ['Sample1']\r\n self.mapping_category = 'SampleID'\r\n self.imagetype = 'png'\r\n self.resolution = 70\r\n self.mapping_lookup = {'SampleID-Sample1': 'col_0_row_0'}\r\n self.data['map'] = [['SampleID', 'Day'], ['Sample1', 'Day1']]\r\n self.color_prefs = {'SampleID': {'column': 'SampleID', 'color':\r\n {'Sample1': '#ff0000'}}}\r\n self.groups = {'Sample1': ['Sample1']}\r\n self.background_color = 'black'\r\n self.label_color = 'white'\r\n self.labelname = 'SampleID'\r\n self.rare_data = {'color': {'Sample1': '#ff0000'},\r\n 'series': {'Sample1': [2.0515300000000001], },\r\n 'headers': ['test.txt', 'SampleID'], 'xaxis': [10.0],\r\n 'error': {'Sample1': [0.0]}, 'options': ['Sample1']}\r\n self.fpath = '/tmp/'\r\n self.output_dir = '/tmp/'\r\n self.metric_name = 'test'\r\n self._paths_to_clean_up = []\r\n self._folders_to_cleanup = []\r\n self.rarefaction_file_data = [[10.0, 0.0, 1.0], [10.0, 1.0, 3.0]]\r\n d = {'redtowhite3_0': '#7fff00', 'redtowhite3_1': '#7fff00'}\r\n self.data_colors = color_dict_to_objects(d)\r\n self.colors = {'Sample1': 'redtowhite3_0', 'Sample2': 'redtowhite3_1'}\r\n self.colors2 = {'Sample1': 'redtowhite3_0'}\r\n self.mappingfile = ['#SampleID\\tSex\\tAge',\r\n '123\\tF\\t32',\r\n '234\\tM\\t30',\r\n '345\\tM\\t32']\r\n # self.p_mappingfile = parse_mapping_file(self.mappingfile,\\\r\n # strip_quotes=True)\r\n self.rarefactionfile = [\r\n '\\tsequences per sample\\titeration\\t123\\t234\\t345',\r\n 'rare10.txt\\t10\\t0\\t1.99181\\t0.42877\\t2.13996',\r\n 'rare10.txt\\t10\\t1\\t2.07163\\t0.42877\\t2.37055',\r\n 'rare310.txt\\t310\\t0\\t8.83115\\t0.42877\\t11.00725',\r\n 'rare310.txt\\t310\\t1\\t10.05242\\t0.42877\\t8.24474',\r\n 'rare610.txt\\t610\\t0\\t12.03067\\t0.42877\\t11.58928',\r\n 'rare610.txt\\t610\\t1\\t12.9862\\t0.42877\\t11.58642']\r\n\r\n self.rares = {'test.txt': (['', 'sequences per sample', 'iteration',\r\n 'Sample1'], [], ['rare1.txt', 'rare2.txt'],\r\n [[10.0, 2.0, 7.0, 7.0, 9.0], [10.0, 2.0, 7.0, 7.0, 9.0]])}\r\n self.col_headers, self.comments, self.rarefaction_fns, \\\r\n self.rarefaction_data = parse_rarefaction(self.rarefactionfile)\r\n self.matrix, self.seqs_per_samp, self.sampleIDs = \\\r\n get_rarefaction_data(self.rarefaction_data, self.col_headers)\r\n self.ave_seqs_per_sample1 = {'Sample1': [2.03172, 9.4417849999999994,\r\n 12.508435]}\r\n self.ave_seqs_per_sample = {'123': [2.03172, 9.4417849999999994,\r\n 12.508435], '234': [0.42876999999999998, 0.42876999999999998,\r\n 0.42876999999999998], '345': [2.255255, 9.625995, 11.58785]}\r\n self.collapsed_ser_sex = {'M': [1.3420125000000001, 5.0273824999999999,\r\n 6.0083099999999998], 'F': [2.03172, 9.4417849999999994, 12.508435]}\r\n self.err_ser_sex = {'M': [0.91324250000000007, 4.5986124999999998,\r\n 5.5795399999999997], 'F': [0.0, 0.0, 0.0]}\r\n self.rarefaction_legend_mat_init = {'test': {'SampleID': {}}}\r\n self.col_headers2 = [\r\n '', 'sequences per sample', 'iteration', 'Sample1',\r\n 'Sample2']\r\n\r\n self.rarefaction_data_mat = {\r\n 'SampleID': {'Sample1': {'test': {'ave': [' 7.000'], 'err': [' nan']}}}}\r\n\r\n self.rarefaction_legend_mat = {\r\n 'test': {\r\n 'samples': {\r\n 'Sample1': {\r\n 'color': '#ff0000',\r\n 'link': 'html_plots/testcol_0_row_0.png'}},\r\n 
'groups': {\r\n 'SampleID': {\r\n 'Sample1': {\r\n 'groupcolor': '#ff0000',\r\n 'groupsamples': [\r\n 'Sample1']}}}}}\r\n self.exp_err_series_ave = {'M':\r\n [1.571915, 6.49885, 8.1750183333333339]}",
"def colourise_image(self, folder_name):\n colourised_folder_name = folder_name + '_colourised'\n\n try:\n print(\"Making dir \" + str(colourised_folder_name) + \" for colourisation\")\n os.mkdir(colourised_folder_name)\n except OSError:\n print(\"Folder exists, have you already done this colourisation??\")\n return\n\n print(\"Writing to folder +\" + str(colourised_folder_name))\n photo_list = self.get_photo_list(folder_name)\n for i, name in enumerate(photo_list):\n file_name = folder_name + '/' + name\n colourised_image_name = colourised_folder_name + '/' + name\n image = cv2.imread(file_name, cv2.IMREAD_GRAYSCALE)\n image_8bit = image.astype(np.uint8)\n colour_image = cv2.applyColorMap(image_8bit, cv2.COLORMAP_JET)\n cv2.imwrite(colourised_image_name, colour_image)",
"def _create_layout(root_dir, subsets):\n _create_folder(os.path.join(root_dir, \"images\"))\n _create_folder(os.path.join(root_dir, \"labels\"))\n\n for subset in subsets:\n _create_folder(os.path.join(root_dir, \"images\", subset))\n _create_folder(os.path.join(root_dir, \"labels\", subset))",
"def load_data_in_folder(self):\n if self.data_filenames:\n print('removing existing data files')\n for f in tqdm(self.data_filenames):\n os.remove(f)\n print('loading files in data folder')\n n = len(self.filenames)\n idx_max = n // self.batch_size\n for idx in tqdm(range(0, idx_max-1)):\n data = []\n for f in self.filenames[idx:idx+self.batch_size]:\n img = cv2.imread(f, int(self.color))\n if not self.color:\n img = np.expand_dims(img, axis=-1)\n data.append(img)\n data = np.array(data)\n data = data.astype('float32')\n data = (data - 127.5)/127.5\n np.save(op.join(self.data_path, str(idx)), data)\n # TODO last batch ?\n self.data_filenames = sorted(glob(op.join(self.data_path, '*.npy')))",
"def _preprocess_data(self, name, directory):\n if name.endswith('data'):\n for path in glob(str(directory / '**/*.jpg'), recursive=True):\n try:\n with Image.open(path) as img:\n if not name.startswith('feature'):\n img = img.rotate(-90, 0, 1)\n img = img.resize(self.input_shape)\n except (ValueError, OSError):\n print(\"Couldn't open {}\".format(path))\n else:\n path = Path(path)\n filename = path.name.split('img-')[1]\n target = (path.parent / filename).with_suffix('.image.png')\n img.save(target, 'PNG')\n os.remove(str(path))\n elif name.endswith('targets'):\n for path in glob(str(directory / '**/*.mat'), recursive=True):\n try:\n mat = spio.loadmat(path)['depthMap']\n img = spmisc.toimage(mat).resize(self.target_shape)\n except ValueError:\n print(\"Couldn't open {}\".format(path))\n else:\n path = Path(path)\n name = path.name[path.name.index('-') + 1:]\n target = (path.parent / name).with_suffix('.depth.png')\n img.save(target, 'PNG')\n os.remove(str(path))",
"def _generate_dataset(self):\n # create train images\n train_path = os.path.join(self.root_dir, \"shapes\", \"train\", \"good\")\n os.makedirs(train_path, exist_ok=True)\n for i in range(self.num_train):\n result = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=self.train_shapes,\n generate_mask=False,\n )\n image = result[\"image\"]\n imsave(os.path.join(train_path, f\"{i:03}.png\"), image, check_contrast=False)\n\n # create test images\n for test_category in self.test_shapes:\n test_path = os.path.join(self.root_dir, \"shapes\", \"test\", test_category)\n mask_path = os.path.join(self.root_dir, \"shapes\", \"ground_truth\", test_category)\n os.makedirs(test_path, exist_ok=True)\n os.makedirs(mask_path, exist_ok=True)\n # anomaly and masks. The idea is to superimpose anomalous shapes on top of correct ones\n for i in range(self.num_test):\n correct_shapes = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=self.train_shapes,\n generate_mask=False,\n )\n result = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=[test_category],\n generate_mask=True,\n )\n correct_shapes = correct_shapes[\"image\"]\n image, mask = result[\"image\"], result[\"mask\"]\n image = np.minimum(image, correct_shapes) # since 255 is white\n imsave(os.path.join(test_path, f\"{i:03}.png\"), image, check_contrast=False)\n imsave(os.path.join(mask_path, f\"{i:03}_mask.png\"), mask, check_contrast=False)\n # good test\n test_good = os.path.join(self.root_dir, \"shapes\", \"test\", \"good\")\n os.makedirs(test_good, exist_ok=True)\n for i in range(self.num_test):\n result = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=self.train_shapes,\n )\n image = result[\"image\"]\n imsave(os.path.join(test_good, f\"{i:03}.png\"), image, check_contrast=False)",
"def create_data_lists(voc07_path, voc08_path, voc09_path, voc10_path, voc12_path, output_folder):\n voc07_path = os.path.abspath(voc07_path)\n voc08_path = os.path.abspath(voc08_path)\n voc09_path = os.path.abspath(voc09_path)\n voc10_path = os.path.abspath(voc10_path)\n voc12_path = os.path.abspath(voc12_path)\n \n voc_labels = ('aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable',\n 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor')\n label_map = {k: v + 1 for v, k in enumerate(voc_labels)}\n label_map['background'] = 0\n rev_label_map = {v: k for k, v in label_map.items()} # Inverse mapping\n\n # Color map for bounding boxes of detected objects from https://sashat.me/2017/01/11/list-of-20-simple-distinct-colors/\n distinct_colors = ['#e6194b', '#3cb44b', '#ffe119', '#0082c8', '#f58231', '#911eb4', '#46f0f0', '#f032e6',\n '#d2f53c', '#fabebe', '#008080', '#000080', '#aa6e28', '#fffac8', '#800000', '#aaffc3', '#808000',\n '#ffd8b1', '#e6beff', '#808080', '#FFFFFF']\n label_color_map = {k: distinct_colors[i] for i, k in enumerate(label_map.keys())}\n \n train_images = list()\n train_objects = list()\n n_objects = 0\n \n path_list = [voc07_path, \n# voc08_path, \n# voc09_path, \n# voc10_path, \n voc12_path\n ]\n # Training data\n for path in path_list:\n\n # Find IDs of images in training data\n with open(os.path.join(path, 'ImageSets/Main/trainval.txt')) as f:\n ids = f.read().splitlines()\n \n for id in ids:\n # Parse annotation's XML file\n objects, size = parse_annotation(os.path.join(path, 'Annotations', id + '.xml'))\n # \n if len(objects) == 0:\n continue\n if size[1] ==500:\n n_objects += len(objects)\n train_objects.append(objects)\n train_images.append(os.path.join(path, 'JPEGImages', id + '.jpg'))\n\n assert len(train_objects) == len(train_images)\n\n # Save to file\n with open(os.path.join(output_folder, 'TRAIN_images.json'), 'w') as j:\n json.dump(train_images, j)\n with open(os.path.join(output_folder, 'TRAIN_objects.json'), 'w') as j:\n json.dump(train_objects, j)\n with open(os.path.join(output_folder, 'label_map.json'), 'w') as j:\n json.dump(label_map, j) # save label map too\n\n print('\\nThere are %d training images containing a total of %d objects. Files have been saved to %s.' % (\n len(train_images), n_objects, os.path.abspath(output_folder)))\n\n # Validation data\n test_images = list()\n test_objects = list()\n n_objects = 0\n\n # Find IDs of images in validation data\n with open(os.path.join(voc07_path, 'ImageSets/Main/val.txt')) as f: # test\n ids = f.read().splitlines()\n\n for i, id in enumerate(ids):\n# #TEST CODE\n# if i>=4:\n# break\n # Parse annotation's XML file\n objects, size = parse_annotation(os.path.join(voc07_path, 'Annotations', id + '.xml'))\n if len(objects) == 0:\n continue\n if size[1] ==500:\n test_objects.append(objects)\n n_objects += len(objects)\n test_images.append(os.path.join(voc07_path, 'JPEGImages', id + '.jpg'))\n\n assert len(test_objects) == len(test_images)\n\n # Save to file\n with open(os.path.join(output_folder, 'TEST_images.json'), 'w') as j:\n json.dump(test_images, j)\n with open(os.path.join(output_folder, 'TEST_objects.json'), 'w') as j:\n json.dump(test_objects, j)\n\n print('\\nThere are %d validation images containing a total of %d objects. Files have been saved to %s.' % (\n len(test_images), n_objects, os.path.abspath(output_folder)))",
"def generate_dat_files(rspecs, datroot, bands, labels):\n d = ds9.ds9()\n d.set('rgb')\n d.set('rgb red')\n\n # Save plaintext projection data\n # Idea: minimize file (band) loading operations\n for fname, flab in zip(bands, labels):\n d.set('file ' + fname) # Load a band\n for i in xrange(len(rspecs)):\n d.set('regions', rspecs[i]) # Load a region\n d.set('rgb red') # Plot projection data\n dat_fname = '{0}_{1:02d}_band_{2}.dat'.format(datroot, i+1, flab)\n d.set('plot {0} save {1}'.format(d.get('plot'), dat_fname))\n d.set('regions delete all')\n d.set('exit')",
"def create_folder():\n directory = \"data/\"\n if not os.path.exists(directory):\n os.makedirs(directory)\n logging.info(\"Data folder created.\")\n else:\n logging.info(\"Data folder already existed.\")",
"def generate_2d_plots(prefs, data, html_dir_path, data_dir_path, filename,\r\n background_color, label_color, generate_scree):\r\n coord_tups = [(\"1\", \"2\"), (\"3\", \"2\"), (\"1\", \"3\")]\r\n mapping = data['map']\r\n out_table = ''\r\n # Iterate through prefs and generate html files for each colorby option\r\n # Sort by the column name first\r\n sample_location = {}\r\n\r\n groups_and_colors = iter_color_groups(mapping, prefs)\r\n groups_and_colors = list(groups_and_colors)\r\n\r\n for i in range(len(groups_and_colors)):\r\n labelname = groups_and_colors[i][0]\r\n groups = groups_and_colors[i][1]\r\n colors = groups_and_colors[i][2]\r\n data_colors = groups_and_colors[i][3]\r\n data_color_order = groups_and_colors[i][4]\r\n\r\n data_file_dir_path = mkdtemp(dir=data_dir_path)\r\n\r\n new_link = os.path.split(data_file_dir_path)\r\n data_file_link = os.path.join('.', os.path.split(new_link[-2])[-1],\r\n new_link[-1])\r\n\r\n new_col_name = labelname\r\n img_data = {}\r\n plot_label = labelname\r\n\r\n if 'support_pcoas' in data:\r\n matrix_average, matrix_low, matrix_high, eigval_average, m_names = \\\r\n summarize_pcoas(data['coord'], data['support_pcoas'],\r\n method=data['ellipsoid_method'])\r\n data['coord'] = \\\r\n (m_names, matrix_average, data['coord'][2], data['coord'][3])\r\n for i in range(len(m_names)):\r\n sample_location[m_names[i]] = i\r\n else:\r\n matrix_average = None\r\n matrix_low = None\r\n matrix_high = None\r\n eigval_average = None\r\n m_names = None\r\n iterator = 0\r\n\r\n for coord_tup in coord_tups:\r\n if isarray(matrix_low) and isarray(matrix_high) and \\\r\n isarray(matrix_average):\r\n coord_1r = asarray(matrix_low)\r\n coord_2r = asarray(matrix_high)\r\n mat_ave = asarray(matrix_average)\r\n else:\r\n coord_1r = None\r\n coord_2r = None\r\n mat_ave = None\r\n sample_location = None\r\n\r\n coord_1, coord_2 = coord_tup\r\n img_data[coord_tup] = draw_pcoa_graph(\r\n plot_label, data_file_dir_path,\r\n data_file_link, coord_1, coord_2,\r\n coord_1r, coord_2r, mat_ave,\r\n sample_location,\r\n data, prefs, groups, colors,\r\n background_color, label_color,\r\n data_colors, data_color_order,\r\n generate_eps=True)\r\n\r\n out_table += TABLE_HTML % (labelname,\r\n \"<br>\".join(img_data[(\"1\", \"2\")]),\r\n \"<br>\".join(img_data[(\"3\", \"2\")]),\r\n \"<br>\".join(img_data[(\"1\", \"3\")]))\r\n\r\n if generate_scree:\r\n data_file_dir_path = mkdtemp(dir=data_dir_path)\r\n new_link = os.path.split(data_file_dir_path)\r\n data_file_link = os.path.join(\r\n '.',\r\n os.path.split(new_link[-2])[-1],\r\n new_link[-1])\r\n\r\n img_src, download_link = draw_scree_graph(\r\n data_file_dir_path, data_file_link, background_color,\r\n label_color, generate_eps=True, data=data)\r\n\r\n out_table += SCREE_TABLE_HTML % (\"<br>\".join((img_src, download_link)))\r\n\r\n outfile = create_html_filename(filename, '.html')\r\n outfile = os.path.join(html_dir_path, outfile)\r\n\r\n write_html_file(out_table, outfile)",
"def process_data(output_folder):\n # select imgs\n img_folder = join(output_folder, 'img')\n select_img(output_folder, img_folder, 'HE-green')\n\n mask_folder = join(output_folder, 'mask')\n select_img(output_folder, mask_folder, '_EF5')",
"def create_dataset_folder_structure():\n\n path = Path(f'{DATASETS}/{FEATURES_DATASET}')\n if not os.path.exists(path):\n print(f'\\nWARNING: The path does not exist. Creating new directory...\\n{path}\\n')\n os.mkdir(path)\n\n try:\n for path in new_sensor_paths:\n if not os.path.exists(path):\n print(f'\\nWARNING: The path does not exist. Creating new directory...\\n{path}\\n')\n os.mkdir(path)\n else:\n print(\"\\nPath already exists!\")\n except:\n return False\n else:\n return True",
"def extract_shapefuns(self, parent_folder):\n shapelist = []\n cwd = os.getcwd()\n print('Extracting shapes')\n has_shapefun_file = False\n found_shapes = False #to see if KKRnano produced some shapes as output (better than using shapefun)\n parent_folder_listdir = parent_folder.listdir() #quicker this way as based on SSH tunneling\n for filename in parent_folder_listdir:\n if filename.find('shape.') >= 0:\n if has_shapefun_file:\n shapelist = []\n abs_path = f'{cwd}/{filename}'\n parent_folder.getfile(filename, f'{cwd}/{filename}')\n self.put_object_from_file(abs_path, filename)\n self.set_attribute(filename.replace('.', ''), filename)\n os.remove(filename)\n with self.open(filename, 'r') as _f:\n shapelist.append(SinglefileData(_f.name))\n print('Found shape in repsitory:')\n print(_f.name)\n has_shapefun_file = False\n found_shapes = True\n if 'shapefun' in parent_folder_listdir and not found_shapes:\n filename = 'shapefun'\n print('Shapefun in dir, this part of the program might need more testing')\n abs_path = f'{cwd}/{filename}'\n parent_folder.getfile(filename, f'{cwd}/{filename}')\n\n with open(filename, 'r') as reader:\n shapes = reader.readlines()\n\n\n# print(os.path.realpath(reader.name))\n lines = []\n for line in range(len(shapes)):\n if shapes[line].find('Shape') > 0:\n lines.append(line)\n lines.append(len(shapes))\n for j in range(len(lines) - 1):\n shape_string = ''\n for k in range(lines[j], lines[j + 1]):\n shape_string += shapes[k]\n\n shape_no_filename = 'shape.' + str(j + 1).zfill(7)\n with open(shape_no_filename, 'w') as file:\n file.write(shape_string)\n path = os.path.realpath(file.name)\n print(path)\n abs_path = f'{cwd}/{shape_no_filename}'\n self.put_object_from_file(abs_path, shape_no_filename) #Problem has to be called via instance\n self.set_attribute(shape_no_filename.replace('.', ''), shape_no_filename)\n with self.open(shape_no_filename, 'r') as _f:\n shapelist.append(SinglefileData(_f.name))\n os.remove(shape_no_filename)\n has_shapefun_file = True\n if has_shapefun_file:\n print(\n 'WARNING: Only a shapefun from some Voronoi input was found, it is possible that the potential does not match the shapefun parameters, unless they are set this way explicitly in the respective input file! It is advisable to use the `write_shapes=1` command in input.conf'\n )\n print('Found shapelist:')\n print(shapelist)\n return shapelist",
"def generate_colour_data(width, height, imagiry_data, pixel2coord):\n for i in range(1, height):\n for j in range(1, width):\n colour_data.append(\n [\n pixel2coord(j, i)[0],\n pixel2coord(j, i)[1],\n imagiry_data.read([1])[0][i - 1][j - 1],\n \n ]\n )",
"def makeLocationPtShapefile(config,locationData):\n\n\n # set up the shapefile driver\n driver = ogr.GetDriverByName(\"ESRI Shapefile\")\n\n num_years = config.EndYear\n \n for iteration in range(config.MinimumIteration, config.MaximumIteration + 1):\n for year in range(1, num_years + 1):\n\n shapeFilename = config.getOutputFilePath(cc.COLLAR_VALUES_SHAPEFILE_FILENAME.format(iteration,year))\n\n # delete the shapefile if it already exists\n if os.path.exists(shapeFilename):\n driver.DeleteDataSource(shapeFilename)\n if os.path.exists(shapeFilename):\n sys.exit(\"Unable to delete existing Shapefile '{0}'\".format(shapeFilename))\n\n # create the data source\n data_source = driver.CreateDataSource(shapeFilename)\n\n # create the spatial reference, WGS84\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n\n # create the layer\n layer = data_source.CreateLayer(\"location\", srs, ogr.wkbPoint)\n\n # Add the fields we're interested in\n # ITERATION_ID,YEAR_ID,JULIAN_DAY,STRATUM_ID,HARVEST_ZONE,LAT, LON,OUT_OF_BOUNDS,DISTANCE\n # DEVNOTE: Shapefiles seem bound to 10 character limit\n layer.CreateField(ogr.FieldDefn(\"ITER_ID\", ogr.OFTInteger))\n layer.CreateField(ogr.FieldDefn(\"YEAR_ID\", ogr.OFTInteger))\n layer.CreateField(ogr.FieldDefn(\"JULIAN_DAY\", ogr.OFTInteger))\n layer.CreateField(ogr.FieldDefn(\"STRATUM_ID\", ogr.OFTInteger))\n layer.CreateField(ogr.FieldDefn(\"LAT\", ogr.OFTReal))\n layer.CreateField(ogr.FieldDefn(\"LON\", ogr.OFTReal))\n layer.CreateField(ogr.FieldDefn(\"DIST_KM\", ogr.OFTReal))\n layer.CreateField(ogr.FieldDefn(\"REL_ZOI\", ogr.OFTString))\n layer.CreateField(ogr.FieldDefn(\"RAA\", ogr.OFTString))\n\n # Process the text file and add the attributes and features to the shapefile\n for row in locationData:\n \n # Filter by iteration and timestep\n if row['ITERATION_ID'] == iteration:\n if row['YEAR_ID'] == year:\n # create the feature\n feature = ogr.Feature(layer.GetLayerDefn())\n # Set the attributes using the values from the delimited text file\n feature.SetField(\"ITER_ID\", row['ITERATION_ID'])\n feature.SetField(\"YEAR_ID\", row['YEAR_ID'])\n feature.SetField(\"JULIAN_DAY\", row['JULIAN_DAY'])\n feature.SetField(\"STRATUM_ID\", row['STRATUM_ID'])\n feature.SetField(\"LAT\", row['LAT'])\n feature.SetField(\"LON\", row['LON'])\n feature.SetField(\"DIST_KM\", row['DISTANCE'])\n feature.SetField(\"REL_ZOI\", row['RELATION_TO_ZOI'])\n feature.SetField(\"RAA\", row['RANGE_ASSESSMENT_AREA'])\n\n # create the WKT for the feature using Python string formatting\n wkt = \"POINT(%f %f)\" % (float(row['LON']) , float(row['LAT']))\n\n # Create the point from the Well Known Txt\n point = ogr.CreateGeometryFromWkt(wkt)\n\n # Set the feature geometry using the point\n feature.SetGeometry(point)\n # Create the feature in the layer (shapefile)\n layer.CreateFeature(feature)\n # Destroy the feature to free resources\n feature.Destroy()\n\n # Destroy the data source to free resources\n data_source.Destroy()\n\n print (\"\\n\\tConverted Collar Points Values into Shapefile for Iteration/Year {0}/{1}. Output file:'{2}'\".format(iteration, year, shapeFilename))",
"def color_raster_from_shapes(target_bounds, target_dx, shapes, shape_colors,\n shapes_crs, nodata=-1):\n assert(len(shapes) == len(shape_colors))\n assert(len(shapes) > 0)\n \n dtype = np.dtype(type(shape_colors[0]))\n \n target_x0 = np.round(target_bounds[0] - target_dx/2)\n target_y1 = np.round(target_bounds[3] + target_dx/2)\n width = int(np.ceil((target_bounds[2] + target_dx/2 - target_x0)/target_dx))\n height = int(np.ceil((target_y1 - target_bounds[1] - target_dx/2)/target_dx))\n\n out_bounds = [target_x0, target_y1 - target_dx*height, target_x0 + target_dx*width, target_y1]\n\n logging.info('Coloring shapes onto raster:')\n logging.info(' target_bounds = {}'.format(target_bounds))\n logging.info(' out_bounds = {}'.format(out_bounds))\n logging.info(' pixel_size = {}'.format(target_dx))\n logging.info(' width = {}, height = {}'.format(width, height))\n logging.info(' and {} independent colors of dtype {}'.format(len(set(shape_colors)), dtype))\n\n transform = rasterio.transform.from_origin(target_x0, target_y1, target_dx, target_dx)\n \n out_profile = {'height':height,\n 'width':width,\n 'count':1,\n 'dtype':dtype,\n 'crs':workflow.crs.to_rasterio(shapes_crs),\n 'transform':transform,\n 'nodata':nodata}\n \n out = nodata * np.ones((height, width), dtype)\n for p, p_id in zip(shapes, shape_colors):\n mask = rasterio.features.geometry_mask([p,], out.shape, transform, invert=True)\n out[mask] = p_id\n return out, out_profile, out_bounds",
"def create_patches(data, patch_shape):\n\n imgs = []\n\n if data[0].shape[0] == test_size:\n step_length = (test_size - patch_shape[0]) // 2 # 176\n else:\n step_length = (training_size - patch_shape[0])\n\n for i in range(data.shape[0]):\n if len(patch_shape) == 3: # RGB images\n patches = patchify(data[i], patch_shape, step=step_length)\n patches = patches.reshape((-1, patch_shape[0], patch_shape[1], patch_shape[2]))\n imgs.extend(patches)\n else:\n patches = patchify(data[i], patch_shape, step=step_length)\n patches = patches.reshape((-1, patch_shape[0], patch_shape[1]))\n imgs.extend(patches)\n\n return np.asarray(imgs)"
] | [
"0.6856443",
"0.618052",
"0.60780936",
"0.60242367",
"0.5808405",
"0.57668114",
"0.5690427",
"0.56130856",
"0.5600461",
"0.55399936",
"0.54958624",
"0.5479186",
"0.5459529",
"0.5456665",
"0.5448621",
"0.54074323",
"0.5362034",
"0.53619134",
"0.5324682",
"0.53203875",
"0.53083163",
"0.5288739",
"0.52818304",
"0.52659994",
"0.52484584",
"0.5247805",
"0.52442896",
"0.5239872",
"0.5237977",
"0.5231064"
] | 0.70245886 | 0 |
It should create grayscale shapes in the given data directory. | def test_create_shapes_grayscale(data_dir):
dataset.create_shapes(10, 10, 1, channels=1, data_dir=data_dir)
img_path = os.path.join(data_dir, "ellipse/0.png")
assert os.path.exists(img_path)
img = imageio.imread(img_path)
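    # a grayscale PNG is read back as a 2-D array (height, width), with no channel axis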
assert img.shape == (10, 10) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_shapes(data_dir):\n dataset.create_shapes(10, 10, 1, data_dir=data_dir)\n img_path = os.path.join(data_dir, \"ellipse/0.png\")\n assert os.path.exists(img_path)\n img = imageio.imread(img_path)\n assert img.shape == (10, 10, 4)",
"def makeDataset(numberOfTrials, data_type):\n\n\tdata_folder = data_type + \"_images\"\n\tlabel_file = os.path.join(dataset_params.data_path, data_type + \"_lables.csv\")\n\n\tutils.create_directory(dataset_params.data_path)\n\tutils.create_directory(os.path.join(dataset_params.data_path, data_folder))\n\n\tallowedRadius = utils.defineShapePerimeter()\n\tcolorsRGB = utils.defineColorValues()\n\tshapeDict = utils.defineShapeSides()\n\tpadding = dataset_params.padding\n\n\tnum = 0\n\toutput_images = [[\"figNum\", \"shape\", \"color\", \"size\", \"background\", \"quadrant\", \"radius\"]]\n\tfor c in dataset_params.colors: # for all 7 foreground colors \n\t\tfor q in dataset_params.quadrants: # for all 4 quadratns \n\t\t\tfor s in dataset_params.shapes: # for all 5 shapes\n\t\t\t\tfor k in dataset_params.sizes: # for all 3 sizes\n\t\t\t\t\tfor b in dataset_params.backgrounds: # for all 3 background colors\n\t\t\t\t\t\tfor i in range(numberOfTrials):\n\t\t\t\t\t\t\tfileName = os.path.join(dataset_params.data_path, data_folder, str(num) + \".png\")\n\t\t\t\t\t\t\tpresentQuadrant = dataset_params.quadrants[q]\n\t\t\t\t\t\t\tradius = random.randint(allowedRadius[s][k][0],allowedRadius[s][k][1])\n\n\t\t\t\t\t\t\tif(presentQuadrant == 3):\n\t\t\t\t\t\t\t\txMin = 128 + padding\n\t\t\t\t\t\t\t\txMax = 255 - radius\n\t\t\t\t\t\t\t\tyMin = 128 + padding\n\t\t\t\t\t\t\t\tyMax = 255 - radius\n\n\t\t\t\t\t\t\telif(presentQuadrant == 2):\n\t\t\t\t\t\t\t\txMin = 0 + radius\n\t\t\t\t\t\t\t\txMax = 128 - padding\n\t\t\t\t\t\t\t\tyMin = 128 + padding\n\t\t\t\t\t\t\t\tyMax = 255 - radius\n\n\t\t\t\t\t\t\telif(presentQuadrant == 1):\n\t\t\t\t\t\t\t\txMin = 0 + radius\n\t\t\t\t\t\t\t\txMax = 128 - padding\n\t\t\t\t\t\t\t\tyMin = 0 + radius\n\t\t\t\t\t\t\t\tyMax = 128 - padding\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\txMin = 128 + padding\n\t\t\t\t\t\t\t\txMax = 255 - radius\n\t\t\t\t\t\t\t\tyMin = 0 + radius\n\t\t\t\t\t\t\t\tyMax = 128 - padding\n\n\t\t\t\t\t\t\txCenter = random.randint(xMin, xMax)\n\t\t\t\t\t\t\tyCenter = random.randint(yMin, yMax)\n\t\t\t\t\t\t\tcenter = [xCenter, yCenter]\n\n\t\t\t\t\t\t\tif(s == \"circle\"):\n\t\t\t\t\t\t\t\toutput_images.append([num, \"circle\", c, k, b, presentQuadrant, radius])\n\t\t\t\t\t\t\t\timg = makeCircle(c, radius, center, b, colorsRGB)\n\t\t\t\t\t\t\t\timg = img[:,:,::-1]\n\t\t\t\t\t\t\t\tcv2.imwrite(fileName, img)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tn = shapeDict[s]\n\t\t\t\t\t\t\t\timg = makePolygon(center, n, radius, b, c, colorsRGB)\n\t\t\t\t\t\t\t\timg = img[:,:,::-1]\n\t\t\t\t\t\t\t\tcv2.imwrite(fileName, img)\n\t\t\t\t\t\t\t\toutput_images.append([num, s, c, k, b, presentQuadrant, radius])\n\t\t\t\t\t\t\tnum += 1\n\t\n\tprint(\"Number of image generated\", num)\n\n\tprint(\"Saving \" + data_type + \" data meta information to CSV ......\")\n\tdf = pd.DataFrame(output_images[1:], columns=output_images[0])\n\tdf.to_csv(label_file, index=False)\n\tprint(\"Saved \" + data_type + \" data meta information: \" + data_folder)\n\t\n\n\tprint(\"Saving \" + data_type + \" images data to npz(numpy) compressed file ......\")\n\tmake_npz_file(data_type)\n\tprint(\"Saved \" + data_type + \" images data to npz(numpy) compressed file!\")\n\t\n\treturn None",
"def _preprocess_data(self, name, directory):\n if name.endswith('data'):\n for path in glob(str(directory / '**/*.jpg'), recursive=True):\n try:\n with Image.open(path) as img:\n if not name.startswith('feature'):\n img = img.rotate(-90, 0, 1)\n img = img.resize(self.input_shape)\n except (ValueError, OSError):\n print(\"Couldn't open {}\".format(path))\n else:\n path = Path(path)\n filename = path.name.split('img-')[1]\n target = (path.parent / filename).with_suffix('.image.png')\n img.save(target, 'PNG')\n os.remove(str(path))\n elif name.endswith('targets'):\n for path in glob(str(directory / '**/*.mat'), recursive=True):\n try:\n mat = spio.loadmat(path)['depthMap']\n img = spmisc.toimage(mat).resize(self.target_shape)\n except ValueError:\n print(\"Couldn't open {}\".format(path))\n else:\n path = Path(path)\n name = path.name[path.name.index('-') + 1:]\n target = (path.parent / name).with_suffix('.depth.png')\n img.save(target, 'PNG')\n os.remove(str(path))",
"def load_shapes(self,count,img_folder,mask_folder,imglist,dataset_root_path):\n self.add_class(\"shapes\",1,\"red_s\")\n self.add_class(\"shapes\",2,\"red_m\")\n self.add_class(\"shapes\",3,\"red_l\")\n self.add_class(\"shapes\",4,\"yellow_s\")\n self.add_class(\"shapes\",5,\"yellow_m\")\n self.add_class(\"shapes\",6,\"yellow_l\")\n self.add_class(\"shapes\",7,\"green_s\")\n self.add_class(\"shapes\",8,\"green_m\")\n self.add_class(\"shapes\",9,\"green_l\")\n self.add_class(\"shapes\",10,\"blue_s\")\n self.add_class(\"shapes\",11,\"blue_m\")\n self.add_class(\"shapes\",12,\"blue_l\")\n self.add_class(\"shapes\",13,\"orange_s\")\n self.add_class(\"shapes\",14,\"orange_m\")\n self.add_class(\"shapes\",15,\"orange_l\")\n\n for i in range(count):\n filestr = imglist[i].split(\".\")[0]\n package_id = (int(filestr)-1)//30 + 1\n package_path = \"package%s\" % package_id\n # print(filestr)\n if mask_folder == 'mask/training_data/':\n mask_path = mask_folder+package_path +\"/image%s\" % filestr\n # print('====>',mask_path)\n csv_path_str = \"training_data/\"+package_path\n path_to_img = img_folder+'/'+package_path+ \"/%s.png\" % filestr\n else:\n mask_path = mask_folder + \"/image%s\" % filestr\n csv_path_str = img_folder\n path_to_img = img_folder+ \"/%s.png\" % filestr\n label_index = filestr\n # path_to_img = img_folder+ \"/%s.png\" % filestr\n # print(path_to_img)\n cv_img = cv2.imread(path_to_img)\n # print(cv_img)\n # resize_img = cv2.resize(cv_img,(384,384),interpolation = cv2.INTER_AREA)\n self.add_image(\"shapes\",image_id=i, path=path_to_img, csv_path=csv_path_str, width=cv_img.shape[1], height=cv_img.shape[0], mask_path=mask_path, label_index=label_index)",
"def preprocess_directory(data_path, label_path, damage_fn):\r\n\r\n file_names = os.listdir(data_path)\r\n os.mkdir(label_path)\r\n\r\n for file_name in file_names:\r\n file_path = data_path + \"/\" + file_name\r\n cur_label_path = label_path + \"/\" + file_name\r\n current_image = Image.open(file_path)\r\n label = damage_fn(current_image)\r\n label.save(cur_label_path, \"JPEG\")",
"def create_data(data_size,heme, nucleotide, control, steroid,data_total,path_to_data):\n\n os.chdir(path_to_data)\n\n x_array = np.zeros(shape = (data_size,14,32,32,32))\n\n y_array = np.zeros(shape = data_size)\n\n print(\"data size = \", data_size)\n\n #training set :\n\n file_count = 0\n\n for file in data_total:\n\n y_array[file_count]= find_class(str(file), heme, nucleotide, control, steroid)\n\n x_array[file_count] = np.load(str(file+\".npy\"))\n\n file_count+=1\n\n\n return (x_array, y_array)",
"def training_data_generation(DATA_DIR, img_height_size, img_width_size, label_list):\r\n \r\n img_ms_files = glob.glob(DATA_DIR + '\\\\Train_MS' + '\\\\Train_*.tif')\r\n img_pan_files = glob.glob(DATA_DIR + '\\\\Train_Pan' + '\\\\Train_*.tif')\r\n polygon_files = glob.glob(DATA_DIR + '\\\\Train_Polygons' + '\\\\Train_*.geojson')\r\n \r\n img_ms_array_list = []\r\n img_pan_array_list = []\r\n mask_array_list = []\r\n \r\n for file in range(len(img_ms_files)):\r\n with rasterio.open(img_ms_files[file]) as f:\r\n metadata = f.profile\r\n img_ms = np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0])\r\n \r\n with rasterio.open(img_pan_files[file]) as g:\r\n metadata_pan = g.profile\r\n img_pan = np.expand_dims(g.read(1), axis = 2)\r\n \r\n ms_to_pan_ratio = metadata['transform'][0] / metadata_pan['transform'][0]\r\n \r\n if (img_height_size % ms_to_pan_ratio) != 0 or (img_width_size % ms_to_pan_ratio) != 0:\r\n raise ValueError('Please make sure that both img_height_size and img_width_size can be divided by {}'.format(int(ms_to_pan_ratio)))\r\n \r\n mask = training_mask_generation(img_pan_files[file], polygon_files[file], labels = label_list)\r\n \r\n img_ms_array, img_pan_array, mask_array = image_clip_to_segment_and_convert(img_ms, img_pan, mask, ms_to_pan_ratio, \r\n img_height_size, img_width_size)\r\n \r\n img_ms_array_list.append(img_ms_array)\r\n img_pan_array_list.append(img_pan_array)\r\n mask_array_list.append(mask_array)\r\n \r\n img_ms_full_array = np.concatenate(img_ms_array_list, axis = 0)\r\n img_pan_full_array = np.concatenate(img_pan_array_list, axis = 0)\r\n mask_full_array = to_categorical(np.concatenate(mask_array_list, axis = 0), num_classes = len(label_list))\r\n \r\n return img_ms_full_array, img_pan_full_array, mask_full_array",
"def create_dataset(data_folder: str, dataset_file: str, targets_file: str = os.path.join('data', 'targets.pkl')):\n files = sorted(glob.glob(os.path.join(data_folder, '**/*.jpg'), recursive=True))\n images = []\n crop_sizes = []\n crop_centers = []\n targets = []\n for image in tqdm(files, desc='creating dataset', total=len(files)):\n img = Image.open(image)\n # quadruple dataset by vertical and horizontal flipping\n for i in range(4):\n if i == 1 or i == 3:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n if i == 2:\n img = img.transpose(Image.FLIP_TOP_BOTTOM)\n x, y, w, h, cx, cy = get_random_image_values()\n resized = img.resize((y, x), Image.LANCZOS) # mind thee: x and y swapped\n arr = np.array(resized, dtype=np.float32)\n arr, target_array = create_cropped_data(np.copy(arr), (w, h), (cx, cy), crop_only=False)\n images.append(arr)\n crop_sizes.append((w, h))\n crop_centers.append((cx, cy))\n targets.append(target_array)\n data = {'images': images, 'crop_sizes': crop_sizes, 'crop_centers': crop_centers}\n # persist on harddrive\n with open(dataset_file, 'wb') as f:\n pickle.dump(data, f)\n with open(targets_file, 'wb') as f:\n pickle.dump(targets, f)\n print(f'created datset and saved it to {dataset_file} and targets to {targets_file}')",
"def process_data(output_folder):\n # select imgs\n img_folder = join(output_folder, 'img')\n select_img(output_folder, img_folder, 'HE-green')\n\n mask_folder = join(output_folder, 'mask')\n select_img(output_folder, mask_folder, '_EF5')",
"def preprocess_dir(data_path,\n output_path,\n dataset,\n n_train,\n new_size,\n ):\n img_type_dict = get_class_labels()\n\n print('Preprocessing:', dataset)\n target_data_path = data_path\n disease_dirs = os.listdir(target_data_path)\n disease_dirs = [d for d in disease_dirs if\n os.path.isdir(os.path.join(target_data_path, d))]\n img_stack, target_list = [], []\n img_names = []\n for img_type in disease_dirs:\n class_lbl = img_type_dict[img_type]\n n_class = int(n_train / len(disease_dirs))\n print('\\t', img_type)\n img_files_path = os.path.join(target_data_path, img_type)\n if not (os.path.isdir(img_files_path)):\n continue\n img_files = os.listdir(img_files_path)\n img_files = [f for f in img_files if f.endswith('.jpeg')]\n if dataset == 'train':\n img_files = img_files[0:n_class]\n for img_fname in img_files:\n img_path = os.path.join(img_files_path, img_fname)\n img_arr = np.array(Image.open(img_path))\n img_arr = skimage.transform.resize(img_arr, new_size)\n img_arr = (img_arr - img_arr.min()) / img_arr.max()\n img_stack.append(img_arr)\n target_list.append(class_lbl)\n img_names += [n.split('.')[0] for n in img_files]\n # Save preprocessed data\n save_data(output_path, img_stack, target_list,\n new_size, dataset, n_train, img_names)",
"def load_png_data():\n m=1 #训练文件个数\n n=1 #测试文件个数\n train_set_x=[]#训练数据集\n train_set_y=[]#训练标签集\n\n test_set_x=[]#测试数据集\n test_set_y=[]#测试标签集\n\n train_data={}\n\n train_path=r\".\\dataset\\train_label\\\\\"\n dirs=os.listdir(train_path)\n\n for file in dirs:\n srcImg=cv2.imread(train_path+file)\n #将label数据集保存为numpy格式并保存\n npImg=np.array(srcImg)\n np.save(train_path+str(m)+'.npy',npImg)\n train_set_x.append(npImg)\n\n\n NoiseImg = GaussianNoise(srcImg, 25, 4, 0.8)\n npNoiseImg = np.array(NoiseImg)\n cv2.imwrite(r\".\\dataset\\trainset\\\\\"+str(m)+'.png', NoiseImg, [int(cv2.IMWRITE_PNG_STRATEGY_DEFAULT)])\n np.save(r\".\\dataset\\trainset\\\\\" + str(m) + '.npy', npNoiseImg)\n train_set_y.append(npNoiseImg)\n m=m+1\n train_data['train_set_x']=train_set_x\n train_data['train_set_y']=train_set_y\n\n test_path = r\".\\dataset\\test_label\\\\\"\n dirs_test = os.listdir(test_path)\n for file in dirs_test:\n srcImg=cv2.imread(test_path+file)\n #将label数据集保存为numpy格式并保存\n npImg=np.array(srcImg)\n np.save(test_path+str(n)+'.npy',npImg)\n test_set_x.append(npImg)\n\n\n NoiseImg = GaussianNoise(srcImg, 25, 4, 0.8)\n npNoiseImg = np.array(NoiseImg)\n cv2.imwrite(r\".\\dataset\\testset\\\\\"+str(n)+'.png', NoiseImg, [int(cv2.IMWRITE_PNG_STRATEGY_DEFAULT)])\n np.save(r\".\\dataset\\testset\\\\\" + str(n) + '.npy', npNoiseImg)\n test_set_y.append(npNoiseImg)\n n=n+1\n train_data['test_set_x']=test_set_x\n train_data['test_set_y']=test_set_y\n\n np.savez(r\"E:\\DeepLearning\\CNNDenoiser\\dataset\\train_data.npz\",**train_data)",
"def _generate_dataset(self):\n # create train images\n train_path = os.path.join(self.root_dir, \"shapes\", \"train\", \"good\")\n os.makedirs(train_path, exist_ok=True)\n for i in range(self.num_train):\n result = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=self.train_shapes,\n generate_mask=False,\n )\n image = result[\"image\"]\n imsave(os.path.join(train_path, f\"{i:03}.png\"), image, check_contrast=False)\n\n # create test images\n for test_category in self.test_shapes:\n test_path = os.path.join(self.root_dir, \"shapes\", \"test\", test_category)\n mask_path = os.path.join(self.root_dir, \"shapes\", \"ground_truth\", test_category)\n os.makedirs(test_path, exist_ok=True)\n os.makedirs(mask_path, exist_ok=True)\n # anomaly and masks. The idea is to superimpose anomalous shapes on top of correct ones\n for i in range(self.num_test):\n correct_shapes = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=self.train_shapes,\n generate_mask=False,\n )\n result = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=[test_category],\n generate_mask=True,\n )\n correct_shapes = correct_shapes[\"image\"]\n image, mask = result[\"image\"], result[\"mask\"]\n image = np.minimum(image, correct_shapes) # since 255 is white\n imsave(os.path.join(test_path, f\"{i:03}.png\"), image, check_contrast=False)\n imsave(os.path.join(mask_path, f\"{i:03}_mask.png\"), mask, check_contrast=False)\n # good test\n test_good = os.path.join(self.root_dir, \"shapes\", \"test\", \"good\")\n os.makedirs(test_good, exist_ok=True)\n for i in range(self.num_test):\n result = generate_random_anomaly_image(\n image_width=self.image_width,\n image_height=self.image_height,\n shapes=self.train_shapes,\n )\n image = result[\"image\"]\n imsave(os.path.join(test_good, f\"{i:03}.png\"), image, check_contrast=False)",
"def __data_generation(self, image_mask_dirs): # X : (n_samples, *dim, n_channels)\n # Initialization\n X = np.empty((self.batch_size, *self.dim, self.n_channels))\n y = np.empty((self.batch_size, *self.dim, 1))\n\n # Generate data\n for i, dirs in enumerate(image_mask_dirs):\n # Store image\n x_img = cv2.imread(dirs[0])\n X[i,] = cv2.cvtColor(x_img, cv2.COLOR_BGR2RGB)\n\n # Store mask\n y_img = cv2.imread(dirs[1], cv2.IMREAD_GRAYSCALE).reshape((*self.dim, 1))\n y[i,] = y_img\n\n if self.preprocessor is not None:\n X = self.preprocessor(X)\n y = self.preprocessor(y)\n\n X = X.astype('float32')\n X /= 255\n y = y.astype('float32')\n y /= 255\n\n return X, y",
"def gen_batch_function(self, data_folder, image_shape):\n\n\t\tdef get_batches_fn(batch_size):\n\t\t\t#\n\t\t\timage_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n\t\t\t#\n\t\t\tlabel_paths = {\tre.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n\t\t\t\tfor path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}\n\t\t\t#\n\t\t\tbackground_color = np.array([255, 0, 0])\n\t\t\t#\n\t\t\trandom.shuffle(image_paths)\n\t\t\t#\n\t\t\tfor batch_i in range(0, len(image_paths), batch_size):\n\t\t\t\t#\n\t\t\t\timages = []\n\t\t\t\t#\n\t\t\t\tgt_images = []\n\t\t\t\t#\n\t\t\t\tfor image_file in image_paths[batch_i:batch_i+batch_size]:\n\t\t\t\t\t#\n\t\t\t\t\tgt_image_file = label_paths[os.path.basename(image_file)]\n\t\t\t\t\t#\n\t\t\t\t\timage = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n\t\t\t\t\t#\n\t\t\t\t\tgt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\t\t\t\t\t#\n\t\t\t\t\tgt_bg = np.all(gt_image == background_color, axis=2)\n\t\t\t\t\t#\n\t\t\t\t\tgt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n\t\t\t\t\t#\n\t\t\t\t\tgt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\t\t\t\t\t#\n\t\t\t\t\timages.append(image)\n\t\t\t\t\t#\n\t\t\t\t\tgt_images.append(gt_image)\n\t\t\t\t#\n\t\t\t\tyield np.array(images), np.array(gt_images)\n\t\t#\n\t\treturn get_batches_fn",
"def Dev_Image_data_generator(folderlist,resize = (920,1200),Transformation = True, scaling = True, batch_size = 16):\n\n while True:\n total_classes = len(folderlist.keys())\n keys = folderlist.keys()\n Images = []\n Image_label = []\n for key in folderlist.keys():\n img_label = random.choice(folderlist[key])\n img = Image.open(img_label,'r')\n h = resize[1]\n l = int(img.size[1]*h/img.size[0])\n img = img.resize((h,l), Image.ANTIALIAS)\n background = Image.new('RGB', (resize[1], resize[0]), (255, 255, 255))\n img_w, img_h = img.size\n bg_w, bg_h = background.size\n offset = (int((bg_w - img_w) / 2), int((bg_h - img_h) / 2))\n background.paste(img, offset)\n background = np.asarray(background)\n if Transformation == True:\n rotation = rotate(background,random.choice(range(360)))\n translate = translate_xy(background,random.choice(range(resize[0]/4)),random.choice(range(resize[1]/4)))\n flip = cv2.flip(rotation,1)\n Y = np.concatenate((rotation[np.newaxis,:,:,:],flip[np.newaxis,:,:,:],translate[np.newaxis,:,:,:]))\n Images.append(Y)\n Images.append(background[np.newaxis,:,:,:])\n Image_label.append([key for i in range(4)])\n else:\n Images.append(background[np.newaxis,:,:,:])\n Image_label.append([key])\n Image_label = np.concatenate(Image_label)\n Images = np.concatenate(Images)\n Image_label = np.array(pd.get_dummies(Image_label))\n X_Image , Y_Image = shuffle(Images,Image_label,random_state=0)\n if scaling == True:\n X_Image = X_Image/255\n else:\n X_Image = X_Image\n batches = int(len(X_Image)/batch_size)\n for batch in range(batches):\n x = X_Image[batch*batch_size:(batch+1)*batch_size,:,:,:]\n y = Y_Image[batch*batch_size:(batch+1)*batch_size]\n yield((x,y))",
"def create_random_data(output_path: str, num_images: int = 5) -> None:\n train_path = os.path.join(output_path, \"train\")\n class1_train_path = os.path.join(train_path, \"class1\")\n class2_train_path = os.path.join(train_path, \"class2\")\n\n val_path = os.path.join(output_path, \"val\")\n class1_val_path = os.path.join(val_path, \"class1\")\n class2_val_path = os.path.join(val_path, \"class2\")\n\n test_path = os.path.join(output_path, \"test\")\n class1_test_path = os.path.join(test_path, \"class1\")\n class2_test_path = os.path.join(test_path, \"class2\")\n\n paths = [\n class1_train_path,\n class1_val_path,\n class1_test_path,\n class2_train_path,\n class2_val_path,\n class2_test_path,\n ]\n\n for path in paths:\n try:\n os.makedirs(path)\n except FileExistsError:\n pass\n\n for i in range(num_images):\n pixels = numpy.random.rand(64, 64, 3) * 255\n im = Image.fromarray(pixels.astype(\"uint8\")).convert(\"RGB\")\n im.save(os.path.join(path, f\"rand_image_{i}.jpeg\"))\n\n process_images(output_path)",
"def explore_data():\n labels = [\"vehicles\", \"non-vehicles\"]\n labelmap = {0: \"vehicles\", 1: \"non-vehicles\"}\n vehicles_glob = os.path.join(data_dir, \"vehicles\", \"**\", \"*.png\")\n nonvehicles_glob = os.path.join(data_dir, \"non-vehicles\", \"**\", \"*.png\")\n class_fnames = [\n glob.glob(vehicles_glob, recursive = True),\n glob.glob(nonvehicles_glob, recursive = True)]\n n_samples = [len(fnames) for fnames in class_fnames]\n shapes = []\n samples = []\n print(table_format([\"label\", \"size\", \"shape\"], header = True))\n for label, fnames in enumerate(class_fnames):\n indices = np.random.choice(len(fnames), 4*10, replace = False)\n for i in indices:\n fname = fnames[i]\n img = cv2.imread(fname)\n samples.append(img)\n shape = img.shape\n shapes.append(shape)\n print(table_format([labels[label], n_samples[label], shapes[label]]))\n\n samples = np.stack(samples)\n samples = tile(samples, 2*4, 10)\n cv2.imwrite(os.path.join(out_dir, \"datasamples.png\"), samples)\n\n return class_fnames, labelmap",
"def create_patches(data, patch_shape):\n\n imgs = []\n\n if data[0].shape[0] == test_size:\n step_length = (test_size - patch_shape[0]) // 2 # 176\n else:\n step_length = (training_size - patch_shape[0])\n\n for i in range(data.shape[0]):\n if len(patch_shape) == 3: # RGB images\n patches = patchify(data[i], patch_shape, step=step_length)\n patches = patches.reshape((-1, patch_shape[0], patch_shape[1], patch_shape[2]))\n imgs.extend(patches)\n else:\n patches = patchify(data[i], patch_shape, step=step_length)\n patches = patches.reshape((-1, patch_shape[0], patch_shape[1]))\n imgs.extend(patches)\n\n return np.asarray(imgs)",
"def prepare_data(data_path, val_data_path, patch_size,stride,scales = [1, 0.9, 0.8, 0.7],\n max_num_patches=None, aug_times=1,random_aug=False, gray_mode=False):\n # training database\n print('> Training database')\n types = ('*.bmp', '*.png')\n files = []\n for tp in types:\n files.extend(glob.glob(os.path.join(data_path, tp)))\n files.sort()\n\n if gray_mode:\n traindbf = './data/set400_p64.h5'\n valdbf = './data/set12.h5'\n else:\n traindbf = './data/train_rgb.h5'\n valdbf = './data/val_rgb.h5'\n\n if max_num_patches is None:\n max_num_patches = 5000000\n print(\"\\tMaximum number of patches not set\")\n else:\n print(\"\\tMaximum number of patches set to {}\".format(max_num_patches))\n train_num = 0\n i = 0\n with h5py.File(traindbf, 'w') as h5f:\n while i < len(files) and train_num < max_num_patches:\n imgor = cv2.imread(files[i])\n # h, w, c = img.shape\n for sca in scales:\n img = cv2.resize(imgor, (0, 0), fx=sca, fy=sca, \\\n interpolation=cv2.INTER_CUBIC)\n if not gray_mode:\n # CxHxW RGB image\n img = (cv2.cvtColor(img, cv2.COLOR_BGR2RGB)).transpose(2, 0, 1)\n else:\n # CxHxW grayscale image (C=1)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = np.expand_dims(img, 0)\n img = normalize(img)\n patches = img_to_patches(img, win=patch_size, stride=stride)\n print(\"\\tfile: %s scale %.1f # samples: %d\" % \\\n (files[i], sca, patches.shape[3] * 8))\n for nx in range(patches.shape[3]):\n if random_aug == False:\n for j in range(aug_times):\n data = data_augmentation(patches[:, :, :, nx].copy(), j)\n h5f.create_dataset(str(train_num), data=data)\n train_num += 1\n else:\n for j in range(aug_times):\n data = data_augmentation(patches[:, :, :, nx].copy(), random.randint(0, 7))\n h5f.create_dataset(str(train_num), data=data)\n train_num += 1\n i += 1\n # validation database\n print('\\n> Validation database')\n files = []\n for tp in types:\n files.extend(glob.glob(os.path.join(val_data_path, tp)))\n files.sort()\n h5f = h5py.File(valdbf, 'w')\n val_num = 0\n for i, item in enumerate(files):\n print(\"\\tfile: %s\" % item)\n img = cv2.imread(item)\n if not gray_mode:\n # C. H. W, RGB image\n img = (cv2.cvtColor(img, cv2.COLOR_BGR2RGB)).transpose(2, 0, 1)\n else:\n # C, H, W grayscale image (C=1)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n img = np.expand_dims(img, 0)\n\n C,H,W=img.shape\n\n # if H % 2 == 1:\n # \timg = img[:, :-1, :]\n # if W % 2 == 1:\n # \timg = img[:, :, :-1]\n\n img = normalize(img)\n h5f.create_dataset(str(val_num), data=img)\n val_num += 1\n h5f.close()\n\n print('\\n> Total')\n print('\\ttraining set, # samples %d' % train_num)\n print('\\tvalidation set, # samples %d\\n' % val_num)",
"def load_data(train_test_ratio = 0.8, class_range = 8, randomised = True):\n\n # Get image filenames, labels, and the number of classification classes\n filenames = glob.glob(\"../img/*.png\")\n if randomised:\n random.shuffle(filenames)\n\n img_labels = []\n for filename in filenames:\n label = int(filename.split(\"-d\",1)[1].split('-',1)[0])\n label = max(0, (label - 1) // (class_range))\n img_labels.append(label)\n\n num_classes = max(img_labels) + 1 # E.g. max label 5 -> 0-5 inclusive\n num_total_samples = len(filenames)\n num_train_samples = int(num_total_samples * train_test_ratio)\n num_test_samples = num_total_samples - num_train_samples\n\n training_images = np.empty(\n (num_train_samples, OUTPUT_RES, OUTPUT_RES, 3), dtype='uint8'\n )\n training_labels = np.asarray(img_labels[:num_train_samples], dtype='uint8')\n\n for i in range(0, num_train_samples):\n training_images[i] = parse_img(filenames[i])\n\n test_images = np.empty(\n (num_test_samples, OUTPUT_RES, OUTPUT_RES, 3), dtype='uint8'\n )\n test_labels = np.asarray(img_labels[num_train_samples:], dtype='uint8')\n\n for i in range(0, num_test_samples):\n test_images[i] = parse_img(filenames[i + num_train_samples])\n\n return ((training_images, training_labels),\n (test_images, test_labels),\n num_classes)",
"def main():\n labels, data = load_image_data()\n print(labels.shape, data.shape)",
"def build_dataset(self):\n print(\"reading data of images currently , please wait......\")\n x_train, y_train, _ = get_images(self.train_directory)\n x_test, y_test, _ = get_images(self.test_directory)\n x_train, y_train = image_subset(self.num_classes, x_train, y_train)\n x_test, y_test = image_subset(self.num_classes, x_test, y_test)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n self.x_train = x_train / 255\n self.x_test = x_test / 255\n self.y_train = utils.to_categorical(y_train, self.num_classes)\n self.y_test = utils.to_categorical(y_test, self.num_classes)",
"def create_data_dict(data_dir, img_size=[25, 83]):\n print(\"Creating data dictionary\")\n print(\"- Using data at:\", data_dir)\n\n # Directories\n imgs_dir = os.path.join(data_dir, \"training/image_2\")\n labels_dir = os.path.join(data_dir, \"training/gt_image_2\")\n\n print(\"- Getting list of files\")\n # Only get the label files for road (not lane)\n label_files = glob.glob(os.path.join(labels_dir, \"*_road_*.png\"))\n\n # Create corresponding list of training image files\n img_files = list(map(lambda f: os.path.basename(f).replace(\"_road\", \"\"), label_files))\n img_files = list(map(lambda f: os.path.join(imgs_dir, f), img_files)) # absolute path\n\n n_samples = len(img_files)\n print(\"- Encountered {} samples\".format(n_samples))\n est_filesize = (n_samples*np.prod(img_size)*(3+1))/1e6\n print(\"- Estimated output filesize: {:0.3f} MB + overhead\".format(est_filesize))\n\n data = {}\n data[\"X_train\"] = np.empty([n_samples]+img_size+[3], dtype=np.uint8)\n data[\"Y_train\"] = np.empty([n_samples]+img_size, dtype=np.uint8)\n\n print(\"- Processing image files\")\n for i in range(n_samples):\n label_img = scipy.misc.imread(label_files[i])\n input_img = scipy.misc.imread(img_files[i])\n\n # PRERPOCESS THE IMAGES\n label_img = scipy.misc.imresize(label_img, img_size)\n input_img = scipy.misc.imresize(input_img, img_size)\n\n # PROCESSING LABEL IMAGE\n # Only one channel, (1=road, 0=not road)\n non_road_class = np.array([255,0,0])\n label_img = (1-np.all(label_img==non_road_class, axis=2, keepdims=False)).astype(np.uint8)\n\n # Place the images into the data arrays\n data[\"X_train\"][i] = input_img\n data[\"Y_train\"][i] = label_img\n\n print(\"- Shuffling the data\")\n np.random.seed(seed=128)\n ids = list(np.random.permutation(n_samples))\n data[\"X_train\"] = data[\"X_train\"][ids]\n data[\"Y_train\"] = data[\"Y_train\"][ids]\n\n print(\"- Done!\")\n return data",
"def get_data(folder: str, dimensions: int):\n preprocess = transforms.Compose(\n [\n transforms.Resize(256),\n transforms.CenterCrop(dimensions),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]\n )\n ]\n )\n return datasets.ImageFolder(folder, transform=preprocess)",
"def load_data(data_dir):\n\n # Initiate lists\n images = []\n labels = []\n\n main_dir = os.path.abspath(os.curdir)\n\n for i in range(NUM_CATEGORIES):\n os.chdir(os.path.join(data_dir, str(i))) # Open directory i\n dir_images = os.listdir() # Create a list of all images in directory\n\n for j in range(len(dir_images)):\n image = cv2.imread(dir_images[j]) # Read image from file\n image = tf.keras.preprocessing.image.img_to_array(image) # Transform image to numpy array\n image = tf.image.resize(image, (IMG_WIDTH, IMG_HEIGHT)) # Reshape image to 30 x 30 px\n image = image/255 # Normalize image RGB values\n images.append(image) \n labels.append(i)\n\n os.chdir(main_dir)\n \n return (images, labels)",
"def prepare_test_data(args):\n image_dir = args.test_image_dir\n\n files = os.listdir(image_dir)\n files = [f for f in files if f.lower().endswith('.png')]\n\n img_ids = list(range(len(files)))\n img_files = []\n img_heights = []\n img_widths = []\n \n for f in files:\n img_path = os.path.join(image_dir, f)\n img_files.append(img_path)\n img = cv2.imread(img_path)\n img_heights.append(img.shape[0]) \n img_widths.append(img.shape[1]) \n\n print(\"Building the testing dataset...\")\n dataset = DataSet(img_ids, img_files, img_heights, img_widths)\n print(\"Dataset built.\")\n return dataset",
"def test_generator(self, test_path):\n\n img_list = os.scandir(test_path)\n for img_entry in img_list:\n\n img = cv2.imread(img_entry.path, COLOR_TO_OPENCV[self.color_mode])\n if img.shape[-1] == 3:\n orig_shape = img.shape[-2::-1]\n else:\n orig_shape = img.shape[::-1]\n\n\n img = cv2.resize(img, tuple(self.target_size))\n img = img / 255\n if self.color_mode == \"grayscale\":\n img = np.reshape(img, img.shape + (1,))\n img = np.reshape(img, (1,) + img.shape)\n yield img, img_entry, orig_shape",
"def gen_batch_function(data_folder, image_shape, seed=None, samples_limit=None):\n # Grab image and label paths\n image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))\n label_paths = {\n re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path\n for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))\n }\n background_color = np.array([255, 0, 0])\n\n if samples_limit:\n image_paths = image_paths[0:samples_limit]\n\n samples_n = len(image_paths)\n\n rnd = random.Random(seed)\n\n def get_batches_fn(batch_size):\n \"\"\"\n\t\tCreate batches of training data\n\t\t:param batch_size: Batch Size\n\t\t:return: Batches of training data\n\t\t\"\"\"\n # Shuffle training data\n rnd.shuffle(image_paths)\n # Loop through batches and grab images, yielding each batch\n for batch_i in range(0, samples_n, batch_size):\n images = []\n gt_images = []\n for image_file in image_paths[batch_i:batch_i + batch_size]:\n gt_image_file = label_paths[os.path.basename(image_file)]\n # Re-size to image_shape\n image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)\n gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)\n\n # Create \"one-hot-like\" labels by class\n gt_bg = np.all(gt_image == background_color, axis=2)\n gt_bg = gt_bg.reshape(*gt_bg.shape, 1)\n gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)\n\n images.append(image)\n gt_images.append(gt_image)\n\n yield np.array(images), np.array(gt_images)\n\n return get_batches_fn, samples_n",
"def process_images(path, dataset):\n \n print(f\"Processing images {os.path.join(path, dataset)}\", flush=True)\n label_file = os.path.join(path, dataset + '-labels-idx1-ubyte')\n with open(label_file, 'rb') as file:\n _, num = struct.unpack(\">II\", file.read(8))\n labels = numpy.fromfile(file, dtype=numpy.int8) #int8\n new_labels = numpy.zeros((num, 10))\n new_labels[numpy.arange(num), labels] = 1\n\n img_file = os.path.join(path, dataset + '-images-idx3-ubyte')\n with open(img_file, 'rb') as file:\n _, num, rows, cols = struct.unpack(\">IIII\", file.read(16))\n imgs = numpy.fromfile(file, dtype=numpy.uint8).reshape(num, rows, cols) #uint8\n imgs = imgs.astype(numpy.float32) / 255.0\n\n os.remove(label_file); os.remove(img_file)\n print(f\"Saving files under {os.path.join(path, dataset)} path\", flush=True)\n numpy.savez_compressed(os.path.join(path, dataset), imgs=imgs, labels=labels)",
"def read_files_and_visualize(data):\n\n image = cv2.imread(data[0])\n label = cv2.imread(data[1], 0)\n name = data[1].split('/')[-1].split('.')[0]\n obj_label = None\n\n if generator_options.save_label_preview:\n obj_label = []\n if os.path.isfile(data[2]):\n with open(data[2], 'r') as f:\n obj = csv.reader(f, delimiter=',')\n for row in obj:\n row = [int(r.split('.')[0]) if index != 0 else r\n for index, r in enumerate(row)]\n obj_label.append(row)\n\n else:\n label_vals = np.unique(label)\n for val in label_vals:\n obj_label.append([_LABEL_DEF_FULL[val], 0, 0, 0, 0])\n\n save_visuals(image, label, obj_label, name)"
] | [
"0.66889775",
"0.6415231",
"0.6367699",
"0.63560754",
"0.6261791",
"0.6167243",
"0.61289775",
"0.60442597",
"0.6023371",
"0.5907351",
"0.5826012",
"0.57797915",
"0.5760415",
"0.57251453",
"0.57180756",
"0.57146996",
"0.57077223",
"0.56844974",
"0.5684313",
"0.56831986",
"0.56820303",
"0.56811684",
"0.5680994",
"0.56770945",
"0.5675886",
"0.5668763",
"0.56536174",
"0.56481946",
"0.5640629",
"0.56321543"
] | 0.78713274 | 0 |
Calculate by how many cells the moving window should be moved. If this is nonzero, shift the fields on the interpolation grid, and add new particles. | def move_grids(self, fld, comm, time):
# To avoid discrepancies between processors, only the first proc
# decides whether to send the data, and broadcasts the information.
dz = comm.dz
if comm.rank==0:
# Move the continuous position of the moving window object
self.zmin += self.v * (time - self.t_last_move)
# Find the number of cells by which the window should move
zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax(
local=False, with_damp=False, with_guard=False )
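            # (integer truncation: the grid only shifts by whole cells, once the
            #  continuous window position is at least one cell ahead of the domain edge)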
n_move = int( (self.zmin - zmin_global_domain)/dz )
else:
n_move = None
# Broadcast the information to all proc
if comm.size > 1:
n_move = comm.mpi_comm.bcast( n_move )
# Move the grids
if n_move != 0:
# Move the global domain
comm.shift_global_domain_positions( n_move*dz )
# Shift the fields
Nm = len(fld.interp)
for m in range(Nm):
# Modify the values of the corresponding z's
fld.interp[m].zmin += n_move*fld.interp[m].dz
fld.interp[m].zmax += n_move*fld.interp[m].dz
# Shift/move fields by n_move cells in spectral space
self.shift_spect_grid( fld.spect[m], n_move )
# Because the grids have just been shifted, there is a shift
# in the cell indices that are used for the prefix sum.
if fld.use_cuda:
fld.prefix_sum_shift += n_move
# This quantity is reset to 0 whenever prefix_sum is recalculated
# Prepare the positions of injection for the particles
# (The actual creation of particles is done when the routine
# exchange_particles of boundary_communicator.py is called)
if comm.rank == comm.size-1:
# Move the injection position
self.z_inject += self.v * (time - self.t_last_move)
# Take into account the motion of the end of the plasma
self.z_end_plasma += self.v_end_plasma * (time - self.t_last_move)
# Increment the number of particle cells to add
nz_new = int( (self.z_inject - self.z_end_plasma)/dz )
self.nz_inject += nz_new
# Increment the virtual position of the end of the plasma
            # (When `generate_particles` is called, the plasma
# is injected between z_end_plasma - nz_inject*dz and z_end_plasma,
# and afterwards nz_inject is set to 0.)
self.z_end_plasma += nz_new*dz
# Change the time of the last move
self.t_last_move = time | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def swipeBase (self) :\n grid = self.grid\n\n #we start by putting every tile up\n for columnNbr in range(4) :\n nbrZeros = 4 - np.count_nonzero(grid[:,columnNbr])\n\n for lineNbr in range(4) :\n counter = 0\n while (grid[lineNbr, columnNbr] == 0) and (counter < 4):\n counter += 1\n if np.count_nonzero(grid[lineNbr:4, columnNbr]) != 0 :\n for remainingLine in range (lineNbr, 3) :\n grid[remainingLine, columnNbr] = grid[remainingLine+1, columnNbr]\n grid[3, columnNbr] = 0\n\n #now we do the additions\n for lineNbr in range(3) :\n if grid[lineNbr, columnNbr] == grid[lineNbr+1, columnNbr] :\n grid[lineNbr, columnNbr] *= 2\n for remainingLine in range (lineNbr+1, 3) :\n grid[remainingLine, columnNbr] = grid[remainingLine+1, columnNbr]\n grid[3, columnNbr] = 0\n\n return (grid)",
"def move(self):\n x = y = z = 0.0\n for cell in self.cells:\n x += (cell.x)#*n\n y += (cell.y)#*n\n z += (cell.z)#*n\n np = float(len(self.cells))\n med = numpy.array([x/np,y/np,z/np])\n \n dists = []\n for cell in self.cells:\n d = (cell.x-self.x)**2+(cell.y-self.y)**2+(cell.z-self.z)**2\n d = numpy.sqrt(d)\n dists.append(d)\n #md = (cell.x-med[0])**2+(cell.y-med[1])**2+(cell.z-med[2])**2\n #dists[-1] = (dists[-1]+md)/2\n cell = self.cells[numpy.argmin(dists)]\n cc = numpy.array([cell.x, cell.y, cell.z])\n \n t = self.t\n if abs(self.dnp) * ( self.np-self.np_req) > 0:\n t = self.tr\n self.dcenter = (1-t)*(med-self.center + self.u*(cc-med))\n self.x,self.y,self.z = self.center = self.center + self.dcenter",
"def update_grid_pos(self):\n self.grid_pos = self.get_tile_of_position(self.tank.body.position)",
"def moving_windows_num(self, size = None, overlap = None):\n if size==None:\n size=4096\n if overlap==None:\n overlap=0\n \n return (self.num_datapoints)/(size-overlap) #Not sure if it's right, need testing",
"def _ignite_cells(self, istep, ip):\n particle = self.particles[ip] # get particle\n state, x, y = particle.get_from_keys([\"state\", \"x\", \"y\"])\n if state > STTHR:\n for i in range(self.grid.NX-1):\n if abs(x - self.grid.XCELL[i, 0]) < self.grid.DX/2:\n INDX = i\n for j in range(self.grid.NY-1):\n if abs(y - self.grid.YCELL[0, j]) < self.grid.DY/2:\n INDY = j\n cell = self.grid.CELLS[INDX, INDY]\n cell.BURNPROG += 1\n if (cell.QMAXTR > 0 or cell.QMAXBLD > 0) and cell.BURNSTAT == 0:\n cell.BURNSTAT = 1\n cell.CLOCK = self.TIME[istep]\n # elif cell.QMAXTR == 0 or cell.QMAXBLD == 0:\n # particle.update(state=0.0, factor=0.0)\n # if pType == 2:\n # particle.update(state=0.0)",
"def update_grid(self):\n if self.game_over:\n return\n if self.active_piece is None:\n self.place_new_piece()\n if self.piece_collision_exists(self.active_piece):\n self.handle_active_piece_collision()\n self.place_new_piece()\n self.shift_cells(self.active_piece, self.current_direction)\n self.active_piece = TransformPiece.shift_coordinates(self.active_piece, self.current_direction)\n self.merge_with_completed_rows()\n if self.is_game_won():\n self.game_over = True",
"def calc_positions(self) :\n\t\tx, y = self.x0, self.y0\n\n\t\twhile self.is_visible(x, y) :\n\t\t\tx = 0.5 * self.gx * self.t**2 + self.vx0 * self.t + self.x0\n\t\t\ty = 0.5 * self.gy * self.t**2 + self.vy0 * self.t + self.y0\n\t\t\t\n\t\t\tself.t += self.dt\n\t\t\tself.pos_x.append(x)\n\t\t\tself.pos_y.append(y)",
"def increment(grid):\n height = len(grid)\n width = len(grid[0])\n for r in range(height):\n for c in range(width):\n grid[r][c] += 1\n if grid[r][c] == 10:\n grid [r][c] = 0",
"def num_cells_up(self):\n if hasattr(self, '__num_cells_up__'):\n return self.__num_cells_up__\n elif self.shared_coboundaries is not None:\n assert self.upper_index is not None\n return int(self.shared_coboundaries.max()) + 1\n assert self.upper_index is None\n return 0",
"def update_shift_count(self, move):\n if len(move) == 2:\n self.shift_count += 1\n else:\n self.shift_count = 0",
"def calc(self):\n np = 0\n for cell in self.cells:\n n = self.cell_np[cell]\n np += n\n self.dnp = np - self.np\n self.np = np",
"def _add_mines(self):\n num = 0\n while num < self._n:\n x = random.randint(0, self._dim - 1)\n y = random.randint(0, self._dim - 1)\n if self._board[x][y] != -1:\n self._board[x][y] = -1\n neighbors = self._get_neighbors((x, y))\n for neighbor in neighbors:\n if self._board[neighbor[0]][neighbor[1]] != -1:\n self._board[neighbor[0]][neighbor[1]] += 1\n num += 1",
"def update_grid(self, x):\r\n\r\n # Append boundary rows and columns to matrix\r\n x = self.append_boundary(x) # the boundary is recomputed at each step\r\n y = np.copy(x)\r\n\r\n # For each cell within boundary, compute state according to rules.\r\n chg_0_1 = 0 # the number of cells that changed from state 0 to state 1\r\n chg_1_0 = 0 # the number of cells that changes from state 1 to state 0\r\n chg_none = 0 # the number of cells that did not change\r\n index = np.arange(1, x.shape[0] - 1)\r\n for i in index:\r\n for j in index:\r\n neighborhood = x[i - 1:i + 2:1, j - 1:j + 2:1] # 3x3 sub matrix centered at i, j\r\n y[i, j] = self.update_cell(neighborhood)\r\n change = int(y[i, j] - x[i, j])\r\n if change == -1:\r\n chg_1_0 += 1\r\n if change == 0:\r\n chg_none += 1\r\n if change == 1:\r\n chg_0_1 += 1\r\n\r\n # Compute statistics excluding boundary\r\n total = np.power(x[1:-1:1, 1:-1:1].shape[0] - 1, 2)\r\n start_1 = np.sum(x[1:-1:1, 1:-1:1])\r\n end_1 = np.sum(y[1:-1:1, 1:-1:1])\r\n stats = [total, start_1, end_1, chg_1_0, chg_none, chg_0_1]\r\n\r\n return y[1:-1:1, 1:-1:1], stats # remove the boundary\r",
"def update_positions(self, grid):\r\n self.grid = grid",
"def set_adjacent_mine_count(self):\n for position in self.grid_coords:\n x, y = position\n if self.grid[y][x] >= 0:\n grid_value = sum(map(self.is_mine, get_adjacent.get_adjacent(position)))\n self.grid[y][x] = grid_value",
"def make_move(grid, n_columns, n_rows):\r\n # Generate the game grid to be manipulated\r\n new_grid = [[0] * (n_columns + 1) for i in range(n_rows + 1)]\r\n\r\n\r\n for i in range(n_rows):\r\n for j in range(n_columns):\r\n upper_left = grid[i-1][j-1] # neighbor to upper left of cell of interest\r\n upper = grid[i-1][j] # neighbor above cell of interest\r\n upper_right = grid[i-1][j+1] # neighbor to upper right of cell of interest\r\n left = grid[i][j-1] # neighbor to left of cell of interest\r\n right = grid[i][j+1] # neighbor to right of cell of interest\r\n bot_left = grid[i+1][j-1] # neighbor to bottom left cell of interest\r\n bot = grid[i+1][j] # neighbor below cell of interest\r\n bot_right = grid[i+1][j+1] # neighbor to bottom right of cell of interest\r\n\r\n # sum of the state of all neighbors\r\n on_neighbors = upper_left + upper + upper_right + left + right + bot_left + bot + bot_right\r\n\r\n # Any ON cell with fewer than two ON neighbors turns OFF\r\n if grid[i][j] == 1 and on_neighbors < 2:\r\n new_grid[i][j] = 0\r\n\r\n # Any ON cell with two or three ON neighbours stays ON\r\n elif grid[i][j] == 1 and (on_neighbors == 2 or on_neighbors == 3):\r\n new_grid[i][j] = 1\r\n\r\n # Any ON cell with more than three ON neighbors turns OFF\r\n elif grid[i][j] == 1 and on_neighbors > 3:\r\n new_grid[i][j] = 0\r\n\r\n # Any OFF cell with three ON neighbors turns ON\r\n elif grid[i][j] == 0 and on_neighbors == 3:\r\n new_grid[i][j] = 1\r\n\r\n return new_grid #manipulated game grid\r",
"def push_up (grid):\r\n for a in range (4): \r\n for i in range(3,0,-1): \r\n for j in range(4): \r\n if grid[i-1][j]==0: \r\n grid[i-1][j]=grid[i][j] \r\n grid[i][j]=0\r\n #joining like numbers \r\n for i in range(3): \r\n for j in range(4): \r\n if grid[i][j]==grid[i+1][j]: \r\n grid[i][j]=(grid[i][j])*2\r\n grid[i+1][j]=0\r\n #pafter adding the numbers continue to move them \r\n for a in range (4): \r\n for i in range(3,0,-1): \r\n for j in range(4): \r\n if grid[i-1][j]==0: \r\n grid[i-1][j]=grid[i][j] \r\n grid[i][j]=0",
"def _cal_grid_parameters_without_bsite(self, spacing, extra_buffer, nc_handle):\n assert spacing > 0 and extra_buffer > 0, \"spacing and extra_buffer must be positive\"\n self._set_grid_key_value(\"origin\", np.zeros( [3], dtype=float))\n \n self._set_grid_key_value(\"d0\", np.array([spacing, 0, 0], dtype=float))\n self._set_grid_key_value(\"d1\", np.array([0, spacing, 0], dtype=float))\n self._set_grid_key_value(\"d2\", np.array([0, 0, spacing], dtype=float))\n self._set_grid_key_value(\"spacing\", np.array([spacing]*3, dtype=float))\n \n lj_radius = np.array(self._prmtop[\"LJ_SIGMA\"]/2., dtype=float)\n dx = (self._crd[:,0] + lj_radius).max() - (self._crd[:,0] - lj_radius).min()\n dy = (self._crd[:,1] + lj_radius).max() - (self._crd[:,1] - lj_radius).min()\n dz = (self._crd[:,2] + lj_radius).max() - (self._crd[:,2] - lj_radius).min()\n\n print(\"Receptor enclosing box [%f, %f, %f]\"%(dx, dy, dz))\n print(\"extra_buffer: %f\"%extra_buffer)\n\n length = max([dx, dy, dz]) + 2.0*extra_buffer\n count = np.ceil(length / spacing) + 1\n \n self._set_grid_key_value(\"counts\", np.array([count]*3, dtype=int))\n print(\"counts \", self._grid[\"counts\"])\n print(\"Total box size %f\" %((count-1)*spacing))\n\n for key in [\"origin\", \"d0\", \"d1\", \"d2\", \"spacing\", \"counts\"]:\n self._write_to_nc(nc_handle, key, self._grid[key])\n return None",
"def grid_inflation(self):\n for obs in self.obstacle_list:\n\n inflation_x1 = round((obs[0][0]-self._inflation_radius)/self.step_size)\n\n inflation_y2 = round((obs[0][1] + obs[2] +self._inflation_radius)/self.step_size)\n\n inflation_x2 = round((obs[0][0] + obs[1] +self._inflation_radius)/self.step_size)\n\n inflation_y1 = round((obs[0][1] -self._inflation_radius)/self.step_size)\n\n self.grid[1, inflation_x1:inflation_x2+1,\n inflation_y1:inflation_y2+1] = INFLATION_COST\n\n # border inflation\n self.grid[1, 0:self.gridwidth+1, 0:round(self._inflation_radius/self.step_size)+1] = INFLATION_COST\n self.grid[1, 0:self.gridwidth+1, self.gridheight-round(self._inflation_radius / self.step_size):self.gridheight+1] = INFLATION_COST\n self.grid[1, 0:round(self._inflation_radius/self.step_size)+1, 0:self.gridheight+1] = INFLATION_COST\n self.grid[1, self.gridwidth-round(self._inflation_radius/self.step_size):self.gridwidth+1, 0:self.gridheight+1] = INFLATION_COST\n\n # if NEED_DRAW_INFLATED_GRID:\n # for i in range(self.gridwidth):\n # plt.scatter(i,0)\n # plt.scatter(i,self.gridheight)\n # for j in range(self.gridheight):\n # plt.scatter(0,j)\n # plt.scatter(self.gridwidth,j)\n # if self.grid[i, j] != 0:\n # plt.scatter(i,j)\n # plt.show()\n\n return self.grid",
"def update_pop_matrix(self):\n for row in self.unique_rows[1:-1]: # First and last cell is water\n for col in self.unique_cols[1:-1]: # First and last cell is water\n cell = self.landscape[(row, col)]\n if cell.is_mainland:\n # print(cell)\n self.herb_pop_matrix[row - 1][col - 1] = cell.herb_count\n self.carn_pop_matrix[row - 1][col - 1] = cell.carn_count",
"def move(self):\r\n for index in range(self.size):\r\n self.values[index] = self.values[index] + self.velocities[index]\r\n \r\n # Adjust values to keep particle inside boundaries.\r\n if self.values[index] < Particle.MIN_VALUE:\r\n self.values[index] = (-self.values[index] % Particle.MAX_VALUE)\r\n elif self.values[index] > Particle.MAX_VALUE:\r\n self.values[index] = (self.values[index] % Particle.MAX_VALUE)",
"def change_cell(self):\n # TODO: assess whether this may partly moved into the base class\n\n x, mu = self.update_position_direction(self.l_edge)\n mu_mean = self.calculate_mean_mu(self.x, x, self.l_edge)\n self.update_estimators(self.l_edge, mu_mean)\n\n if self.next_cell_index == self.grid.Ncells:\n # packet escapes\n self.is_escaped = True\n self.is_active = False\n self.x = self.cell_xr\n\n elif self.next_cell_index == -1:\n # packets gets reflected\n\n self.x = self.cell_xl\n self.mu = -self.mu\n\n self.calculate_and_set_propagation_distances()\n\n else:\n # packet is transported into target cell\n if self.next_cell_index > self.cell_index:\n # packet is moved one cell to the right\n\n self.x = self.grid.xl[self.next_cell_index]\n\n else:\n # packet is moved one cell to the left\n\n self.x = self.grid.xr[self.next_cell_index]\n\n # reset cell-based properties for easy access\n self.cell_index = self.next_cell_index\n self.cell_chi = self.grid.chi[self.cell_index]\n self.cell_xl = self.grid.xl[self.cell_index]\n self.cell_xr = self.grid.xr[self.cell_index]\n self.cell_dx = self.grid.dx[self.cell_index]\n\n # recalculate distances\n self.calculate_and_set_propagation_distances()",
"def _update_w(self, idx):\n self.w = ((self._w - 0.4) * (self._generations - idx)) /\\\n (self._generations + 0.4)",
"def measurement_update(particles, measured_marker_list, grid):\n weight = []\n cnt = 0\n\n # no new sensor info\n if len(measured_marker_list) == 0:\n s = 1\n for p in particles:\n weight.append((p, 1/len(particles)))\n else:\n for p in particles:\n markers_visible_to_p = p.read_markers(grid)\n\n if p.x < 0 or p.x >= grid.width or p.y < 0 or p.y >= grid.height:\n weight.append((p, 0))\n continue\n if (p.x, p.y) in grid.occupied:\n weight.append((p, 0))\n continue\n\n match = []\n diff = int(math.fabs(len(measured_marker_list)-len(markers_visible_to_p)))\n\n for cm in measured_marker_list:\n if len(markers_visible_to_p) == 0:\n break\n cmx, cmy, cmh = add_marker_measurement_noise(cm, MARKER_TRANS_SIGMA, MARKER_ROT_SIGMA)\n\n # find minp, the closest marker out of markers_visible_to_particle\n minp = markers_visible_to_p[0]\n mind = grid_distance(cmx, cmy, minp[0], minp[1])\n\n for mvp in markers_visible_to_p:\n mvpx, mvpy, mvph = mvp[0], mvp[1], mvp[2]\n dist = grid_distance(cmx, cmy, mvpx, mvpy)\n if dist < mind:\n mind = dist\n minp = mvp\n\n # store the pairing [cm, m] for later calculations\n match.append((minp, cm))\n markers_visible_to_p.remove(minp)\n\n # use match to calculate weight of p\n prob = 1\n\n maxc1 = 0\n maxc2 = (45 ** 2) / (2*(MARKER_ROT_SIGMA ** 2))\n c1 = 2*(MARKER_TRANS_SIGMA ** 2)\n c2 = 2*(MARKER_ROT_SIGMA ** 2)\n\n for i, j in match:\n distBetweenMarkers = grid_distance(i[0], i[1], j[0], j[1])\n angleBetweenMarkers = diff_heading_deg(i[2], j[2])\n const1 = (distBetweenMarkers ** 2) / c1\n const2 = (angleBetweenMarkers ** 2) / c2\n maxc1 = max(maxc1, const1)\n prob *= np.exp(-const1-const2)\n\n for _ in range(diff):\n prob *= np.exp(-maxc1-maxc2)\n\n weight.append((p, prob))\n\n #normalize weight\n s = 0\n weight.sort(key=lambda x: x[1])\n delete = int(PARTICLE_COUNT/100)\n weight = weight[delete:]\n for i, j in weight:\n if j == 0:\n cnt+=1\n else:\n s += j\n weight = weight[cnt:]\n cnt += delete\n\n plist = []\n wlist = []\n\n for i, j in weight:\n newi = Particle(i.x, i.y, i.h)\n wlist.append(j/s)\n plist.append(newi)\n\n newplist = []\n\n if plist != []:\n newplist = np.random.choice(plist, size=len(plist), replace = True, p=wlist)\n\n measured_particles = Particle.create_random(cnt, grid)[:]\n\n for p in newplist:\n ph = add_gaussian_noise(p.h, ODOM_HEAD_SIGMA)\n px = add_gaussian_noise(p.x, ODOM_TRANS_SIGMA)\n py = add_gaussian_noise(p.y, ODOM_TRANS_SIGMA)\n newp = Particle(px, py, ph)\n measured_particles.append(newp)\n\n return measured_particles",
"def _data_move_in_mc_on_w(tik_inst, dst, src, data_pos_info):\n\n sub_h_size, sub_w_size, h_size, w_size, w_offset = data_pos_info\n data_cnt_one_block = _get_elment_cnt_one_block(src.dtype)\n sub_w_block = _ceil_div(sub_w_size, data_cnt_one_block)\n sub_h_align_block_size = sub_h_size // data_cnt_one_block * data_cnt_one_block\n sub_h_left = sub_h_size % data_cnt_one_block\n is_not_w_block_align = w_size % data_cnt_one_block > 0\n is_h_size_smaller_one_block = h_size < data_cnt_one_block\n\n def _move_in_one_more_block():\n \"\"\"\n move in one more block of h when h > sub_h and sub_h is not block align\n \"\"\"\n with tik_inst.for_range(0, sub_h_align_block_size) as sub_h_idx:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx],\n src[w_offset + w_size * sub_h_idx], 0, 1, sub_w_block, 0, 0)\n # in order to avoid dirty data when multiple core\n with tik_inst.for_range(0, data_cnt_one_block) as sub_h_idx_1:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block *\n (sub_h_align_block_size + sub_h_idx_1)],\n src[w_offset +\n w_size * (sub_h_size - data_cnt_one_block + sub_h_idx_1)],\n 0, 1, sub_w_block, 0, 0)\n\n with tik_inst.if_scope(is_not_w_block_align):\n # sub_h is block align or h is not enough one block\n with tik_inst.if_scope(tik.any(sub_h_left == 0, is_h_size_smaller_one_block)):\n with tik_inst.for_range(0, sub_h_size) as sub_h_idx:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx],\n src[w_offset + w_size * sub_h_idx], 0, 1, sub_w_block, 0, 0)\n with tik_inst.else_scope():\n _move_in_one_more_block()\n\n with tik_inst.else_scope():\n with tik_inst.if_scope(tik.any(sub_h_left == 0, is_h_size_smaller_one_block)):\n src_strides = w_size // data_cnt_one_block - sub_w_block\n # mte max strides value is 65535\n with tik_inst.if_scope(src_strides > MTE_STRIDES):\n with tik_inst.for_range(0, sub_h_size) as sub_h_idx_2:\n tik_inst.data_move(dst[sub_w_size * sub_h_idx_2],\n src[w_offset + w_size * sub_h_idx_2],\n 0, 1, sub_w_block, 0, 0)\n with tik_inst.else_scope():\n tik_inst.data_move(dst, src[w_offset], 0, sub_h_size, sub_w_block, src_strides, 0)\n with tik_inst.else_scope():\n _move_in_one_more_block()",
"def update(frame_num, mat, grid, N):\n\n new_grid = np.copy(grid)\n #print(\"grid size:\", grid.shape)\n for i in range(1, grid.shape[0]-1):\n for j in range(1, grid.shape[1]-1):\n neighbors = int(grid[i-1, j] + grid[i+1, j] + \\\n grid[i, j+1] + grid[i, j-1] + \\\n grid[i-1,j-1] + grid[i+1,j+1] + \\\n grid[i+1,j-1] + grid[i-1,j+1])\n if grid[i, j] == ON:\n if not (2 <= neighbors <= 3):\n new_grid[i, j] = OFF\n elif grid[i, j] == OFF and neighbors == 3:\n # Grow a cell\n new_grid[i, j] = ON\n else:\n new_grid[i, j] = OFF\n\n ### Update new grid\n mat.set_data(new_grid)\n grid[:] = new_grid[:] # Brackets are important\n return mat",
"def push_up (grid):\r\n for i in range (3):\r\n for row in range(1,4):\r\n for col in range(4):\r\n if grid[row-1][col] == 0:\r\n grid[row-1][col] = grid[row][col]\r\n grid[row][col] = 0\r\n for row in range(1,4):\r\n for col in range(4):\r\n if grid[row-1][col] == grid[row][col]:\r\n grid[row-1][col] = grid[row-1][col]*2\r\n grid[row][col]=0\r\n for row in range(1,4):\r\n for col in range(4):\r\n if grid[row-1][col] == 0:\r\n grid[row-1][col] = grid[row][col]\r\n grid[row][col] = 0\r\n \r\n return grid",
"def change_cell(self):\n\n x, mu = self.update_position_direction(self.l_edge)\n mu_mean = self.calculate_mean_mu(self.x, x, self.l_edge)\n self.update_estimators(self.l_edge, mu_mean)\n\n if self.next_cell_index == self.grid.Ncells:\n # packet escapes\n self.is_escaped = True\n self.is_active = False\n self.mu = mu\n self.x = self.cell_xr\n\n elif self.next_cell_index == -1:\n\n raise GeometryException(\"No inner boundary in homogeneous sphere\")\n\n else:\n # packet is transported into target cell\n\n self.mu = mu\n\n if self.next_cell_index > self.cell_index:\n # packet is moved one cell to the right\n\n self.x = self.grid.xl[self.next_cell_index]\n\n else:\n # packet is moved one cell to the left\n\n self.x = self.grid.xr[self.next_cell_index]\n\n # reset cell-based properties for easy access\n self.cell_index = self.next_cell_index\n self.cell_chi = self.grid.chi[self.cell_index]\n self.cell_xl = self.grid.xl[self.cell_index]\n self.cell_xr = self.grid.xr[self.cell_index]\n self.cell_dx = self.grid.dx[self.cell_index]\n self.cell_dV = self.grid.dV[self.cell_index]\n\n # recalculate distances\n self.calculate_and_set_propagation_distances()",
"def shift_spect_grid( self, grid, n_move,\n shift_rho=True, shift_currents=True ):\n if grid.use_cuda:\n shift = grid.d_field_shift\n # Get a 2D CUDA grid of the size of the grid\n tpb, bpg = cuda_tpb_bpg_2d( grid.Ep.shape[0], grid.Ep.shape[1] )\n # Shift all the fields on the GPU\n shift_spect_array_gpu[tpb, bpg]( grid.Ep, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Em, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Ez, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bz, shift, n_move )\n if shift_rho:\n shift_spect_array_gpu[tpb, bpg]( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_gpu[tpb, bpg]( grid.Jp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jz, shift, n_move )\n else:\n shift = grid.field_shift\n # Shift all the fields on the CPU\n shift_spect_array_cpu( grid.Ep, shift, n_move )\n shift_spect_array_cpu( grid.Em, shift, n_move )\n shift_spect_array_cpu( grid.Ez, shift, n_move )\n shift_spect_array_cpu( grid.Bp, shift, n_move )\n shift_spect_array_cpu( grid.Bm, shift, n_move )\n shift_spect_array_cpu( grid.Bz, shift, n_move )\n if shift_rho:\n shift_spect_array_cpu( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_cpu( grid.Jp, shift, n_move )\n shift_spect_array_cpu( grid.Jm, shift, n_move )\n shift_spect_array_cpu( grid.Jz, shift, n_move )",
"def update(self):\r\n if self.able_to_move:\r\n self.pix_pos += self.direction*self.speed\r\n if self.time_to_move():\r\n if self.stored_direction != None:\r\n self.direction = self.stored_direction\r\n self.able_to_move = self.can_move()\r\n # calls to the next function in order to check that the player is within bounds \r\n\r\n self.grid_pos[0] = (self.pix_pos[0]-TOP_BOTTOM_BUFFER +\r\n self.app.cell_width//2)//self.app.cell_width+1\r\n self.grid_pos[1] = (self.pix_pos[1]-TOP_BOTTOM_BUFFER +\r\n self.app.cell_height//2)//self.app.cell_height+1\r\n # keep track of where the player is currently to the grid \r\n\r\n if self.on_coin():\r\n self.eat_coin()\r\n # removes the coin once the player is over the tile\r\n\r\n if self.on_fruit():\r\n self.eat_fruit()\r\n # removes the fruit once the player is over the tile\r"
] | [
"0.59834635",
"0.5809151",
"0.57765406",
"0.56846297",
"0.55949116",
"0.5528766",
"0.5480749",
"0.54742026",
"0.54698974",
"0.54524153",
"0.54521763",
"0.54496145",
"0.5448103",
"0.5435209",
"0.54142",
"0.5403219",
"0.53876007",
"0.5387515",
"0.5371499",
"0.5368437",
"0.53605324",
"0.5331428",
"0.5330667",
"0.5313796",
"0.52975523",
"0.52965856",
"0.52924114",
"0.52881426",
"0.52798",
"0.5271108"
] | 0.67525464 | 0 |
Generate new particles at the right end of the plasma (i.e. between z_end_plasma - nz_inject*dz and z_end_plasma). Return them in the form of a particle buffer of shape (8, Nptcl) | def generate_particles( self, species, dz, time ) :
# Shortcut for the number of integer quantities
n_int = species.n_integer_quantities
n_float = species.n_float_quantities
# Create new particle cells
if (self.nz_inject > 0) and (species.continuous_injection == True):
# Create a temporary density function that takes into
# account the fact that the plasma has moved
if species.dens_func is not None:
def dens_func( z, r ):
return( species.dens_func( z-self.v_end_plasma*time, r ) )
else:
dens_func = None
# Create the particles that will be added
zmax = self.z_end_plasma
zmin = self.z_end_plasma - self.nz_inject*dz
Npz = self.nz_inject * self.p_nz
new_ptcl = Particles( species.q, species.m, species.n,
Npz, zmin, zmax, species.Npr, species.rmin, species.rmax,
species.Nptheta, species.dt, dens_func=dens_func,
ux_m=self.ux_m, uy_m=self.uy_m, uz_m=self.uz_m,
ux_th=self.ux_th, uy_th=self.uy_th, uz_th=self.uz_th)
# Initialize ionization-relevant arrays if species is ionizable
if species.ionizer is not None:
new_ptcl.make_ionizable( element=species.ionizer.element,
target_species=species.ionizer.target_species,
level_start=species.ionizer.level_start,
full_initialization=False )
# Convert them to a particle buffer
# - Float buffer
float_buffer = np.empty( (n_float, new_ptcl.Ntot), dtype=np.float64 )
float_buffer[0,:] = new_ptcl.x
float_buffer[1,:] = new_ptcl.y
float_buffer[2,:] = new_ptcl.z
float_buffer[3,:] = new_ptcl.ux
float_buffer[4,:] = new_ptcl.uy
float_buffer[5,:] = new_ptcl.uz
float_buffer[6,:] = new_ptcl.inv_gamma
float_buffer[7,:] = new_ptcl.w
if species.ionizer is not None:
float_buffer[8,:] = new_ptcl.ionizer.w_times_level
# - Integer buffer
uint_buffer = np.empty( (n_int, new_ptcl.Ntot), dtype=np.uint64 )
i_int = 0
if species.tracker is not None:
uint_buffer[i_int,:] = \
species.tracker.generate_new_ids(new_ptcl.Ntot)
i_int += 1
if species.ionizer is not None:
uint_buffer[i_int,:] = new_ptcl.ionizer.ionization_level
else:
# No new particles: initialize empty arrays
float_buffer = np.empty( (n_float, 0), dtype=np.float64 )
uint_buffer = np.empty( (n_int, 0), dtype=np.uint64 )
return( float_buffer, uint_buffer ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_particles(self):\n # xf, yf = create_fluid_with_solid_cube()\n xf, yf = create_fluid()\n uf = np.zeros_like(xf)\n vf = np.zeros_like(xf)\n m = initialize_mass(xf, yf)\n rho = initialize_density_fluid(xf, yf)\n h = np.ones_like(xf) * self.hdx * self.dx\n fluid = get_particle_array_wcsph(x=xf, y=yf, h=h, m=m, rho=rho, u=uf,\n v=vf, name=\"fluid\")\n\n xt, yt = create_boundary(self.dx / 2.)\n ut = np.zeros_like(xt)\n vt = np.zeros_like(xt)\n m = np.ones_like(xt) * 1500 * self.dx * self.dx\n rho = np.ones_like(xt) * 1000\n h = np.ones_like(xt) * self.hdx * self.dx / 2.\n tank = get_particle_array_wcsph(x=xt, y=yt, h=h, m=m, rho=rho, u=ut,\n v=vt, name=\"tank\")\n\n return [fluid, tank]",
"def generate_particle_distribution(self, max_loop = np.inf, outfile=None):\n \n self.pos = np.zeros((self.N_part, 3))\n self.vel = np.zeros((self.N_part, 3))\n \n \n F_max = np.max(self.DF.f) ; F_min = np.min(self.DF.f)\n\n n_particles = 0\n loop_counter = 0\n \n if self.optimize:\n relative_potential = self._interpolate_relative_potential\n else:\n relative_potential = self.DF.relative_potential\n \n \n \n # Continue until max number of particles chosen, or until max loop counter\n while ((n_particles < self.N_part) and (loop_counter < max_loop)):\n \n # choose random position, eval potential, choose velocity\n r = self._choose_position()\n \n Psi = relative_potential(r) \n v = self._choose_velocity(r, Psi)\n \n E = Psi - 0.5 * v * v\n\n # interpolate along DF to find f(E) of chosen particle\n f_E = self.DF.interpolate_f(E)\n\n # random number from 0 to F_max for accept reject\n #F = np.random.rand() * F_max\n \n # HOLY CRAP....Fmax - Fmin ... not Fmin - Fmax\n F = 10.0**( np.random.rand()*(np.log10(F_max) - np.log10(F_min)) + np.log10(F_min) )\n \n \n if F <= f_E: # accept particle\n\n \n # convert position to cartesian using random theta and phi\n theta = np.random.rand() * np.pi\n phi = np.random.rand() * 2.0 * np.pi\n \n x = np.sin(theta) * np.cos(phi)\n y = np.sin(theta) * np.sin(phi)\n z = np.cos(theta)\n \n # save particle position\n self.pos[n_particles] = r * np.array([x,y,z])\n \n # repeat for velocity using new random numbers\n theta = np.random.rand() * np.pi\n phi = np.random.rand() * 2.0 * np.pi\n \n vx = np.sin(theta) * np.cos(phi)\n vy = np.sin(theta) * np.sin(phi)\n vz = np.cos(theta)\n \n # save particle velocity\n self.vel[n_particles] = v * np.array([vx,vy,vz])\n \n \n n_particles = n_particles + 1\n \n \n if (loop_counter % 5000) == 0:\n _my_print(\"Have %4i particles. On loop %6i\"%(n_particles, loop_counter))\n loop_counter = loop_counter + 1\n \n \n if (not outfile == None):\n self.write_pd(outfile)\n \n return self.pos, self.vel",
"def _init_particles(self):\n self.NPART = self.grid.get_npart()\n self.particles = np.empty(self.NPART, dtype=object)\n for i in range(self.NPART):\n tmem = TMEM\n ux = UXM + UPRIME*normal()*LANGFACTOR\n vy = VYM + UPRIME*normal()*LANGFACTOR\n self.particles[i] = Particle(tmem=tmem, ux=ux, vy=vy)\n #\n # PUT THE PARTICLES IN THE CELLS.\n # LOOP OVER CELLS AND DEFINE THEIR PARTICLES.\n # FOR NOW, ONLY POSITION DEPENDS ON SPACE HEIGHT & MEMORY DO NOT.\n # FIRST THE TREE PARTICLES, THEN THE BUILDING PARTICLES.\n #\n NX = self.grid.NX\n NY = self.grid.NY\n icounter = 0\n for i in range(NX - 1):\n for j in range(NY - 1):\n cell = self.grid.CELLS[i, j]\n x = self.grid.XCELL[i, j]\n y = self.grid.YCELL[i, j]\n for k in range(cell.NPARTTR):\n self.particles[k + icounter].update(x=x, y=y, type=1)\n for k in range(cell.NPARTRAD):\n self.particles[k + cell.NPARTTR + icounter].update(x=x, y=y, type=2)\n icounter += cell.NPARTTR + cell.NPARTRAD",
"def distribute_waterbag(self):\n # Generate particles by creating trials and finding particles with potential less than emittance, then assign the rest to momentum\n ptclsMade = 0\n phaseSpaceList = []\n while ptclsMade < self.npart:\n ranU = 0.0\n while ranU <= 0:\n ranU = random.random()\n\n # Generate some bounds on the transverse size to reduce waste in generating the bunch\n # Use the lemming method to find the maximum y\n trialH = np.sqrt(ranU)\n newH = self.emit*trialH\n y0 = np.sqrt(newH)\n #self.emittance = newH\n yMax = newton(self.whatsleft, y0)\n\n #bounding the horizontal coordinate is difficult, but it should not exceed the pole\n xMax = self.c\n #xMax = yMax\n\n trialValue = 1e10\n while trialValue >= newH:\n xTrial = 2.*(0.5 - random.random())*xMax\n yTrial = 2.*(0.5 - random.random())*yMax\n trialValue = self.compute_potential(xTrial, yTrial)\n\n initialValue = trialValue\n if initialValue < newH:\n pMag = np.sqrt(2*(newH - initialValue))\n pDir = 2*np.pi* random.random()\n pxHat = pMag * np.cos(pDir)\n pyHat = pMag * np.sin(pDir)\n xReal = xTrial * np.sqrt(self.betax)\n yReal = yTrial * np.sqrt(self.betay)\n pxReal = (pxHat - self.alphax*xTrial)/np.sqrt(self.betax)\n pyReal = (pyHat - self.alphay*yTrial)/np.sqrt(self.betay)\n ptclCoords = np.array([xReal, pxReal, yReal, pyReal])\n phaseSpaceList.append(ptclCoords)\n ptclsMade += 1\n\n #Add 3 more particles if creating a quiet start\n if self.quiet:\n self.exact_centroids(ptclCoords, phaseSpaceList)\n ptclsMade += 3\n else:\n print(\"Initial value generated exceeds limiting H. Sampling new value.\")\n\n self.particles[:,:4] = np.asarray(phaseSpaceList)",
"def build(self):\n # Store current positions of all particles\n self.old_pos = []\n for p in self.sys.particles:\n self.old_pos.append(copy(p.r))\n \n # Set up the cell list\n self.cell_list.wipe()\n for p in self.sys.particles:\n self.cell_list.add_particle(p)\n\n # Build the list \n self.neighbours = []\n for p in self.sys.particles:\n neighbours = []\n for n in self.cell_list.get_neighbours(p):\n pn = self.sys.particles[n]\n if pn.id > p.id:\n dr = pn.r - p.r \n dr.apply_periodic(self.sys.box)\n if dr.length() < self.rcut + self.pad:\n neighbours.append(n)\n self.neighbours.append(neighbours)\n \n self.sys.has_nl = True",
"def distribute_KV(self):\n\n assert (self.emitx == self.emity), \"For a KV distribution, the planar emittances must be equal\"\n\n #total emittance of the K-V distribution is 4 times the planar emittance\n emit = 4.*self.emitx\n self.emit = emit\n\n # Generate some bounds on the transverse size to reduce waste in generating the bunch\n # Use the lemming method to find the maximum y\n y0 = np.sqrt(self.emit)\n\n yMax = newton(self.whatsleft, y0)\n xMax = yMax\n\n # Generate particles by creating trials and finding particles with potential less than emittance,\n # then assign the rest to momentum\n ptclsMade = 0\n phaseSpaceList = []\n\n while ptclsMade < self.npart:\n #Note that the particle coordinates here are distributed in normal coordinates\n xTrial = 2.*(0.5 - random.random())*xMax\n yTrial = 2.*(0.5 - random.random())*yMax\n trialValue = self.compute_potential(xTrial, yTrial)\n if trialValue < self.emit:\n\n pMag = np.sqrt(2.*(self.emit - trialValue))\n pDir = 2.*np.pi * random.random()\n pxHat = pMag * np.cos(pDir)\n pyHat = pMag * np.sin(pDir)\n\n xReal = xTrial * np.sqrt(self.betax)\n yReal = yTrial * np.sqrt(self.betay)\n\n #We want to provide the user with standard (non-normal) coordinates\n pxReal = (pxHat - self.alphax*xTrial)/np.sqrt(self.betax)\n pyReal = (pyHat - self.alphay*yTrial)/np.sqrt(self.betay)\n\n ptclCoords = np.array([xReal, pxReal, yReal, pyReal])\n phaseSpaceList.append(ptclCoords)\n ptclsMade += 1\n\n #Add 3 more particles if creating a quiet start\n if self.quiet:\n self.exact_centroids(ptclCoords, phaseSpaceList)\n ptclsMade += 3\n\n self.particles[:,:4] = np.asarray(phaseSpaceList)",
"def x_add_particles():\n particle_count_list = np.zeros(7)",
"def addParticles( screen, number, color ):\n\t\n\tparticles = []\n\t\n\tfor i in range( number ):\n\t\n\t\tradius = 5\n\t\tmass = 1\n\t\t\n\t\t#random position and velocity\n\t\tx, y = randint(-WINDOW_X + radius, 1), randint(-WINDOW_Y + radius, WINDOW_Y - radius)\n\t\tvx, vy = randrange(-1, 2, 2) * 100, randrange(-1, 2, 2) * 100\n\t\t\n\t\tparticles.append( Particle( screen, x, y, vx, vy, radius, mass, color ))\n\t\n\treturn particles",
"def inject_planet(self,data, psf_library, c_ratio=[0.01, 0.1], x_bound=[4, 61], y_bound=[4, 61], no_blend=False):\n\n image = data.copy()\n pl_num = np.random.randint(1, high=4)\n pos_label = np.zeros([64, 64])\n used_xy = np.array([])\n c_prior = np.linspace(c_ratio[0], c_ratio[1], 100)\n if x_bound[0] < 4 or x_bound[0] > 61:\n raise Exception(\"current method only injects whole psf\")\n if y_bound[0] < 4 or y_bound[0] > 61:\n raise Exception(\"current method only injects whole psf\")\n\n for num in range(pl_num):\n while True:\n np.random.shuffle(c_prior)\n psf_idx = np.random.randint(0, high=psf_library.shape[0])\n Nx = np.random.randint(x_bound[0], high=x_bound[1])\n Ny = np.random.randint(y_bound[0], high=y_bound[1])\n if len(used_xy) == 0:\n pass\n else:\n if no_blend:\n if np.any(dist([Nx, Ny], used_xy) < 3):\n pass\n else:\n if np.any(np.array([Nx, Ny]) == used_xy):\n pass\n if dist([Nx, Ny], (32.5, 32.5)) < 4:\n pass\n else:\n planet_psf = psf_library[psf_idx]\n brightness_f = c_prior[0] * np.max(image) / np.max(planet_psf)\n image[Ny - 4:Ny + 3, Nx - 4:Nx + 3] += planet_psf * brightness_f\n used_xy = np.append(used_xy, [Nx, Ny]).reshape(-1, 2)\n pos_label[Ny - 4:Ny + 3, Nx - 4:Nx + 3] = 1\n break\n return image, pos_label",
"def particle_initial_velocity(fignr,N,D,T,m,dim,kb):\n V = np.zeros((3,N))\n V[0:dim,:] = np.random.normal(0, kb*T/m, (dim,N))# / np.sqrt(T/(kb*m))\n plotfunctions.velocity(fignr,N,V)\n # Typical speed for particles\n return V",
"def new_star_particle():\n function = LegacyFunctionSpecification()\n function.must_handle_array = True\n function.addParameter('index_of_the_particle', dtype='int32', direction=function.OUT, description =\n \"\"\"\n An index assigned to the newly created particle.\n This index is supposed to be a local index for the code\n (and not valid in other instances of the code or in other codes)\n \"\"\"\n )\n for par in [\"x\", \"y\", \"z\"]:\n function.addParameter(par, dtype='float64', unit=generic_unit_system.length, direction=function.IN, \n description = \"The initial position vector of the particle\")\n function.addParameter('radius', dtype='float64', unit=generic_unit_system.length, direction=function.IN, description = \"The radius of the particle\")\n for par in [\"red\", \"green\", \"blue\"]:\n function.addParameter(par, dtype='float64', direction=function.IN, \n description = \"The RGB color of the particle\")\n function.addParameter(\"alpha\", dtype='float64', direction=function.IN, description = \"The opacity of the particle\", default = 1.0)\n function.addParameter('npoints', dtype='int32', direction=function.LENGTH)\n function.result_type = 'int32'\n return function",
"def init_particles(self):\n \n # Each particle is a dimension-K vector. We generate each particle \n # uniformly at random from the space [0,1]^K. \n self.Particles = np.random.uniform(0, 1, (self.Npar, self.K))\n #print(\"Particles: \", self.Particles) \n return None",
"def __init__(self, dim: tuple, count: int):\n self.surface = pygame.Surface(dim)\n # initialize\n self.particles = []\n # initialize\n for counter in range(count):\n pos = pygame.Vector2(random.randint(0, self.surface.get_width()), random.randint(0, self.surface.get_height()))\n direction = pygame.Vector2(10 * (random.random() - 0.5), 10 * (random.random() - 0.5))\n color = pygame.Color(random.randint(0, 255), random.randint(0, 255), random.randint(0, 255), 255)\n size = 5 + random.randint(0, 10)\n particle = Particle(self.surface, pos, direction, size, color)\n self.particles.append(particle)",
"def new_marker_particle():\n function = LegacyFunctionSpecification()\n function.must_handle_array = True\n function.addParameter('index_of_the_particle', dtype='int32', direction=function.OUT, description =\n \"\"\"\n An index assigned to the newly created particle.\n This index is supposed to be a local index for the code\n (and not valid in other instances of the code or in other codes)\n \"\"\"\n )\n for par in [\"x\", \"y\", \"z\"]:\n function.addParameter(par, dtype='float64', unit=generic_unit_system.length, direction=function.IN, \n description = \"The initial position vector of the particle\")\n function.addParameter('radius', dtype='float64', unit=generic_unit_system.length, direction=function.IN, description = \"The radius of the particle\")\n for par in [\"red\", \"green\", \"blue\"]:\n function.addParameter(par, dtype='float64', direction=function.IN, \n description = \"The RGB color of the particle\")\n function.addParameter(\"alpha\", dtype='float64', direction=function.IN, description = \"The opacity of the particle\", default = 1.0)\n function.addParameter('npoints', dtype='int32', direction=function.LENGTH)\n function.result_type = 'int32'\n return function",
"def simulate_brownian(num_part, dt, time_steps, x0, y0, z0, sigma, drift = False):\n # Calculating drift components \n if drift == True: \n v_x = np.random.random() \n v_y = np.random.random() \n v_z = np.random.random() \n drift_x = v_x * dt \n drift_y = v_y * dt \n drift_z = v_z * dt \n else: \n drift_x = 0 \n drift_y = 0 \n drift_z = 0 \n\n # Generate Brownian increments \n increment_x = np.random.normal(loc = 0.0, scale = sigma, size = (num_part, time_steps - 1)) \n increment_y = np.random.normal(loc = 0.0, scale = sigma, size = (num_part, time_steps - 1)) \n increment_z = np.random.normal(loc = 0.0, scale = sigma, size = (num_part, time_steps - 1)) \n\n # Pre-allocation of memory for particle positions \n p_x = np.zeros(shape = (num_part, time_steps - 1)) \n p_y = np.zeros(shape = (num_part, time_steps - 1))\n p_z = np.zeros(shape = (num_part, time_steps - 1))\n\n # Generate initial position of particle(s) \n p_x[:, 0] = x0 + 20 * np.random.random(size = (1, num_part)) \n p_y[:, 0] = y0 + 20 * np.random.random(size = (1, num_part)) \n p_z[:, 0] = z0 + 20 * np.random.random(size = (1, num_part)) \n\n for p in np.arange(0, num_part, step = 1): \n for ti in np.arange(start = 1, stop = time_steps, step = 1): \n p_x[p, ti] = p_x[p, ti - 1] + increment_x[p, ti] + 10 * drift_x \n p_y[p, ti] = p_y[p, ti - 1] + increment_y[p, ti] + 10 * drift_y \n p_z[p, ti] = p_z[p, ti - 1] + increment_z[p, ti] + 10 * drift_z \n\n return p_x, p_y, p_z",
"def new_gas_particle():\n function = LegacyFunctionSpecification()\n function.must_handle_array = True\n function.addParameter('index_of_the_particle', dtype='int32', direction=function.OUT, description =\n \"\"\"\n An index assigned to the newly created particle.\n This index is supposed to be a local index for the code\n (and not valid in other instances of the code or in other codes)\n \"\"\"\n )\n for par in [\"x\", \"y\", \"z\"]:\n function.addParameter(par, dtype='float64', unit=generic_unit_system.length, direction=function.IN, \n description = \"The initial position vector of the particle\")\n function.addParameter('radius', dtype='float64', unit=generic_unit_system.length, direction=function.IN, description = \"The radius of the particle\")\n for par in [\"red\", \"green\", \"blue\"]:\n function.addParameter(par, dtype='float64', direction=function.IN, \n description = \"The RGB color of the particle\")\n function.addParameter(\"alpha\", dtype='float64', direction=function.IN, description = \"The opacity of the particle\", default = 1.0)\n function.addParameter('npoints', dtype='int32', direction=function.LENGTH)\n function.result_type = 'int32'\n return function",
"def update_particle_cloud(self, scan):\n\n \"\"\"\n Initialise arrays for the new particle cloud,\n particle weights and cummulative weights\n \"\"\"\n newParticleCloud = []\n particleWeights = []\n \n randomGauss = 10*self.NUMBER_PREDICTED_READINGS\n gaussianRandomNumX = []\n gaussianRandomNumY = []\n\n sensorSigma=0.1 #variance\n sensorMu=0 #mean\n noise=sensorSigma * numpy.random.randn() + sensorMu\n\n for i in range (0,randomGauss):\n gaussianRandomNumX.append(random.gauss(0,1))\n gaussianRandomNumY.append(random.gauss(0,1))\n\n for p in self.particlecloud.poses:\n particleWeights.append(self.sensor_model.get_weight(scan, p))\n\n for i in range(len(self.particlecloud.poses)):\n randomSelection = numpy.random.random()\n csum = 0\n for p in self.particlecloud.poses:\n weight = self.sensor_model.get_weight(scan, p) / sum(particleWeights)\n csum += weight\n if csum >= randomSelection:\n newParticle = copy.deepcopy(p)\n newParticle.position.x = newParticle.position.x + (gaussianRandomNumX[i] * noise)\n newParticle.position.y = newParticle.position.y + (gaussianRandomNumY[i] * noise)\n newParticle.position.z = newParticle.position.z\n newParticleCloud.append(newParticle)\n break\n self.particlecloud.poses = newParticleCloud\n\n pass",
"def simulate_fractionalbrownian(num_part, H, M, n, t, x0, y0, z0, gamma_H):\n # Generate zero mean and unit variance increments \n incx = np.random.normal(loc = 0.0, scale = 1.0, size = (num_part, t.shape[0])) \n incy = np.random.normal(loc = 0.0, scale = 1.0, size = (num_part, t.shape[0])) \n incz = np.random.normal(loc = 0.0, scale = 1.0, size = (num_part, t.shape[0])) \n\n # Pre-allocation of memory for particle positions \n p_x = np.zeros(shape = (num_part, t.shape[0])) \n p_y = np.zeros(shape = (num_part, t.shape[0])) \n p_z = np.zeros(shape = (num_part, t.shape[0])) \n\n # Generate initial position of particle(s)\n p_x[:, 0] = x0 + 10 * np.random.random(size = (1, num_part)) \n p_y[:, 0] = y0 + 10 * np.random.random(size = (1, num_part)) \n p_z[:, 0] = z0 + 10 * np.random.random(size = (1, num_part)) \n \n for p in np.arange(0, num_part, step = 1): \n for ti in np.arange(start = 1, stop = t.shape[0], step = 1): \n\n s1_x = np.array([ ((i ** (H - 0.5)) * incx[p, 1 + ti - i]) for i in range(1, n + 1)]).sum() \n s2_x = np.array([ (((n + i) ** (H - 0.5) - i ** (H - 0.5)) * incx[p, 1 + ti - n - i]) for i in range(1, 1 + n * (M - 1))]).sum() \n s1_y = np.array([ ((i ** (H - 0.5)) * incy[p, 1 + ti - i]) for i in range(1, n + 1)]).sum() \n s2_y = np.array([ (((n + i) ** (H - 0.5) - i ** (H - 0.5)) * incy[p, 1 + ti - n - i]) for i in range(1, 1 + n * (M - 1))]).sum() \n s1_z = np.array([ ((i ** (H - 0.5)) * incz[p, 1 + ti - i]) for i in range(1, n + 1)]).sum() \n s2_z = np.array([ (((n + i) ** (H - 0.5) - i ** (H - 0.5)) * incz[p, 1 + ti - n - i]) for i in range(1, 1 + n * (M - 1))]).sum() \n\n icx = gamma_H * (s1_x + s2_x) \n icy = gamma_H * (s1_y + s2_y) \n icz = gamma_H * (s1_z + s2_z) \n\n p_x[p, ti] = p_x[p, ti - 1] + icx \n p_y[p, ti] = p_y[p, ti - 1] + icy \n p_z[p, ti] = p_z[p, ti - 1] + icz \n return p_x, p_y, p_z",
"def initializeParticles(self):\n import itertools\n import random\n #create a list of possible ghost permutations, where each of three ghosts can be on any of the legal positions in the boards.\n permutations = list(itertools.product(self.legalIntentions, repeat=self.numAgents))\n \n random.shuffle(permutations)\n p = len(permutations)\n n = self.numParticles\n self.particles = []\n #create the particles\n while n >= p:\n self.particles += permutations\n n -= p\n #add the remainder\n self.particles += permutations[0: n - 1]",
"def create_particle(self,r,v=(0.0,0.0,0.0)):\n self.r[self.n] = r\n self.m[self.n] = self.m[self.n-1] \n self.v[self.n] = v\n self.n = self.n+1\n self.rebuild_lists()",
"def monteCarloRun(startingPoints, qms, vs, directions, BR, BZ, r, z, rLim, fluxGridCoarseness, steppingMethod):\n totalGrid = np.zeros((BR.shape[0]//fluxGridCoarseness, BR.shape[1]//fluxGridCoarseness))\n trappedGrid = np.zeros((BR.shape[0]//fluxGridCoarseness, BR.shape[1]//fluxGridCoarseness))\n rReduced = np.linspace(np.min(r), np.max(r), len(r)//fluxGridCoarseness)\n rDelta = rReduced[1]-rReduced[0]\n rReduced += rDelta/2. # Use distance to cell centers to count particles\n zReduced = np.linspace(np.min(z), np.max(z), len(z)//fluxGridCoarseness)\n zDelta = zReduced[1]-zReduced[0]\n zReduced += zDelta/2. # Use distance to cell centers to count particles\n \n habitatCrossings = 0\n GDTcrossings = 0\n detectorCounts = np.zeros(14)\n \n gridStep = r[1]-r[0]\n \n numParticles = len(qms)\n for particleNumber in prange(numParticles):\n if particleNumber % (numParticles/10) == 0:\n print(particleNumber)\n \n qm = qms[particleNumber]\n v0 = vs[particleNumber]\n dt = (r[1]-r[0])/v0/2\n maxTime = rLim * 3 / v0\n maxSteps = int(maxTime / dt)\n particleGrid = np.zeros((BR.shape[0]//fluxGridCoarseness, BR.shape[1]//fluxGridCoarseness))\n crossedHabitat = 0\n crossedGDT = 0\n particleDetectorCounts = np.zeros(14)\n \n # Generate random point and direction\n point1 = startingPoints[particleNumber]\n direction = directions[particleNumber]\n noAccelStep = 0.99*gridStep*direction\n trapped = True\n \n x = point1.copy() # copy is important... \n v = direction*v0\n E = np.zeros(3)\n \n if steppingMethod == 2:\n x, _ = RKnext(x, v, qm, BR, BZ, r, z, dt/2)\n\n for i in range(maxSteps):\n # Count crossings\n particleR = (x[0]**2 + x[1]**2)**.5\n nearestR = nearestIndex(rReduced, particleR)\n nearestZ = nearestIndex(zReduced, x[2])\n particleGrid[nearestZ, nearestR] = 1\n if 9.7 < particleR < 12.3 and -1.3 < x[2] < 1.3:\n crossedHabitat = 1\n if -14 < x[2] < 14 and particleR < 5:\n crossedGDT = 1\n # Will's detectors\n # for det in range(14):\n # vd = (x[0] - det*1.4, x[1], x[2])\n # if (vd[0]**2+vd[1]**2+vd[2]**2)**.5 < 0.5:\n # particleDetectorCounts[det] = 1\n \n # Step\n if steppingMethod == 0:\n x += noAccelStep\n elif steppingMethod == 1:\n x, v = RKnext(x, v, qm, BR, BZ, r, z, dt)\n elif steppingMethod == 2:\n B = BxyzInterpolated(x, BR, BZ, r, z)\n x, v = BBnext(x, v, qm, B, E, dt)\n \n # Stop stepping if out of bounds\n if (particleR**2+x[2]**2)**.5 > rLim + .001: \n trapped = False\n break\n detectorCounts += particleDetectorCounts\n totalGrid += particleGrid\n if trapped:\n trappedGrid += particleGrid\n habitatCrossings += crossedHabitat\n GDTcrossings += crossedGDT\n \n print(\"Will's detectors:\", detectorCounts)\n \n # Divide cell counts by volume of cell\n totalGridUnscaled = totalGrid.copy()\n trappedGridUnscaled = trappedGrid.copy()\n for i in range(len(rReduced)):\n for j in range(len(zReduced)):\n volume = np.pi*((rReduced[i]+rDelta/2.)**2-(rReduced[i]-rDelta/2.)**2)*zDelta\n totalGrid[j, i] /= volume\n trappedGrid[j, i] /= volume\n \n return rReduced, zReduced, totalGrid, trappedGrid, habitatCrossings, GDTcrossings, totalGridUnscaled, trappedGridUnscaled",
"def distribute_KV(self):\n\n assert (self.emitx == self.emity), \"For a KV distribution, the planar emittances must be equal\"\n\n #total emittance of the K-V distribution is equal to the planar emittance\n #this differs from the linear K-V distribution\n emit = self.emitx\n self.emit = emit\n\n # Generate some bounds on the transverse size to reduce waste in generating the bunch\n # Use the lemming method to find the maximum y\n y0 = np.sqrt(self.emit)\n\n yMax = newton(self.whatsleft, y0)\n\n #bounding the horizontal coordinate is difficult, but it should not exceed the pole\n xMax = self.c\n\n # Generate particles by creating trials and finding particles with potential less than emittance,\n # then assign the rest to momentum\n ptclsMade = 0\n phaseSpaceList = []\n\n while ptclsMade < self.npart:\n #Note that the particle coordinates here are distributed in normal coordinates\n xTrial = 2.*(0.5 - random.random())*xMax\n yTrial = 2.*(0.5 - random.random())*yMax\n trialValue = self.compute_potential(xTrial, yTrial)\n if trialValue < self.emit:\n\n pMag = np.sqrt(2.*(self.emit - trialValue))\n pDir = 2.*np.pi * random.random()\n pxHat = pMag * np.cos(pDir)\n pyHat = pMag * np.sin(pDir)\n\n xReal = xTrial * np.sqrt(self.betax)\n yReal = yTrial * np.sqrt(self.betay)\n\n #We want to provide the user with standard (non-normal) coordinates\n pxReal = (pxHat - self.alphax*xTrial)/np.sqrt(self.betax)\n pyReal = (pyHat - self.alphay*yTrial)/np.sqrt(self.betay)\n\n ptclCoords = np.array([xReal, pxReal, yReal, pyReal])\n phaseSpaceList.append(ptclCoords)\n ptclsMade += 1\n\n #Add 3 more particles if creating a quiet start\n if self.quiet:\n self.exact_centroids(ptclCoords, phaseSpaceList)\n ptclsMade += 3\n\n self.particles[:,:4] = np.asarray(phaseSpaceList)",
"def spring_particle(name, num_trajectories, NUM_PARTS, T_max, dt, sub_sample_rate, noise_std, seed):\n num_particles = NUM_PARTS\n collater = {}\n\n def diffeq_hyper(t, q, k, m, nparts):\n num_particles = nparts\n vels = q[2 * num_particles:]\n xs = q[:2 * num_particles]\n xs = xs.reshape(-1, 2)\n forces = np.zeros(xs.shape)\n new_k = np.repeat(k, num_particles) * np.tile(k, num_particles)\n new_k = np.repeat(new_k, 2).reshape(-1, 2)\n dx = np.repeat(xs, num_particles, axis=0) - np.tile(xs, (num_particles, 1))\n resu = -new_k * dx\n forces = np.add.reduceat(resu, np.arange(0, nparts * nparts, nparts)).ravel()\n\n return np.concatenate([vels / np.repeat(m, 2), forces]).ravel()\n\n def hamiltonian(vec, m, k, num_particles):\n num_particles = num_particles\n x = vec[:num_particles * 2]\n p = vec[2 * num_particles:]\n xs = x.reshape(-1, 2)\n ps = p.reshape(-1, 2)\n U1 = 0\n K = 0\n for i in range(num_particles):\n for j in range(i + 1, num_particles):\n U1 += .5 * k[i] * k[j] * ((xs[i] - xs[j]) ** 2).sum()\n K += 0.5 * ((ps[i] ** 2).sum()) / m[i]\n return K, U1\n\n theta = []\n dtheta = []\n energy = []\n mass_arr = []\n ks_arr = []\n lagrangian = []\n np.random.seed(seed)\n\n for traj in range(num_trajectories):\n ks = np.ones(NUM_PARTS)#np.random.uniform(.5, 1, size=(NUM_PARTS))\n positions = np.random.uniform(-1, 1, size=(NUM_PARTS, 2))\n velocities = np.random.uniform(-3, 3, size=(NUM_PARTS, 2))\n masses = np.ones(NUM_PARTS)#np.random.uniform(0.1, 1, size=NUM_PARTS)\n momentum = np.multiply(velocities, np.repeat(masses, 2).reshape(-1, 2))\n q = np.concatenate([positions, momentum]).ravel()\n qnrk = rk(lambda t, y: diffeq_hyper(t, y, ks, masses, num_particles), (0, T_max), q,\n t_eval=np.arange(0, T_max, dt),\n rtol=1e-12, atol=1e-12, method='DOP853')\n accum = qnrk.y.T\n ssr = int(sub_sample_rate / dt)\n accum = accum[::ssr]\n daccum = np.array([diffeq_hyper(0, accum[i], ks, masses, num_particles) for i in range(accum.shape[0])])\n energies = []\n lags = []\n for i in range(accum.shape[0]):\n ktmp, utmp = hamiltonian(accum[i], masses, ks, NUM_PARTS)\n energies.append(ktmp + utmp)\n lags.append(ktmp - utmp)\n\n accum += np.random.randn(*accum.shape) * noise_std\n daccum += np.random.randn(*daccum.shape) * noise_std\n\n theta.append(accum)\n dtheta.append(daccum)\n energy.append(energies)\n mass_arr.append(masses)\n ks_arr.append(ks)\n lagrangian.append(lags)\n\n collater['x'] = np.concatenate(theta)\n collater['dx'] = np.concatenate(dtheta)\n collater['energy'] = np.concatenate(energy)\n collater['lagrangian'] = np.concatenate(lagrangian)\n\n collater['mass'] = mass_arr\n collater['ks'] = ks_arr\n\n f = open(name + \".pkl\", \"wb\")\n pickle.dump(collater, f)\n f.close()\n\n return collater",
"def createParticles(self, type, style, *args):\n if not self.rank:\n logging.info('Creating particles {} with args'.format(type) + (' {}' * len(args)).format(*args))\n\n self.lmp.command('create_atoms {} {}'.format(type, style) + (' {}' * len(args)).format(*args))",
"def pontos(self):\n \n self.sc = 1. \n self.x = self.sc*np.array([-155., -139.4, -124., -108.5, -93., -77.5, -62., -46.5, -31., -15.5, 0, 15.5, 31., 46.5, 62., 77.5, 93., 108.5, 124., 139.5, 155.])\n self.y = self.sc*np.array([ 9.23, 14.37, 18.98, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 23.6, 21.55, 14.37, 3.59])\n self.px_index = len(self.x)\n #self.py_index = len(self.x)/2\n\n self.coord = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n \n self.x = self.x[::-1]\n self.y = -self.y[::-1] \n self.new = np.array([self.x,self.y,np.full(len(self.x),self.z)])\n self.coord = np.array([np.append(self.coord[0],self.new[0]),np.append(self.coord[1],self.new[1]),np.append(self.coord[2],self.new[2])])\n self.coord = np.array([np.append(self.coord[0],self.coord[0,0]),np.append(self.coord[1],self.coord[1,0]),np.append(self.coord[2],self.coord[2,0])])\n\n self.coord[0] = self.coord[0] - (np.amax(self.coord[0])+np.amin(self.coord[0]))/2\n self.coord[1] = self.coord[1] + (np.amax(self.coord[1])-np.amin(self.coord[1]))/2 \n \n self.coordi = np.array(self.coord)\n \n self.cg = np.array([0 + self.dx, self.H/2 + self.dy, self.z]) \n self.cgi = np.array(self.cg)\n \n self.thi = 0. + self.dth \n self.th = float(self.thi) \n \n self.coordnav(self.dx,self.dy,self.dth)",
"def __init__(self,nparticles,size, mass=1, G=1, boundary_periodic = True,early_universe=False, softner=1, position = [], momentum = []):\n self.softner = softner\n self.G = G\n self.boundary_periodic = boundary_periodic\n self.nparticles = nparticles\n self.size = size\n self.mass = np.ones(nparticles)*mass\n #If the boundary condition are not periodic, the grid_size is double but particle kept in the first quadrant so \n #that the particles cannot feel the effect of the particles closed to the opposite boundary when we take the convolution\n if boundary_periodic==True:\n self.grid_size = size\n else:\n self.grid_size = 2*size\n #Initialize the partticle grid\n # if early_universe == True:\n # self.ptclgrid.early_universe_grid(softner)\n # self.mass = self.ptclgrid.mass\n self.ptclgrid = ParticleGrid(nparticles,self.grid_size,self.size, mass=self.mass, soft=softner, early_universe=early_universe)\n #If initial position are givem, place the particle to the right place on the grid\n if len(position) != 0:\n self.ptclgrid.update_position(position, mass)\n\n self.grid = self.ptclgrid.grid\n self.grid_pos = self.ptclgrid.grid_pos\n x0,y0 = self.ptclgrid.position.transpose()\n initial_condition = np.array([x0,y0, self.mass]).transpose()\n #Initialize the Particle list containing the position and momentum of the particles\n self.particles = ParticleList(nparticles, initial_condition)\n #If initial mometa are given, intialize it \n if len(momentum) != 0:\n self.particles.momentum = momentum\n #Computes the green function on the grid\n self.compute_green_function(self.grid_size)\n #Initialize the array with the acceleration of the particles\n self.acc = np.zeros((len(self),2))",
"def __init__(self, number_of_particles, restitution_coefficient, initial_positions, initial_velocities, masses,\n radii, pbc):\n self.N = number_of_particles # amount of particles\n self.restitution_coefficient = restitution_coefficient # coefficient determining the energy lost in collisions\n # initialize variables used in the class\n self.positions = np.zeros((self.N, 3)) # positions of particles\n self.initial_positions = np.zeros((self.N, 3)) # help variable to compute mean square displacement\n self.velocities = np.zeros((self.N, 3)) # velocities of particles\n self.masses = np.zeros(self.N) # mass of each particle\n self.radii = np.zeros(self.N) # radius of each particle\n self.collision_count_particles = np.zeros(self.N) # array keeping track of the number of collisions\n\n # set parameters equal to the input to the class. Use .copy() such that the parameters can be used in outer loop\n self.positions = initial_positions.copy()\n self.initial_positions = initial_positions.copy()\n self.velocities = initial_velocities.copy()\n self.masses = masses\n self.radii = radii\n # a priority queue / heap queue of tuples of (time_collision, collision_entities, collision_count when\n # computing the collision, box number of the particles). The collision count at computation is used to\n # ignore non-valid collisions due to the involved particles being in other collisions between computation and\n # collision. Box number is needed for the pbc.\n self.collision_queue = [] # heap queue needs list structure to work\n\n # In order to create 27 copies for pbc in three dimensions one need to known their relation to the original\n # box. These are given by offsets. Offsets is also used to correct positions of particles colliding in\n # different boxes (due to the pbc).\n self.offsets = [(-1, 1, 1), (0, 1, 1), (1, 1, 1), (-1, 0, 1), (0, 0, 1), (1, 0, 1), (-1, -1, 1), (0, -1, 1),\n (1, -1, 1), (-1, 1, 0), (0, 1, 0), (1, 1, 0), (-1, 0, 0), (0, 0, 0), (1, 0, 0), (-1, -1, 0),\n (0, -1, 0), (1, -1, 0), (-1, 1, -1), (0, 1, -1), (1, 1, -1), (-1, 0, -1), (0, 0, -1),\n (1, 0, -1), (-1, -1, -1), (0, -1, -1), (1, -1, -1)]\n # Crossings is used to compute current positions due to the periodic boundary conditions. It essentially get\n # updated every time a particle cross the edge in the x-, y- or z-direction.\n self.crossings = np.zeros((self.N, 3))\n\n self.pbc = pbc # periodic boundary conditions",
"def bndy_plasma(self):\n self.ne[0], self.ne[-1] = 1e11, 1e11\n self.ni[0], self.ni[-1] = 1e11, 1e11\n self.nn[0], self.nn[-1] = 1e11, 1e11\n self.Te[0], self.Te[-1] = 0.1, 0.1\n self.Ti[0], self.Ti[-1] = 0.01, 0.01\n # self.coll_em[0], self.coll_em[-1] = 1e5, 1e5\n # self.coll_im[0], self.coll_im[-1] = 1e5, 1e5",
"def iterator(self):\n print('Iterator running...')\n for i in range(self.num_itr):\n for j in range(self.part_num):\n # create r1,r2\n r1 = np.random.uniform(self.vmin, self.vmax, self.dim)\n r2 = np.random.uniform(self.vmin, self.vmax, self.dim)\n # Update\n self.particle[j].Vel = self.w * self.particle[j].Vel \\\n + self.c1 * r1 * (self.particle[j].Best_pos - self.particle[j].Pos) \\\n + self.c2 * r2 * (self.GlobalBest_Pos - self.particle[j].Pos)\n self.particle[j].Pos = self.particle[j].Pos + self.particle[j].Vel\n # Check whether position out of search space\n for x in range(len(self.particle[j].Pos)):\n if self.particle[j].Pos[x] > self.var_size[x][1]:\n self.particle[j].Pos[x] = self.var_size[x][1]\n if self.particle[j].Pos[x] < self.var_size[x][0]:\n self.particle[j].Pos[x] = self.var_size[x][0]\n assert self.var_size[x][1] >= self.particle[j].Pos[x] >= self.var_size[x][0]\n # self.particle[j].Pos[2] = int(self.particle[j].Pos[2])\n # Recalculate cost\n #print(self.particle[j].Pos)\n self.particle[j].Cost = self.objective(self.particle[j].Pos)\n print(\"Current cost=\", self.particle[j].Cost, \"With position:\", self.particle[j].Pos)\n if self.particle[j].Cost < self.particle[j].Best_cost:\n self.particle[j].Best_cost = self.particle[j].Cost\n self.particle[j].Best_pos = self.particle[j].Pos\n print(\"Find better personel best, Updating with pos:\", self.particle[j].Pos)\n if self.particle[j].Best_cost < self.GlobalBest_Cost:\n self.GlobalBest_Cost = self.particle[j].Best_cost\n self.GlobalBest_Pos = self.particle[j].Best_pos\n print(\"Find better global solution, Updating with pos:\", self.particle[j].Pos)\n else:\n print(\"Not better than previous global solution, dropping...\")\n else:\n print(\"Not better than previous personal best, dropping...\")\n self.Best_Cost.append(self.GlobalBest_Cost)\n self.w = self.w * 0.9\n print()\n print('iteration', i + 1, ': Cost=', self.GlobalBest_Cost)\n print_params(self.GlobalBest_Pos, self.candidate, net=self.net)",
"def __create_sample_data__(npts = 20):\n\t#data function\n\tdef wavy(x, y):\n\t\treturn np.sin(0.2*np.pi*x)*np.cos(0.4*np.pi*y)\n\t\n\t#make grid\n\txs = np.linspace(0, 2*20, 2*npts + 1)\n\tys = np.linspace(0, 20, npts + 1)\n\t(xgrid, ygrid) = np.meshgrid(xs, ys)\n\tzgrid = wavy(xgrid, ygrid)\n\t\n\treturn (xgrid, ygrid, zgrid)"
] | [
"0.612357",
"0.6045421",
"0.58844817",
"0.5861245",
"0.5850004",
"0.57011443",
"0.5697617",
"0.56546646",
"0.56274694",
"0.5621595",
"0.558431",
"0.5581051",
"0.5563471",
"0.55546993",
"0.5529091",
"0.5474343",
"0.5459851",
"0.5450769",
"0.54444504",
"0.5442273",
"0.54061013",
"0.5380784",
"0.53734106",
"0.5352818",
"0.5309615",
"0.53055173",
"0.53035873",
"0.5256503",
"0.5250319",
"0.52325004"
] | 0.69258934 | 0 |
Shift the spectral fields by n_move cells (with respect to the spatial grid). Shifting is done on the GPU if use_cuda is True, otherwise on the CPU. (Typically n_move is positive, and the fields are shifted backwards.) | def shift_spect_grid( self, grid, n_move,
shift_rho=True, shift_currents=True ):
if grid.use_cuda:
shift = grid.d_field_shift
# Get a 2D CUDA grid of the size of the grid
tpb, bpg = cuda_tpb_bpg_2d( grid.Ep.shape[0], grid.Ep.shape[1] )
# Shift all the fields on the GPU
shift_spect_array_gpu[tpb, bpg]( grid.Ep, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Em, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Ez, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Bp, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Bm, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Bz, shift, n_move )
if shift_rho:
shift_spect_array_gpu[tpb, bpg]( grid.rho_prev, shift, n_move )
if shift_currents:
shift_spect_array_gpu[tpb, bpg]( grid.Jp, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Jm, shift, n_move )
shift_spect_array_gpu[tpb, bpg]( grid.Jz, shift, n_move )
else:
shift = grid.field_shift
# Shift all the fields on the CPU
shift_spect_array_cpu( grid.Ep, shift, n_move )
shift_spect_array_cpu( grid.Em, shift, n_move )
shift_spect_array_cpu( grid.Ez, shift, n_move )
shift_spect_array_cpu( grid.Bp, shift, n_move )
shift_spect_array_cpu( grid.Bm, shift, n_move )
shift_spect_array_cpu( grid.Bz, shift, n_move )
if shift_rho:
shift_spect_array_cpu( grid.rho_prev, shift, n_move )
if shift_currents:
shift_spect_array_cpu( grid.Jp, shift, n_move )
shift_spect_array_cpu( grid.Jm, shift, n_move )
shift_spect_array_cpu( grid.Jz, shift, n_move ) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shift_spect_array_gpu( field_array, shift_factor, n_move ):\n # Get a 2D CUDA grid\n iz, ir = cuda.grid(2)\n\n # Only access values that are actually in the array\n if ir < field_array.shape[1] and iz < field_array.shape[0]:\n power_shift = 1. + 0.j\n # Calculate the shift factor (raising to the power n_move ;\n # for negative n_move, we take the complex conjugate, since\n # shift_factor is of the form e^{i k dz})\n for i in range( abs(n_move) ):\n power_shift *= shift_factor[iz]\n if n_move < 0:\n power_shift = power_shift.conjugate()\n # Shift fields\n field_array[iz, ir] *= power_shift",
"def move_grids(self, fld, comm, time):\n # To avoid discrepancies between processors, only the first proc\n # decides whether to send the data, and broadcasts the information.\n dz = comm.dz\n if comm.rank==0:\n # Move the continuous position of the moving window object\n self.zmin += self.v * (time - self.t_last_move)\n # Find the number of cells by which the window should move\n zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax(\n local=False, with_damp=False, with_guard=False )\n n_move = int( (self.zmin - zmin_global_domain)/dz )\n else:\n n_move = None\n # Broadcast the information to all proc\n if comm.size > 1:\n n_move = comm.mpi_comm.bcast( n_move )\n\n # Move the grids\n if n_move != 0:\n # Move the global domain\n comm.shift_global_domain_positions( n_move*dz )\n # Shift the fields\n Nm = len(fld.interp)\n for m in range(Nm):\n # Modify the values of the corresponding z's\n fld.interp[m].zmin += n_move*fld.interp[m].dz\n fld.interp[m].zmax += n_move*fld.interp[m].dz\n # Shift/move fields by n_move cells in spectral space\n self.shift_spect_grid( fld.spect[m], n_move )\n\n # Because the grids have just been shifted, there is a shift\n # in the cell indices that are used for the prefix sum.\n if fld.use_cuda:\n fld.prefix_sum_shift += n_move\n # This quantity is reset to 0 whenever prefix_sum is recalculated\n\n # Prepare the positions of injection for the particles\n # (The actual creation of particles is done when the routine\n # exchange_particles of boundary_communicator.py is called)\n if comm.rank == comm.size-1:\n # Move the injection position\n self.z_inject += self.v * (time - self.t_last_move)\n # Take into account the motion of the end of the plasma\n self.z_end_plasma += self.v_end_plasma * (time - self.t_last_move)\n # Increment the number of particle cells to add\n nz_new = int( (self.z_inject - self.z_end_plasma)/dz )\n self.nz_inject += nz_new\n # Increment the virtual position of the end of the plasma\n # (When `generate_particles` is called, then the plasma\n # is injected between z_end_plasma - nz_inject*dz and z_end_plasma,\n # and afterwards nz_inject is set to 0.)\n self.z_end_plasma += nz_new*dz\n\n # Change the time of the last move\n self.t_last_move = time",
"def shift_spect_array_cpu( field_array, shift_factor, n_move ):\n Nz, Nr = field_array.shape\n\n # Loop over the 2D array (in parallel over z if threading is enabled)\n for iz in prange( Nz ):\n power_shift = 1. + 0.j\n # Calculate the shift factor (raising to the power n_move ;\n # for negative n_move, we take the complex conjugate, since\n # shift_factor is of the form e^{i k dz})\n for i in range( abs(n_move) ):\n power_shift *= shift_factor[iz]\n if n_move < 0:\n power_shift = power_shift.conjugate()\n # Shift the fields\n for ir in range( Nr ):\n field_array[iz, ir] *= power_shift",
"def _move_in_one_more_block():\n with tik_inst.for_range(0, sub_h_align_block_size) as sub_h_idx:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx],\n src[w_offset + w_size * sub_h_idx], 0, 1, sub_w_block, 0, 0)\n # in order to avoid dirty data when multiple core\n with tik_inst.for_range(0, data_cnt_one_block) as sub_h_idx_1:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block *\n (sub_h_align_block_size + sub_h_idx_1)],\n src[w_offset +\n w_size * (sub_h_size - data_cnt_one_block + sub_h_idx_1)],\n 0, 1, sub_w_block, 0, 0)",
"def grid_shift(grid, advection, trim_edges=0, field_list=None):\n if trim_edges == 0:\n trim_slice = slice(None, None)\n else:\n trim_slice = slice(int(trim_edges), -int(trim_edges))\n\n shifted_grid = copy.deepcopy(grid)\n\n # grab the x and y axis and trim\n shifted_grid.x[\"data\"] = grid.x[\"data\"][trim_slice].copy()\n shifted_grid.y[\"data\"] = grid.y[\"data\"][trim_slice].copy()\n\n # shift each field.\n if field_list is None:\n field_list = grid.fields.keys()\n\n for field in field_list:\n # copy data and fill with nans\n data = grid.fields[field][\"data\"].copy()\n data = np.ma.filled(data, np.nan)\n\n # shift the data\n shifted_data = shift(data, [0, advection[0], advection[1]], prefilter=False)\n\n # mask invalid, trim and place into grid\n shifted_data = np.ma.fix_invalid(\n shifted_data, copy=False, fill_value=get_fillvalue()\n )\n shifted_data = shifted_data[:, trim_slice, trim_slice]\n shifted_grid.fields[field][\"data\"] = shifted_data\n\n return shifted_grid",
"def make_move(self, move):\n self.board[int(move) - 1] = self.nplayer",
"def _data_move_in_mc_on_w(tik_inst, dst, src, data_pos_info):\n\n sub_h_size, sub_w_size, h_size, w_size, w_offset = data_pos_info\n data_cnt_one_block = _get_elment_cnt_one_block(src.dtype)\n sub_w_block = _ceil_div(sub_w_size, data_cnt_one_block)\n sub_h_align_block_size = sub_h_size // data_cnt_one_block * data_cnt_one_block\n sub_h_left = sub_h_size % data_cnt_one_block\n is_not_w_block_align = w_size % data_cnt_one_block > 0\n is_h_size_smaller_one_block = h_size < data_cnt_one_block\n\n def _move_in_one_more_block():\n \"\"\"\n move in one more block of h when h > sub_h and sub_h is not block align\n \"\"\"\n with tik_inst.for_range(0, sub_h_align_block_size) as sub_h_idx:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx],\n src[w_offset + w_size * sub_h_idx], 0, 1, sub_w_block, 0, 0)\n # in order to avoid dirty data when multiple core\n with tik_inst.for_range(0, data_cnt_one_block) as sub_h_idx_1:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block *\n (sub_h_align_block_size + sub_h_idx_1)],\n src[w_offset +\n w_size * (sub_h_size - data_cnt_one_block + sub_h_idx_1)],\n 0, 1, sub_w_block, 0, 0)\n\n with tik_inst.if_scope(is_not_w_block_align):\n # sub_h is block align or h is not enough one block\n with tik_inst.if_scope(tik.any(sub_h_left == 0, is_h_size_smaller_one_block)):\n with tik_inst.for_range(0, sub_h_size) as sub_h_idx:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx],\n src[w_offset + w_size * sub_h_idx], 0, 1, sub_w_block, 0, 0)\n with tik_inst.else_scope():\n _move_in_one_more_block()\n\n with tik_inst.else_scope():\n with tik_inst.if_scope(tik.any(sub_h_left == 0, is_h_size_smaller_one_block)):\n src_strides = w_size // data_cnt_one_block - sub_w_block\n # mte max strides value is 65535\n with tik_inst.if_scope(src_strides > MTE_STRIDES):\n with tik_inst.for_range(0, sub_h_size) as sub_h_idx_2:\n tik_inst.data_move(dst[sub_w_size * sub_h_idx_2],\n src[w_offset + w_size * sub_h_idx_2],\n 0, 1, sub_w_block, 0, 0)\n with tik_inst.else_scope():\n tik_inst.data_move(dst, src[w_offset], 0, sub_h_size, sub_w_block, src_strides, 0)\n with tik_inst.else_scope():\n _move_in_one_more_block()",
"def move(self):\n x = y = z = 0.0\n for cell in self.cells:\n x += (cell.x)#*n\n y += (cell.y)#*n\n z += (cell.z)#*n\n np = float(len(self.cells))\n med = numpy.array([x/np,y/np,z/np])\n \n dists = []\n for cell in self.cells:\n d = (cell.x-self.x)**2+(cell.y-self.y)**2+(cell.z-self.z)**2\n d = numpy.sqrt(d)\n dists.append(d)\n #md = (cell.x-med[0])**2+(cell.y-med[1])**2+(cell.z-med[2])**2\n #dists[-1] = (dists[-1]+md)/2\n cell = self.cells[numpy.argmin(dists)]\n cc = numpy.array([cell.x, cell.y, cell.z])\n \n t = self.t\n if abs(self.dnp) * ( self.np-self.np_req) > 0:\n t = self.tr\n self.dcenter = (1-t)*(med-self.center + self.u*(cc-med))\n self.x,self.y,self.z = self.center = self.center + self.dcenter",
"def move(self, direction):\n new_grid = []\n # get the indices of specific direction\n new_indices = self._grid_indices[direction]\n for cell in new_indices:\n lst = self.traversed_list(cell, direction)\n merged_list = merge(lst)\n new_grid.append(merged_list)\n \n adjusted_grid = adjust_grid(new_grid,direction)\n if self.is_changed(adjusted_grid):\n self.update_grid(adjusted_grid)\n self.new_tile()",
"def _move_in_one_more_block():\n with tik_inst.for_range(0, sub_h_align_block_size) as sub_h_idx_0:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx_0],\n src[in_offset + sub_h_idx_0 * w_size],\n 0, 1, sub_w_block, 0, 0)\n # move in one more block of h\n with tik_inst.for_range(0, data_cnt_one_block) as sub_h_idx_1:\n tik_inst.data_move(\n dst[sub_w_block * data_cnt_one_block * (sub_h_align_block_size + sub_h_idx_1)],\n src[in_offset + (sub_h_idx_1 + sub_h_size - data_cnt_one_block) * w_size],\n 0, 1, sub_w_block, 0, 0)",
"def move(self, direction):\n original_grid = []\n for row in self._grid:\n original_row = list(row)\n original_grid.append(original_row)\n steps = 0\n if direction == UP or direction == DOWN:\n steps = self._grid_height\n elif direction == LEFT or direction == RIGHT:\n steps = self._grid_width\n to_move = []\n for initial_cell in self._initial_cells[direction]:\n for step in range(steps):\n new_row = initial_cell[0] + step * OFFSETS[direction][0]\n new_column = initial_cell[1] + step * OFFSETS[direction][1]\n to_move.append(self._grid[new_row][new_column])\n to_move = merge(to_move)\n row = initial_cell[0]\n column = initial_cell[1]\n for step in range(steps):\n self._grid[row + OFFSETS[direction][0] * step][column + OFFSETS[direction][1] * step] = to_move[step]\n to_move = []\n if original_grid != self._grid:\n self.new_tile()",
"def _data_move_out_mc_on_w(tik_inst, dst, src, data_pos_info):\n\n # sub_h_size is the original value without any change\n sub_h_size, sub_w_size, h_size, w_size, out_offset = data_pos_info\n data_size_one_block = _get_elment_cnt_one_block(src.dtype)\n\n def _sub_h_not_block_align_bigger_one_block():\n \"\"\"\n sub_h_size is not block align, sub_h_size is bigger than one block\n \"\"\"\n\n sub_h_block = sub_h_size // data_size_one_block\n with tik_inst.for_range(0, sub_w_size) as sub_w_idx_2:\n with tik_inst.if_scope(sub_h_block > 0):\n tik_inst.data_move(\n dst[out_offset + sub_w_idx_2 * h_size],\n src[sub_w_idx_2 * (sub_h_block + 1) * data_size_one_block],\n 0, 1, sub_h_block, 0, 0)\n # move in one more block for this case\n tik_inst.data_move(\n dst[out_offset + sub_w_idx_2 * h_size + sub_h_size - data_size_one_block],\n src[sub_w_idx_2 * (sub_h_block + 1) * data_size_one_block +\n sub_h_block * data_size_one_block],\n 0, 1, 1, 0, 0)\n\n with tik_inst.if_scope(sub_h_size == h_size):\n # the data order in ub is the expected order\n sub_hw_size = sub_h_size * sub_w_size\n with tik_inst.if_scope(h_size % data_size_one_block == 0):\n tik_inst.data_move(dst[out_offset],\n src,\n 0, 1, sub_hw_size // data_size_one_block, 0, 0)\n with tik_inst.else_scope():\n # sub_h_size is smaller than one block\n with tik_inst.if_scope(h_size < data_size_one_block):\n # the data_move will move 1 block at least\n with tik_inst.if_scope(sub_hw_size < data_size_one_block):\n tik_inst.data_move(dst[out_offset],\n src,\n 0, 1, 1, 0, 0)\n with tik_inst.else_scope():\n sub_hw_block = sub_hw_size // data_size_one_block\n tik_inst.data_move(dst[out_offset],\n src,\n 0, 1, sub_hw_block, 0, 0)\n # in order to avoid dirty data\n with tik_inst.new_stmt_scope():\n temp_reg = [tik_inst.Scalar(src.dtype)\n for i in ADDR_IDX_LIST[:data_size_one_block]]\n for idx in ADDR_IDX_LIST[:data_size_one_block]:\n temp_reg[idx].set_as(src[sub_hw_size - data_size_one_block + idx])\n for idx in ADDR_IDX_LIST[:data_size_one_block]:\n src[idx].set_as(temp_reg[idx])\n tik_inst.data_move(dst[out_offset + sub_hw_size - data_size_one_block],\n src, 0, 1, 1, 0, 0)\n with tik_inst.else_scope():\n # sub_h_size is not block align, sub_h_size is bigger than one block\n _sub_h_not_block_align_bigger_one_block()\n\n with tik_inst.else_scope():\n # h_size > sub_h_size, h_size is block align\n stride_cnt = (h_size - sub_h_size) // data_size_one_block\n with tik_inst.if_scope(tik.all(h_size % data_size_one_block == 0,\n stride_cnt <= MTE_STRIDES)):\n tik_inst.data_move(dst[out_offset],\n src,\n 0, sub_w_size, sub_h_size // data_size_one_block, 0, stride_cnt)\n with tik_inst.else_scope():\n # h_size is not block align, sub_h_size is block align\n with tik_inst.if_scope(sub_h_size % data_size_one_block == 0):\n with tik_inst.for_range(0, sub_w_size) as sub_w_idx:\n tik_inst.data_move(dst[out_offset + sub_w_idx * h_size],\n src[sub_w_idx * sub_h_size],\n 0, 1, sub_h_size // data_size_one_block, 0, 0)\n with tik_inst.else_scope():\n _sub_h_not_block_align_bigger_one_block()",
"def move(self, direction):\r\n # replace with your code\r\n row_dir = OFFSETS[direction][0]\r\n col_dir = OFFSETS[direction][1]\r\n \r\n if row_dir == 0:\r\n new_cells = self._cells\r\n new_dir = col_dir\r\n else:\r\n new_tuples = zip(*self._cells)\r\n new_cells = [list(item) for item in new_tuples]\r\n new_dir = row_dir\r\n \r\n tmp_cells = []\r\n for lists in new_cells:\r\n lists = lists[::new_dir]\r\n merge_lists = merge(lists)\r\n tmp_cells.append(merge_lists[::new_dir])\r\n \r\n if row_dir == 0:\r\n self._cells = tmp_cells\r\n else:\r\n new_tuples = zip(*tmp_cells)\r\n new_cells = [list(item) for item in new_tuples]\r\n self._cells = new_cells\r\n \r\n self.new_tile()",
"def moving(filtertype, S0, n):\n print('-------------------------- moving')\n \n # Constants:\n S = S0.copy() # Avoid overwritting data:\n S_new = np.zeros(len(S))\n nzero = np.zeros(2*n+1)\n \n # Moving median filter:\n if filtertype=='median':\n print 'Moving median filter'\n # Interval: d[n, 1+n, ... , N-1, N-n]\n for i in range(len(S)-2*n): \n S_new[n+i] = np.median(S[range((n+i)-n, (n+i)+n+1)])\n for i in range(n):\n # Interval: d[-n, -(n-1), ... , n-1, n] - Low end of data\n low = nzero\n low[range(n-i)] = S[0]*np.ones(n-i)\n low[-(n+1+i):] = S[range(0, n+1+i)]\n S_new[i] = np.median(low)\n # Interval: d[N-n, N-(n-1), ... , N+(n-1), N+n] - High end of data\n high = nzero\n high[range(n+1+i)] = S[range(len(S)-(n+i+1), len(S))]\n high[-(n-i):] = S[-1]*np.ones(n-i)\n S_new[len(S)-1-i] = np.median(high)\n\n # Moving mean filter:\n if filtertype=='mean':\n print 'Moving mean filter'\n # Interval: d[n, 1+n, ... , N-1, N-n]\n for i in range(len(S)-2*n): \n S_new[n+i] = np.mean(S[range((n+i)-n, (n+i)+n+1)])\n for i in range(n):\n # Interval: d[-n, -(n-1), ... , n-1, n] - Low end of data\n low = nzero\n low[range(n-i)] = S[0]*np.ones(n-i)\n low[-(n+1+i):] = S[range(0, n+1+i)]\n S_new[i] = np.mean(low)\n # Interval: d[N-n, N-(n-1), ... , N+(n-1), N+n] - High end of data\n high = nzero\n high[range(n+1+i)] = S[range(len(S)-(n+1+i), len(S))]\n high[-(n-i):] = S[-1]*np.ones(n-i)\n S_new[len(S)-1-i] = np.mean(high)\n\n # Output:\n return S_new",
"def _shift_amplitudes(qc, n, inplace=False):\n if not inplace:\n qc = qc.copy()\n for q_reg in qc.qregs:\n # Unitary gate representing the shift operation on n qubits\n shift_matrix = np.roll(np.eye(2**q_reg.size), n, axis=1)\n # Add the gate to the circuit\n qc.append(UnitaryGate(shift_matrix), q_reg)\n return qc",
"def pixelMove(*args, **kwargs)->None:\n pass",
"def shift(self):\n \"\"\"\n shift cluster randomly within bounds of im\n \"\"\"\n r = self.std\n mid = self.mid_pixel #center pixel index of 384x384 image\n delta = self.im_size - self.mid_pixel - r - 10\n \n x = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n y = np.random.randint(low=-1*delta,high=delta,size=1)[0]\n\n self.x += x\n self.y += y\n im_shift = np.roll(self.im,shift=y,axis=0)\n self.im = np.roll(im_shift,shift=x,axis=1)\n \n return",
"def update_shift_count(self, move):\n if len(move) == 2:\n self.shift_count += 1\n else:\n self.shift_count = 0",
"def move(self, direction):\r\n # replace with your code\r\n row_increment = OFFSETS[direction][0]\r\n col_increment = OFFSETS[direction][1]\r\n changed = False\r\n for header in self._grid_headers[direction]:\r\n row_header = header[0]\r\n col_header = header[1]\r\n source_line = []\r\n # get the source line first\r\n while (row_header >= 0) and (col_header >= 0) and (row_header < self._grid_height) and (col_header < self._grid_width):\r\n source_line.append(self.get_tile(row_header, col_header))\r\n row_header += row_increment\r\n col_header += col_increment\r\n # merge\r\n result_line = merge(source_line)\r\n # write the result back\r\n row_header = header[0]\r\n col_header = header[1]\r\n result_line_index = 0\r\n while (row_header >= 0) and (col_header >= 0) and (row_header < self._grid_height) and (col_header < self._grid_width):\r\n self.set_tile(row_header, col_header, result_line[result_line_index])\r\n if result_line[result_line_index] != source_line[result_line_index]:\r\n changed = True\r\n result_line_index += 1\r\n row_header += row_increment\r\n col_header += col_increment\r\n if changed:\r\n self.new_tile()",
"def memmove(self, grid):\n self.moveList.append((self.x, self.y))\n self.moveList2.append((self.x, self.y))\n waysList = []\n for f in range(4):\n if self.test(grid, f):\n if (self.x+SPEED_X[f], self.y+SPEED_Y[f]) not in self.moveList:\n waysList.append(f)\n if len(waysList) == 1:\n self.flag = waysList[0]\n self.move(grid)\n return\n elif len(waysList) == 4:\n self.flag = 0\n self.move(grid)\n return\n elif len(waysList) > 1:\n for f in waysList:\n self.mem.append((self.x, self.y, f))\n self.x, self.y, self.flag = self.mem[-1]\n elif len(waysList) == 0:\n self.x, self.y, self.flag = self.mem[-1]\n for i in range(len(self.moveList2)):\n if self.moveList2[i][0] == self.x and self.moveList2[i][1] == self.y:\n del self.moveList2[i+1:]\n break\n self.move(grid)\n self.mem.pop()",
"def fun_no_cut(self, reg_x_len, n_size, block_index, n_loop):\n data_input_ub = self.tik_instance.Tensor(self.dtype_x,\n self.shape_v,\n name=\"data_input_ub\",\n scope=tik.scope_ubuf)\n input_indices_ub = self.tik_instance.Tensor(self.dtype_indices, (8,),\n name=\"input_indices_ub\",\n scope=tik.scope_ubuf)\n self.tik_instance.data_move(input_indices_ub[0],\n self.input_indices_gm[0], 0, 1, 1, 0, 0)\n reg_start = self.tik_instance.Scalar(dtype=\"int32\")\n reg_start.set_as(input_indices_ub[0])\n reg_burst = self.tik_instance.Scalar(dtype=\"int32\")\n if self.dtype_x in (\"float32\", \"int32\"):\n reg_burst.set_as(reg_x_len // 8)\n else:\n reg_burst.set_as(reg_x_len // 16)\n\n with self.tik_instance.for_range(0, n_loop) as n_index:\n with self.tik_instance.if_scope(\n block_index * n_size + n_index != reg_start):\n self.tik_instance.data_move(\n data_input_ub[0],\n self.input_x_gm[(block_index * n_size + n_index) *\n reg_x_len], 0, 1, reg_burst, 0, 0)\n with self.tik_instance.else_scope():\n self.tik_instance.data_move(data_input_ub[0],\n self.input_v_gm[0], 0, 1, reg_burst,\n 0, 0)\n self.tik_instance.data_move(\n self.output_y_gm[(block_index * n_size + n_index) * reg_x_len],\n data_input_ub[0], 0, 1, reg_burst, 0, 0)",
"def piecewise_transform(image, numcols=5, numrows=5, warp_left_right=10, warp_up_down=10, order=1):\n\n rows, cols = image.shape[0], image.shape[1]\n\n numcols = numcols\n numrows = numrows\n\n src_cols = np.linspace(0, cols, numcols, dtype=int)\n src_rows = np.linspace(0, rows, numrows, dtype=int)\n src_rows, src_cols = np.meshgrid(src_rows, src_cols)\n src = np.dstack([src_cols.flat, src_rows.flat])[0]\n\n src_rows_new = np.ndarray.transpose(src_rows)\n src_cols_new = np.ndarray.transpose(src_cols)\n # src_new = np.dstack([src_cols_new.flat, src_rows_new.flat])[0]\n\n dst_cols = np.ndarray(src_cols.shape)\n dst_rows = np.ndarray(src_rows.shape)\n for i in range(0, numcols):\n for j in range(0, numrows):\n if src_cols[i, j] == 0 or src_cols[i, j] == cols:\n dst_cols[i, j] = src_cols[i, j]\n else:\n dst_cols[i, j] = src_cols[i, j] + np.random.uniform(-1, 1) * warp_left_right\n\n if src_rows[i, j] == 0 or src_rows[i, j] == rows:\n dst_rows[i, j] = src_rows[i, j]\n else:\n dst_rows[i, j] = src_rows[i, j] + np.random.uniform(-1, 1) * warp_up_down\n\n dst = np.dstack([dst_cols.flat, dst_rows.flat])[0]\n\n # dst_rows_new = np.ndarray.transpose(dst_rows)\n # dst_cols_new = np.ndarray.transpose(dst_cols)\n # dst_new = np.dstack([dst_cols_new.flat, dst_rows_new.flat])[0]\n\n tform = transform.PiecewiseAffineTransform()\n tform.estimate(src, dst)\n\n img_new = transform.warp(image, tform, output_shape=(rows, cols), order=order, preserve_range=True)\n img_new = img_new.astype(image.dtype)\n \n return img_new",
"def move(self,move):\n for x in range(len(self.coord)):\n self.coord[x] = np.array([y+np.array(move) for y in self.coord[x]])\n return self",
"def move(self):\n self._move_range_shuffle(3)\n self._move_satisfy_random_constraint()\n # self._move_range_shuffle(3)\n #if (curr_energy > 50):\n # self._move_satisfy_random_constraint()\n #else:\n # self._move_range_shuffle(3)",
"def hanoi(n, source, target, helper):\n if n > 0:\n hanoi(n-1, source, helper, target)\n print(\"move disk from\", source, \"to\", target)\n hanoi(n-1, helper, target, source)",
"def warp(x, flo):\n x=torch.squeeze(x,2)\n flo=torch.squeeze(flo,2)\n B, C, H, W = x.size()\n # mesh grid \n xx = torch.arange(0, W).view(1,-1).repeat(H,1)\n yy = torch.arange(0, H).view(-1,1).repeat(1,W)\n xx = xx.view(1,1,H,W).repeat(B,1,1,1)\n yy = yy.view(1,1,H,W).repeat(B,1,1,1)\n grid = torch.cat((xx,yy),1).float()\n\n #if x.is_cuda:\n # grid = grid.cuda()\n vgrid = torch.Tensor(grid).cuda() - flo.cuda()\n\n # scale grid to [-1,1] \n vgrid[:,0,:,:] = 2.0*vgrid[:,0,:,:].clone() / max(W-1,1)-1.0\n vgrid[:,1,:,:] = 2.0*vgrid[:,1,:,:].clone() / max(H-1,1)-1.0\n\n vgrid = vgrid.permute(0,2,3,1) \n #x=x.cuda()\n output = nn.functional.grid_sample(x, vgrid,mode='bilinear')\n mask = torch.Tensor(torch.ones(x.size())).cuda()\n mask = nn.functional.grid_sample(mask, vgrid,mode='bilinear')\n\n # if W==128:\n # np.save('mask.npy', mask.cpu().data.numpy())\n # np.save('warp.npy', output.cpu().data.numpy())\n \n mask[mask<0.9999] = 0\n mask[mask>0] = 1\n return torch.unsqueeze(output,2),torch.unsqueeze(mask,2)",
"def shift(shape, stride, anchors):\n shift_x = (keras.backend.arange(0, shape[1], dtype=keras.backend.floatx()) + keras.backend.constant(0.5,\n dtype=keras.backend.floatx())) * stride\n shift_y = (keras.backend.arange(0, shape[0], dtype=keras.backend.floatx()) + keras.backend.constant(0.5,\n dtype=keras.backend.floatx())) * stride\n\n shift_x, shift_y = meshgrid(shift_x, shift_y)\n shift_x = keras.backend.reshape(shift_x, [-1])\n shift_y = keras.backend.reshape(shift_y, [-1])\n\n shifts = keras.backend.stack([\n shift_x,\n shift_y,\n shift_x,\n shift_y\n ], axis=0)\n\n shifts = keras.backend.transpose(shifts)\n number_of_anchors = keras.backend.shape(anchors)[0]\n\n k = keras.backend.shape(shifts)[0] # number of base points = feat_h * feat_w\n\n shifted_anchors = keras.backend.reshape(anchors, [1, number_of_anchors, 4]) + keras.backend.cast(\n keras.backend.reshape(shifts, [k, 1, 4]), keras.backend.floatx())\n shifted_anchors = keras.backend.reshape(shifted_anchors, [k * number_of_anchors, 4])\n\n return shifted_anchors",
"def flow_to_warp(flow):\n batch, _, ht, wd = flow.shape\n coords = torch.meshgrid(torch.arange(ht), torch.arange(wd))\n coords = torch.stack(coords[::-1], dim=0).float()\n coords = coords[None].repeat(batch, 1, 1, 1)\n return coords + flow",
"def _schedule_winograd(cfg, s, op):\n # get ops and tensors\n output = op.output(0)\n\n Y = op.input_tensors[0]\n M, A = s[Y].op.input_tensors\n U, V = s[M].op.input_tensors\n d, B = s[V].op.input_tensors\n data_pad = s[d].op.input_tensors[0]\n\n # padding\n s[data_pad].compute_inline()\n\n # transform kernel\n if isinstance(U.op, tvm.te.ComputeOp):\n kernel, G = s[U].op.input_tensors\n s[G].compute_inline()\n (eps, nu, co, ci, vco) = s[U].op.axis\n if not autotvm.GLOBAL_SCOPE.in_tuning:\n r_kh, r_kw = s[U].op.reduce_axis\n s[U].reorder(co, ci, eps, nu, r_kh, r_kw, vco)\n _ = [s[U].unroll(x) for x in [eps, nu, r_kh, r_kw]]\n s[U].vectorize(vco)\n tile_and_bind(s, U, co, ci, 1, 256)\n\n # dilation\n if isinstance(kernel.op, tvm.te.ComputeOp) and \"dilate\" in kernel.op.tag:\n s[kernel].compute_inline()\n\n # transform image\n s[B].compute_inline()\n VL = s.cache_write(V, \"local\")\n\n eps, nu, p, ci, vp = s[V].op.axis\n s[V].reorder(p, ci, eps, nu, vp)\n for axis in [eps, nu]:\n s[V].unroll(axis)\n s[V].vectorize(vp)\n fused = s[V].fuse(p, ci)\n\n bb, tt = cfg[\"tile_t1\"].apply(s, V, fused)\n s[V].bind(bb, te.thread_axis(\"blockIdx.x\"))\n s[V].bind(tt, te.thread_axis(\"threadIdx.x\"))\n\n eps, nu, p, ci, vp = s[VL].op.axis\n r_a, r_b = s[VL].op.reduce_axis\n for axis in [eps, nu, r_a, r_b]:\n s[VL].unroll(axis)\n s[VL].vectorize(vp)\n s[d].compute_at(s[V], tt)\n s[VL].compute_at(s[V], tt)\n\n # batch gemm\n bna = cfg[\"tile_bna\"].val\n bnb = cfg[\"tile_bnb\"].val\n\n eps, nu, k, b = s[M].op.axis\n alpha = eps.dom.extent\n c = s[M].op.reduce_axis[0]\n yo, xo, yi, xi = s[M].tile(k, b, bna, bnb)\n c, c_unroll = cfg[\"c_unroll\"].apply(s, M, c)\n s[M].reorder(yo, xo, c, c_unroll, yi, xi)\n s[M].unroll(c_unroll)\n s[M].unroll(yi)\n s[M].vectorize(xi)\n z = s[M].fuse(eps, nu)\n tile_and_bind3d(s, M, z, yo, xo, 1, cfg[\"yt\"].val, 1)\n\n # inverse transform\n s[A].compute_inline()\n k, b, vh, vw = s[Y].op.axis\n r_a, r_b = s[Y].op.reduce_axis\n for axis in [vh, vw, r_a, r_b]:\n s[Y].unroll(axis)\n\n # schedule output and fusion\n if output.op not in s.outputs:\n s[output].compute_inline()\n output = s.outputs[0]\n\n n, co, h, w = s[output].op.axis\n m = alpha - 3 + 1\n h, w, hi, wi = s[output].tile(h, w, m, m)\n s[output].unroll(hi)\n s[output].unroll(wi)\n fused = s[output].fuse(n, co, h, w)\n bb, tt = cfg[\"tile_t2\"].apply(s, output, fused)\n s[output].bind(bb, te.thread_axis(\"blockIdx.x\"))\n s[output].bind(tt, te.thread_axis(\"threadIdx.x\"))\n\n s[Y].compute_at(s[output], tt)",
"def init_shiftind(self, n_t):\n i = np.arange(n_t * n_t)\n i2 = np.arange(n_t).repeat(n_t)\n ik = np.arange(n_t).repeat(n_t)\n ii = np.arange(n_t)[np.newaxis].repeat(n_t, 0).flatten()\n\n si = ik * n_t + (ik + ii) % n_t\n self.shiftinds_fwd = np.roll(si.reshape((n_t, n_t)), int((n_t - 1) / 2), 1)[:, ::-1].flatten()\n\n si = ik * n_t + (ii - ik) % n_t\n self.shiftinds_back = np.roll(np.arange(n_t * n_t).reshape((n_t, n_t))[:, ::-1], -int((n_t - 1) / 2), 1).flatten()[si]\n\n self.shiftinds = ((i + i2 - n_t) % n_t + i2 * n_t).astype(int)\n self.shiftinds_neg = ((i + i2 - n_t) % n_t + i2 * n_t).astype(int)\n self.shiftinds_pos = ((-n_t + i - i2) % n_t + i2 * n_t).astype(int)\n # self.shiftinds = ((i + i2 - n_t) % n_t + i2 * n_t).astype(int).reshape((n_t, n_t)).transpose().flatten()\n # self.shiftinds_neg = ((i + i2 - n_t) % n_t + i2 * n_t).astype(int).reshape((n_t, n_t)).transpose().flatten()\n # self.shiftinds_pos = ((-n_t + i - i2) % n_t + i2 * n_t).astype(int).reshape((n_t, n_t)).transpose().flatten()"
] | [
"0.68111503",
"0.654992",
"0.6478761",
"0.5682042",
"0.55264264",
"0.5491822",
"0.5434507",
"0.54266405",
"0.54103285",
"0.53249466",
"0.52720374",
"0.52512574",
"0.5243212",
"0.51999307",
"0.51817954",
"0.5173916",
"0.5150899",
"0.51255804",
"0.5119478",
"0.51185614",
"0.5106558",
"0.51044023",
"0.50996846",
"0.5088775",
"0.508422",
"0.50755394",
"0.5070164",
"0.5068276",
"0.50580907",
"0.5054507"
] | 0.7541371 | 0 |
Shift the field 'field_array' by n_move cells on CPU. This is done in spectral space and corresponds to multiplying the fields with the factor exp(i*kz_true*dz)**n_move . | def shift_spect_array_cpu( field_array, shift_factor, n_move ):
Nz, Nr = field_array.shape
# Loop over the 2D array (in parallel over z if threading is enabled)
for iz in prange( Nz ):
power_shift = 1. + 0.j
# Calculate the shift factor (raising to the power n_move ;
# for negative n_move, we take the complex conjugate, since
# shift_factor is of the form e^{i k dz})
for i in range( abs(n_move) ):
power_shift *= shift_factor[iz]
if n_move < 0:
power_shift = power_shift.conjugate()
# Shift the fields
for ir in range( Nr ):
field_array[iz, ir] *= power_shift | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shift_spect_array_gpu( field_array, shift_factor, n_move ):\n # Get a 2D CUDA grid\n iz, ir = cuda.grid(2)\n\n # Only access values that are actually in the array\n if ir < field_array.shape[1] and iz < field_array.shape[0]:\n power_shift = 1. + 0.j\n # Calculate the shift factor (raising to the power n_move ;\n # for negative n_move, we take the complex conjugate, since\n # shift_factor is of the form e^{i k dz})\n for i in range( abs(n_move) ):\n power_shift *= shift_factor[iz]\n if n_move < 0:\n power_shift = power_shift.conjugate()\n # Shift fields\n field_array[iz, ir] *= power_shift",
"def move_grids(self, fld, comm, time):\n # To avoid discrepancies between processors, only the first proc\n # decides whether to send the data, and broadcasts the information.\n dz = comm.dz\n if comm.rank==0:\n # Move the continuous position of the moving window object\n self.zmin += self.v * (time - self.t_last_move)\n # Find the number of cells by which the window should move\n zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax(\n local=False, with_damp=False, with_guard=False )\n n_move = int( (self.zmin - zmin_global_domain)/dz )\n else:\n n_move = None\n # Broadcast the information to all proc\n if comm.size > 1:\n n_move = comm.mpi_comm.bcast( n_move )\n\n # Move the grids\n if n_move != 0:\n # Move the global domain\n comm.shift_global_domain_positions( n_move*dz )\n # Shift the fields\n Nm = len(fld.interp)\n for m in range(Nm):\n # Modify the values of the corresponding z's\n fld.interp[m].zmin += n_move*fld.interp[m].dz\n fld.interp[m].zmax += n_move*fld.interp[m].dz\n # Shift/move fields by n_move cells in spectral space\n self.shift_spect_grid( fld.spect[m], n_move )\n\n # Because the grids have just been shifted, there is a shift\n # in the cell indices that are used for the prefix sum.\n if fld.use_cuda:\n fld.prefix_sum_shift += n_move\n # This quantity is reset to 0 whenever prefix_sum is recalculated\n\n # Prepare the positions of injection for the particles\n # (The actual creation of particles is done when the routine\n # exchange_particles of boundary_communicator.py is called)\n if comm.rank == comm.size-1:\n # Move the injection position\n self.z_inject += self.v * (time - self.t_last_move)\n # Take into account the motion of the end of the plasma\n self.z_end_plasma += self.v_end_plasma * (time - self.t_last_move)\n # Increment the number of particle cells to add\n nz_new = int( (self.z_inject - self.z_end_plasma)/dz )\n self.nz_inject += nz_new\n # Increment the virtual position of the end of the plasma\n # (When `generate_particles` is called, then the plasma\n # is injected between z_end_plasma - nz_inject*dz and z_end_plasma,\n # and afterwards nz_inject is set to 0.)\n self.z_end_plasma += nz_new*dz\n\n # Change the time of the last move\n self.t_last_move = time",
"def TransformUpMovement(field):\n i = 0\n side = int(math.sqrt(len(field)))\n while i < side:\n j = len(field) - side + i\n line = []\n l = i\n while l <= j:\n line.append(field[l])\n l = l + side\n\n line = move(line)\n j = len(field) - side + i\n l = i\n k = 0\n while l <= j:\n field[l] = line[k]\n l = l + side\n k = k + 1\n i = i + 1\n return field",
"def shift_spect_grid( self, grid, n_move,\n shift_rho=True, shift_currents=True ):\n if grid.use_cuda:\n shift = grid.d_field_shift\n # Get a 2D CUDA grid of the size of the grid\n tpb, bpg = cuda_tpb_bpg_2d( grid.Ep.shape[0], grid.Ep.shape[1] )\n # Shift all the fields on the GPU\n shift_spect_array_gpu[tpb, bpg]( grid.Ep, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Em, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Ez, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bz, shift, n_move )\n if shift_rho:\n shift_spect_array_gpu[tpb, bpg]( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_gpu[tpb, bpg]( grid.Jp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jz, shift, n_move )\n else:\n shift = grid.field_shift\n # Shift all the fields on the CPU\n shift_spect_array_cpu( grid.Ep, shift, n_move )\n shift_spect_array_cpu( grid.Em, shift, n_move )\n shift_spect_array_cpu( grid.Ez, shift, n_move )\n shift_spect_array_cpu( grid.Bp, shift, n_move )\n shift_spect_array_cpu( grid.Bm, shift, n_move )\n shift_spect_array_cpu( grid.Bz, shift, n_move )\n if shift_rho:\n shift_spect_array_cpu( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_cpu( grid.Jp, shift, n_move )\n shift_spect_array_cpu( grid.Jm, shift, n_move )\n shift_spect_array_cpu( grid.Jz, shift, n_move )",
"def TransformLeftMovement(field):\n i = 0\n side = int(math.sqrt(len(field)))\n while i < len(field):\n j = (i + side)\n line = []\n for x in range(i, j):\n line.append(field[x])\n\n line = move(line)\n k = 0\n for x in range(i, j):\n field[x] = line[k]\n k = k + 1\n i = i + side\n return field",
"def make_move(self, board, fieldy, fieldx):\n board[self.posy][self.posx], board[fieldy][fieldx] = board[fieldy][fieldx], board[self.posy][self.posx]\n self.posy = fieldy\n self.posx = fieldx",
"def TransformRightMovement(field):\n i = 0\n side = int(math.sqrt(len(field)))\n while i < len(field):\n j = (i + side) - 1\n line = []\n for x in range(j, i - 1, -1):\n line.append(field[x])\n line = move(line)\n k = 0\n for x in range(j, i - 1, -1):\n field[x] = line[k]\n k = k + 1\n i = i + side\n return field",
"def grid_shift(grid, advection, trim_edges=0, field_list=None):\n if trim_edges == 0:\n trim_slice = slice(None, None)\n else:\n trim_slice = slice(int(trim_edges), -int(trim_edges))\n\n shifted_grid = copy.deepcopy(grid)\n\n # grab the x and y axis and trim\n shifted_grid.x[\"data\"] = grid.x[\"data\"][trim_slice].copy()\n shifted_grid.y[\"data\"] = grid.y[\"data\"][trim_slice].copy()\n\n # shift each field.\n if field_list is None:\n field_list = grid.fields.keys()\n\n for field in field_list:\n # copy data and fill with nans\n data = grid.fields[field][\"data\"].copy()\n data = np.ma.filled(data, np.nan)\n\n # shift the data\n shifted_data = shift(data, [0, advection[0], advection[1]], prefilter=False)\n\n # mask invalid, trim and place into grid\n shifted_data = np.ma.fix_invalid(\n shifted_data, copy=False, fill_value=get_fillvalue()\n )\n shifted_data = shifted_data[:, trim_slice, trim_slice]\n shifted_grid.fields[field][\"data\"] = shifted_data\n\n return shifted_grid",
"def TransformDownMovement(field):\n i = 0\n side = int(math.sqrt(len(field)))\n while i < side:\n j = len(field) - side + i\n line = []\n l = j\n while l >= i:\n line.append(field[l])\n l = l - side\n\n line = move(line)\n j = len(field) - side + i\n l = j\n k = 0\n while l >= i:\n field[l] = line[k]\n l = l - side\n k = k + 1\n i = i + 1\n return field",
"def shift_rows(state):\n state[1][0], state[1][1], state[1][2], state[1][3] = state[1][1], state[1][2], state[1][3], state[1][0]\n state[2][0], state[2][1], state[2][2], state[2][3] = state[2][2], state[2][3], state[2][0], state[2][1]\n state[3][0], state[3][1], state[3][2], state[3][3] = state[3][3], state[3][0], state[3][1], state[3][2]",
"def _move_in_one_more_block():\n with tik_inst.for_range(0, sub_h_align_block_size) as sub_h_idx:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx],\n src[w_offset + w_size * sub_h_idx], 0, 1, sub_w_block, 0, 0)\n # in order to avoid dirty data when multiple core\n with tik_inst.for_range(0, data_cnt_one_block) as sub_h_idx_1:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block *\n (sub_h_align_block_size + sub_h_idx_1)],\n src[w_offset +\n w_size * (sub_h_size - data_cnt_one_block + sub_h_idx_1)],\n 0, 1, sub_w_block, 0, 0)",
"def inv_shift_rows(state):\n state[1][0], state[1][1], state[1][2], state[1][3] = state[1][3], state[1][0], state[1][1], state[1][2]\n state[2][0], state[2][1], state[2][2], state[2][3] = state[2][2], state[2][3], state[2][0], state[2][1]\n state[3][0], state[3][1], state[3][2], state[3][3] = state[3][1], state[3][2], state[3][3], state[3][0]",
"def DELAY(A, n):\r\n At = pivot_table(A)\r\n res = At.shift(n)\r\n res = stack_table(res)\r\n return res",
"def realign_image(arr, shift, angle=0):\n # if both shifts are integers, do circular shift; otherwise perform Fourier shift.\n if np.count_nonzero(np.abs(np.array(shift) - np.round(shift)) < 0.01) == 2:\n temp = np.roll(arr, int(shift[0]), axis=0)\n temp = np.roll(temp, int(shift[1]), axis=1)\n temp = temp.astype('float32')\n else:\n temp = fourier_shift(np.fft.fftn(arr), shift)\n temp = np.fft.ifftn(temp)\n temp = np.abs(temp).astype('float32')\n return temp",
"def move(self):\n x = y = z = 0.0\n for cell in self.cells:\n x += (cell.x)#*n\n y += (cell.y)#*n\n z += (cell.z)#*n\n np = float(len(self.cells))\n med = numpy.array([x/np,y/np,z/np])\n \n dists = []\n for cell in self.cells:\n d = (cell.x-self.x)**2+(cell.y-self.y)**2+(cell.z-self.z)**2\n d = numpy.sqrt(d)\n dists.append(d)\n #md = (cell.x-med[0])**2+(cell.y-med[1])**2+(cell.z-med[2])**2\n #dists[-1] = (dists[-1]+md)/2\n cell = self.cells[numpy.argmin(dists)]\n cc = numpy.array([cell.x, cell.y, cell.z])\n \n t = self.t\n if abs(self.dnp) * ( self.np-self.np_req) > 0:\n t = self.tr\n self.dcenter = (1-t)*(med-self.center + self.u*(cc-med))\n self.x,self.y,self.z = self.center = self.center + self.dcenter",
"def fshift_nb(a, n):\n return fshift_1d_nb(a, n)",
"def Repeater(arr,n):\n new_arr = np.zeros((arr.shape[0]*n,arr.shape[1]),dtype=object)\n for i in range(0,arr.shape[0]):\n new_row = np.tile(arr[i,:],(n,1))\n new_arr[i*n:(i+1)*n,:] = new_row\n return new_arr",
"def rgbArray_move(self, rgbList, delay):\n # res\n\n res = self.rgbArrayOfs_move(0,rgbList,delay)\n return res",
"def move_element(self,n_a,n_b):\n self.element_array.insert(n_b,self.element_array.pop(n_a))",
"def yank(self):\r\n self.block.bucket_array.yank_cell(self)",
"def fshift_1d_nb(a, n):\n out = np.empty_like(a, dtype=np.float_)\n out[:n] = np.nan\n out[n:] = a[:-n]\n return out",
"def move(self,move):\n for x in range(len(self.coord)):\n self.coord[x] = np.array([y+np.array(move) for y in self.coord[x]])\n return self",
"def Forward(Fin, z, sizenew, Nnew ):\n if z <= 0:\n raise ValueError('Forward does not support z<=0')\n Fout = Field.begin(sizenew, Fin.lam, Nnew, Fin._dtype)\n \n field_in = Fin.field\n field_out = Fout.field\n \n field_out[:,:] = 0.0 #default is ones, clear\n \n old_size = Fin.siz\n old_n = Fin.N\n new_size = sizenew #renaming to match cpp code\n new_n = Nnew\n\n on2 = int(old_n/2)\n nn2 = int(new_n/2) #read \"new n over 2\"\n dx_new = new_size/(new_n-1)\n dx_old = old_size/(old_n-1)\n #TODO again, dx seems better defined without -1, check this\n \n R22 = _np.sqrt(1/(2*Fin.lam*z))\n\n X_new = _np.arange(-nn2, new_n-nn2) * dx_new\n Y_new = X_new #same\n X_old = _np.arange(-on2, old_n-on2) * dx_old\n Y_old = X_old #same\n for i_new in range(new_n):\n x_new = X_new[i_new]\n \n P1 = R22*(2*(X_old-x_new)+dx_old)\n P3 = R22*(2*(X_old-x_new)-dx_old)\n Fs1, Fc1 = _fresnel(P1)\n Fs3, Fc3 = _fresnel(P3)\n for j_new in range(new_n):\n y_new = Y_new[j_new]\n \n P2 = R22*(2*(Y_old-y_new)-dx_old)\n P4 = R22*(2*(Y_old-y_new)+dx_old)\n Fs2, Fc2 = _fresnel(P2)\n Fs4, Fc4 = _fresnel(P4)\n \n C4C1=_np.outer(Fc4, Fc1) #out[i, j] = a[i] * b[j] \n C2S3=_np.outer(Fc2, Fs3) #-> out[j,i] = a[j]*b[i] here\n C4S1=_np.outer(Fc4, Fs1)\n S4C1=_np.outer(Fs4, Fc1)\n S2C3=_np.outer(Fs2, Fc3)\n C2S1=_np.outer(Fc2, Fs1)\n S4C3=_np.outer(Fs4, Fc3)\n S2C1=_np.outer(Fs2, Fc1)\n C4S3=_np.outer(Fc4, Fs3)\n S2S3=_np.outer(Fs2, Fs3)\n S2S1=_np.outer(Fs2, Fs1)\n C2C3=_np.outer(Fc2, Fc3)\n S4S1=_np.outer(Fs4, Fs1)\n C4C3=_np.outer(Fc4, Fc3)\n C4C1=_np.outer(Fc4, Fc1)\n S4S3=_np.outer(Fs4, Fs3)\n C2C1=_np.outer(Fc2, Fc1)\n \n Fr = 0.5 * field_in.real\n Fi = 0.5 * field_in.imag\n Temp_c = (Fr * (C2S3 + C4S1 + S4C1 + S2C3\n - C2S1 - S4C3 - S2C1 - C4S3)\n + Fi * (-S2S3 + S2S1 + C2C3 - S4S1\n - C4C3 + C4C1 + S4S3 - C2C1)\n + 1j * Fr *(-C4C1 + S2S3 + C4C3 - S4S3\n + C2C1 - S2S1 + S4S1 - C2C3)\n + 1j * Fi*(C2S3 + S2C3 + C4S1 + S4C1\n - C4S3 - S4C3 - C2S1 - S2C1))\n field_out[j_new, i_new] = Temp_c.sum() #complex elementwise sum\n Fout._IsGauss=False\n return Fout",
"def _assemble_tiles(i, n, tile, tsincr_g, output_dir, outtype):\n # pylint: disable=too-many-arguments\n tsincr_file = os.path.join(output_dir, '{}_{}.npy'.format(outtype, n))\n tsincr = np.load(file=tsincr_file)\n tsincr_g[tile.top_left_y:tile.bottom_right_y, tile.top_left_x:tile.bottom_right_x] = tsincr[:, :, i]",
"def _extend_contiguous_traj_field(self, run_idx, traj_idx, field_path, field_data):\n\n traj_grp = self.h5['{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)]\n field = traj_grp[field_path]\n\n # make sure this is a feature vector\n assert len(field_data.shape) > 1, \\\n \"field_data must be a feature vector with the same number of dimensions as the number\"\n\n # of datase new frames\n n_new_frames = field_data.shape[0]\n\n # check the field to make sure it is not empty\n if all([i == 0 for i in field.shape]):\n\n # check the feature shape against the maxshape which gives\n # the feature dimensions for an empty dataset\n assert field_data.shape[1:] == field.maxshape[1:], \\\n \"field feature dimensions must be the same, i.e. all but the first dimension\"\n\n # if it is empty resize it to make an array the size of\n # the new field_data with the maxshape for the feature\n # dimensions\n feature_dims = field.maxshape[1:]\n field.resize( (n_new_frames, *feature_dims) )\n\n # set the new data to this\n field[0:, ...] = field_data\n\n else:\n # make sure the new data has the right dimensions against\n # the shape it already has\n assert field_data.shape[1:] == field.shape[1:], \\\n \"field feature dimensions must be the same, i.e. all but the first dimension\"\n\n\n # append to the dataset on the first dimension, keeping the\n # others the same, these must be feature vectors and therefore\n # must exist\n field.resize( (field.shape[0] + n_new_frames, *field.shape[1:]) )\n # add the new data\n field[-n_new_frames:, ...] = field_data",
"def shift(image,shift_x,shift_y):\n return np.roll(np.roll(image,shift_y,axis=0),shift_x,axis=1)",
"def offsetElements(self, i):\n\n #iterate over each tile and subtract\n #if the value is -1, indicating a blank tile, leave it as that\n for y in range(0, len(self.array)):\n for x in range(0, len(self.array[0])):\n if self.array[y][x] != -1:\n self.array[y][x] -= i",
"def roll(arrayin, shift = (0, 0), silent = True):\r\n arrayout = arrayin.copy()\r\n # if shift is integer valued then use np.roll\r\n if (type(shift[0]) == int) or (type(shift[0]) == np.int) or (type(shift[0]) == np.int32) or (type(shift[0]) == np.int64):\r\n if shift[-1] != 0 :\r\n if silent == False :\r\n print 'arrayout = np.roll(arrayout, shift[-1], -1)'\r\n arrayout = np.roll(arrayout, shift[-1], -1)\r\n # if shift is 1d then don't roll the other dim (if it even exists)\r\n if len(arrayout.shape) >= 2 :\r\n if shift[-2] != 0 :\r\n if silent == False :\r\n print 'arrayout = np.roll(arrayout, shift[-2], -2)'\r\n arrayout = np.roll(arrayout, shift[-2], -2)\r\n # if shift is float valued then use the Fourier shift theorem\r\n elif (type(shift[0]) == float) or (type(shift[0]) == np.float32) or (type(shift[0]) == np.float64):\r\n # if shift is 1d\r\n if len(shift) == 1 :\r\n if silent == False :\r\n print 'arrayout = fftn_1d(arrayout)'\r\n print 'arrayout = arrayout * phase_ramp(arrayout.shape, shift, origin = (0, 0))'\r\n print 'arrayout = ifftn_1d(arrayout)'\r\n arrayout = fftn_1d(arrayout)\r\n arrayout = arrayout * phase_ramp(arrayout.shape, shift, origin = (0, 0))\r\n arrayout = ifftn_1d(arrayout)\r\n elif len(shift) == 2 :\r\n if silent == False :\r\n print 'arrayout = fftn(arrayout)'\r\n print 'arrayout = arrayout * phase_ramp(arrayout.shape, shift, origin = (0, 0))'\r\n print 'arrayout = ifftn(arrayout)'\r\n arrayout = fftn(arrayout)\r\n arrayout = arrayout * phase_ramp(arrayout.shape, shift, origin = (0, 0))\r\n arrayout = ifftn(arrayout)\r\n return arrayout",
"def move(self, direction):\n original_grid = []\n for row in self._grid:\n original_row = list(row)\n original_grid.append(original_row)\n steps = 0\n if direction == UP or direction == DOWN:\n steps = self._grid_height\n elif direction == LEFT or direction == RIGHT:\n steps = self._grid_width\n to_move = []\n for initial_cell in self._initial_cells[direction]:\n for step in range(steps):\n new_row = initial_cell[0] + step * OFFSETS[direction][0]\n new_column = initial_cell[1] + step * OFFSETS[direction][1]\n to_move.append(self._grid[new_row][new_column])\n to_move = merge(to_move)\n row = initial_cell[0]\n column = initial_cell[1]\n for step in range(steps):\n self._grid[row + OFFSETS[direction][0] * step][column + OFFSETS[direction][1] * step] = to_move[step]\n to_move = []\n if original_grid != self._grid:\n self.new_tile()",
"def _field_Fresnel(z, field, dx, lam, dtype, usepyFFTW):\n \n \"\"\" *************************************************************\n Major differences to Cpp based LP version:\n - dx =siz/N instead of dx=siz/(N-1), more consistent with physics \n and rest of LP package\n - fftw DLL uses no normalization, numpy uses 1/N on ifft -> omitted\n factor of 1/(2*N)**2 in final calc before return\n - bug in Cpp version: did not touch top row/col, now we extract one\n more row/col to fill entire field. No errors noticed with the new\n method so far\n ************************************************************* \"\"\"\n _using_pyfftw = False # determined if loading is successful \n if usepyFFTW or _USE_PYFFTW:\n try:\n import pyfftw as _pyfftw\n from pyfftw.interfaces.numpy_fft import fft2 as _fft2\n from pyfftw.interfaces.numpy_fft import ifft2 as _ifft2\n _fftargs = {'planner_effort': 'FFTW_ESTIMATE',\n 'overwrite_input': True,\n 'threads': -1} #<0 means use multiprocessing.cpu_count()\n _using_pyfftw = True \n except ImportError:\n #import warnings\n #warnings.warn(_WARNING)\n _WARNING = '\\n**************************** WARNING ***********************\\n'\\\n +'In the Fresnel command you required FFT with the pyFFTW package.\\n'\\\n +'or _USE_PYFFTW = True in your config.py file.\\n'\\\n +'However LightPipes cannot import pyFFTW because it is not installed.\\n'\\\n +'Falling back to numpy.fft.\\n'\\\n +'(Try to) install pyFFTW on your computer for faster performance.\\n'\\\n +'Enter at a terminal prompt: python -m pip install pyfftw.\\n'\\\n +'Or reinstall LightPipes with the option pyfftw\\n'\\\n +'Enter: python -m pip install lightpipes[pyfftw]\\n\\n'\\\n +'*************************************************************'\n print(_WARNING)\n if not _using_pyfftw:\n from numpy.fft import fft2 as _fft2\n from numpy.fft import ifft2 as _ifft2\n _fftargs = {}\n tictoc.tic()\n N = field.shape[0] #assert square\n \n legacy = True #switch on to numerically compare oldLP/new results\n if legacy:\n kz = 2.*3.141592654/lam * z\n siz = N*dx\n dx = siz/(N-1) #like old Cpp code, even though unlogical\n else:\n kz = 2*_np.pi/lam*z\n \n \n cokz = _np.cos(kz)\n sikz = _np.sin(kz)\n \n No2 = int(N/2) #\"N over 2\"\n \"\"\"The following section contains a lot of uses which boil down to\n 2*No2. For even N, this is N. For odd N, this is NOT redundant:\n 2*No2 is N-1 for odd N, therefore sampling an even subset of the\n field instead of the whole field. Necessary for symmetry of first\n step involving Fresnel integral calc.\n \"\"\"\n if _using_pyfftw:\n in_outF = _pyfftw.zeros_aligned((2*N, 2*N),dtype=dtype)\n in_outK = _pyfftw.zeros_aligned((2*N, 2*N),dtype=dtype)\n else:\n in_outF = _np.zeros((2*N, 2*N),dtype=dtype)\n in_outK = _np.zeros((2*N, 2*N),dtype=dtype)\n \n \"\"\"Our grid is zero-centered, i.e. the 0 coordiante (beam axis) is\n not at field[0,0], but field[No2, No2]. 
The FFT however is implemented\n such that the frequency 0 will be the first element of the output array,\n and it also expects the input to have the 0 in the corner.\n For the correct handling, an fftshift is necessary before *and* after\n the FFT/IFFT:\n X = fftshift(fft(ifftshift(x))) # correct magnitude and phase\n x = fftshift(ifft(ifftshift(X))) # correct magnitude and phase\n X = fftshift(fft(x)) # correct magnitude but wrong phase !\n x = fftshift(ifft(X)) # correct magnitude but wrong phase !\n A numerically faster way to achieve the same result is by multiplying\n with an alternating phase factor as done below.\n Speed for N=2000 was ~0.4s for a double fftshift and ~0.1s for a double\n phase multiplication -> use the phase factor approach (iiij).\n \"\"\"\n # Create the sign-flip pattern for largest use case and \n # reference smaller grids with a view to the same data for\n # memory saving.\n ii2N = _np.ones((2*N),dtype=float)\n ii2N[1::2] = -1 #alternating pattern +,-,+,-,+,-,...\n iiij2N = _np.outer(ii2N, ii2N)\n iiij2No2 = iiij2N[:2*No2,:2*No2] #slice to size used below\n iiijN = iiij2N[:N, :N]\n\n RR = _np.sqrt(1/(2*lam*z))*dx*2\n io = _np.arange(0, (2*No2)+1) #add one extra to stride fresnel integrals\n R1 = RR*(io - No2)\n fs, fc = _fresnel(R1)\n fss = _np.outer(fs, fs) # out[i, j] = a[i] * b[j]\n fsc = _np.outer(fs, fc)\n fcs = _np.outer(fc, fs)\n fcc = _np.outer(fc, fc)\n \n \"\"\"Old notation (0.26-0.33s):\n temp_re = (a + b + c - d + ...)\n # numpy func add takes 2 operands A, B only\n # -> each operation needs to create a new temporary array, i.e.\n # ((((a+b)+c)+d)+...)\n # since python does not optimize to += here (at least is seems)\n New notation (0.14-0.16s):\n temp_re = (a + b) #operation with 2 operands\n temp_re += c\n temp_re -= d\n ...\n Wrong notation:\n temp_re = a #copy reference to array a\n temp_re += b\n ...\n # changing `a` in-place, re-using `a` will give corrupted\n # result\n \"\"\"\n temp_re = (fsc[1:, 1:] #s[i+1]c[j+1]\n + fcs[1:, 1:]) #c[+1]s[+1]\n temp_re -= fsc[:-1, 1:] #-scp [p=+1, without letter =+0]\n temp_re -= fcs[:-1, 1:] #-csp\n temp_re -= fsc[1:, :-1] #-spc\n temp_re -= fcs[1:, :-1] #-cps\n temp_re += fsc[:-1, :-1] #sc\n temp_re += fcs[:-1, :-1] #cs\n \n temp_im = (-fcc[1:, 1:] #-cpcp\n + fss[1:, 1:]) # +spsp\n temp_im += fcc[:-1, 1:] # +ccp\n temp_im -= fss[:-1, 1:] # -ssp\n temp_im += fcc[1:, :-1] # +cpc\n temp_im -= fss[1:, :-1] # -sps\n temp_im -= fcc[:-1, :-1] # -cc\n temp_im += fss[:-1, :-1]# +ss\n \n temp_K = 1j * temp_im # a * b creates copy and casts to complex\n temp_K += temp_re\n temp_K *= iiij2No2\n temp_K *= 0.5\n in_outK[(N-No2):(N+No2), (N-No2):(N+No2)] = temp_K\n \n in_outF[(N-No2):(N+No2), (N-No2):(N+No2)] \\\n = field[(N-2*No2):N,(N-2*No2):N] #cutting off field if N odd (!)\n in_outF[(N-No2):(N+No2), (N-No2):(N+No2)] *= iiij2No2\n \n tictoc.tic()\n in_outK = _fft2(in_outK, **_fftargs)\n in_outF = _fft2(in_outF, **_fftargs)\n t_fft1 = tictoc.toc()\n \n in_outF *= in_outK\n \n in_outF *= iiij2N\n tictoc.tic()\n in_outF = _ifft2(in_outF, **_fftargs)\n t_fft2 = tictoc.toc()\n #TODO check normalization if USE_PYFFTW\n \n Ftemp = (in_outF[No2:N+No2, No2:N+No2]\n - in_outF[No2-1:N+No2-1, No2:N+No2])\n Ftemp += in_outF[No2-1:N+No2-1, No2-1:N+No2-1]\n Ftemp -= in_outF[No2:N+No2, No2-1:N+No2-1]\n comp = complex(cokz, sikz)\n Ftemp *= 0.25 * comp\n Ftemp *= iiijN\n field = Ftemp #reassign without data copy\n ttotal = tictoc.toc()\n t_fft = t_fft1 + t_fft2\n t_outside = ttotal - t_fft\n debug_time = False\n if debug_time:\n 
print('Time total = fft + rest: {:.2f}={:.2f}+{:.2f}'.format(\n ttotal, t_fft, t_outside))\n return field"
] | [
"0.7170832",
"0.6100913",
"0.5694191",
"0.562633",
"0.5549602",
"0.5426374",
"0.54186237",
"0.5341644",
"0.5283791",
"0.5212293",
"0.49859214",
"0.4963954",
"0.49574816",
"0.4957023",
"0.4904727",
"0.4896321",
"0.48726845",
"0.4868087",
"0.48667774",
"0.4864888",
"0.4824995",
"0.48084083",
"0.48082078",
"0.48076808",
"0.48027065",
"0.4799215",
"0.47902367",
"0.47746342",
"0.47687024",
"0.4759916"
] | 0.77101654 | 0 |
Shift the field 'field_array' by n_move cells on the GPU. This is done in spectral space and corresponds to multiplying the fields with the factor exp(i*kz_true*dz)**n_move . | def shift_spect_array_gpu( field_array, shift_factor, n_move ):
# Get a 2D CUDA grid
iz, ir = cuda.grid(2)
# Only access values that are actually in the array
if ir < field_array.shape[1] and iz < field_array.shape[0]:
power_shift = 1. + 0.j
# Calculate the shift factor (raising to the power n_move ;
# for negative n_move, we take the complex conjugate, since
# shift_factor is of the form e^{i k dz})
for i in range( abs(n_move) ):
power_shift *= shift_factor[iz]
if n_move < 0:
power_shift = power_shift.conjugate()
# Shift fields
field_array[iz, ir] *= power_shift | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def shift_spect_array_cpu( field_array, shift_factor, n_move ):\n Nz, Nr = field_array.shape\n\n # Loop over the 2D array (in parallel over z if threading is enabled)\n for iz in prange( Nz ):\n power_shift = 1. + 0.j\n # Calculate the shift factor (raising to the power n_move ;\n # for negative n_move, we take the complex conjugate, since\n # shift_factor is of the form e^{i k dz})\n for i in range( abs(n_move) ):\n power_shift *= shift_factor[iz]\n if n_move < 0:\n power_shift = power_shift.conjugate()\n # Shift the fields\n for ir in range( Nr ):\n field_array[iz, ir] *= power_shift",
"def move_grids(self, fld, comm, time):\n # To avoid discrepancies between processors, only the first proc\n # decides whether to send the data, and broadcasts the information.\n dz = comm.dz\n if comm.rank==0:\n # Move the continuous position of the moving window object\n self.zmin += self.v * (time - self.t_last_move)\n # Find the number of cells by which the window should move\n zmin_global_domain, zmax_global_domain = comm.get_zmin_zmax(\n local=False, with_damp=False, with_guard=False )\n n_move = int( (self.zmin - zmin_global_domain)/dz )\n else:\n n_move = None\n # Broadcast the information to all proc\n if comm.size > 1:\n n_move = comm.mpi_comm.bcast( n_move )\n\n # Move the grids\n if n_move != 0:\n # Move the global domain\n comm.shift_global_domain_positions( n_move*dz )\n # Shift the fields\n Nm = len(fld.interp)\n for m in range(Nm):\n # Modify the values of the corresponding z's\n fld.interp[m].zmin += n_move*fld.interp[m].dz\n fld.interp[m].zmax += n_move*fld.interp[m].dz\n # Shift/move fields by n_move cells in spectral space\n self.shift_spect_grid( fld.spect[m], n_move )\n\n # Because the grids have just been shifted, there is a shift\n # in the cell indices that are used for the prefix sum.\n if fld.use_cuda:\n fld.prefix_sum_shift += n_move\n # This quantity is reset to 0 whenever prefix_sum is recalculated\n\n # Prepare the positions of injection for the particles\n # (The actual creation of particles is done when the routine\n # exchange_particles of boundary_communicator.py is called)\n if comm.rank == comm.size-1:\n # Move the injection position\n self.z_inject += self.v * (time - self.t_last_move)\n # Take into account the motion of the end of the plasma\n self.z_end_plasma += self.v_end_plasma * (time - self.t_last_move)\n # Increment the number of particle cells to add\n nz_new = int( (self.z_inject - self.z_end_plasma)/dz )\n self.nz_inject += nz_new\n # Increment the virtual position of the end of the plasma\n # (When `generate_particles` is called, then the plasma\n # is injected between z_end_plasma - nz_inject*dz and z_end_plasma,\n # and afterwards nz_inject is set to 0.)\n self.z_end_plasma += nz_new*dz\n\n # Change the time of the last move\n self.t_last_move = time",
"def shift_spect_grid( self, grid, n_move,\n shift_rho=True, shift_currents=True ):\n if grid.use_cuda:\n shift = grid.d_field_shift\n # Get a 2D CUDA grid of the size of the grid\n tpb, bpg = cuda_tpb_bpg_2d( grid.Ep.shape[0], grid.Ep.shape[1] )\n # Shift all the fields on the GPU\n shift_spect_array_gpu[tpb, bpg]( grid.Ep, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Em, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Ez, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Bz, shift, n_move )\n if shift_rho:\n shift_spect_array_gpu[tpb, bpg]( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_gpu[tpb, bpg]( grid.Jp, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jm, shift, n_move )\n shift_spect_array_gpu[tpb, bpg]( grid.Jz, shift, n_move )\n else:\n shift = grid.field_shift\n # Shift all the fields on the CPU\n shift_spect_array_cpu( grid.Ep, shift, n_move )\n shift_spect_array_cpu( grid.Em, shift, n_move )\n shift_spect_array_cpu( grid.Ez, shift, n_move )\n shift_spect_array_cpu( grid.Bp, shift, n_move )\n shift_spect_array_cpu( grid.Bm, shift, n_move )\n shift_spect_array_cpu( grid.Bz, shift, n_move )\n if shift_rho:\n shift_spect_array_cpu( grid.rho_prev, shift, n_move )\n if shift_currents:\n shift_spect_array_cpu( grid.Jp, shift, n_move )\n shift_spect_array_cpu( grid.Jm, shift, n_move )\n shift_spect_array_cpu( grid.Jz, shift, n_move )",
"def TransformUpMovement(field):\n i = 0\n side = int(math.sqrt(len(field)))\n while i < side:\n j = len(field) - side + i\n line = []\n l = i\n while l <= j:\n line.append(field[l])\n l = l + side\n\n line = move(line)\n j = len(field) - side + i\n l = i\n k = 0\n while l <= j:\n field[l] = line[k]\n l = l + side\n k = k + 1\n i = i + 1\n return field",
"def grid_shift(grid, advection, trim_edges=0, field_list=None):\n if trim_edges == 0:\n trim_slice = slice(None, None)\n else:\n trim_slice = slice(int(trim_edges), -int(trim_edges))\n\n shifted_grid = copy.deepcopy(grid)\n\n # grab the x and y axis and trim\n shifted_grid.x[\"data\"] = grid.x[\"data\"][trim_slice].copy()\n shifted_grid.y[\"data\"] = grid.y[\"data\"][trim_slice].copy()\n\n # shift each field.\n if field_list is None:\n field_list = grid.fields.keys()\n\n for field in field_list:\n # copy data and fill with nans\n data = grid.fields[field][\"data\"].copy()\n data = np.ma.filled(data, np.nan)\n\n # shift the data\n shifted_data = shift(data, [0, advection[0], advection[1]], prefilter=False)\n\n # mask invalid, trim and place into grid\n shifted_data = np.ma.fix_invalid(\n shifted_data, copy=False, fill_value=get_fillvalue()\n )\n shifted_data = shifted_data[:, trim_slice, trim_slice]\n shifted_grid.fields[field][\"data\"] = shifted_data\n\n return shifted_grid",
"def TransformRightMovement(field):\n i = 0\n side = int(math.sqrt(len(field)))\n while i < len(field):\n j = (i + side) - 1\n line = []\n for x in range(j, i - 1, -1):\n line.append(field[x])\n line = move(line)\n k = 0\n for x in range(j, i - 1, -1):\n field[x] = line[k]\n k = k + 1\n i = i + side\n return field",
"def make_move(self, board, fieldy, fieldx):\n board[self.posy][self.posx], board[fieldy][fieldx] = board[fieldy][fieldx], board[self.posy][self.posx]\n self.posy = fieldy\n self.posx = fieldx",
"def TransformDownMovement(field):\n i = 0\n side = int(math.sqrt(len(field)))\n while i < side:\n j = len(field) - side + i\n line = []\n l = j\n while l >= i:\n line.append(field[l])\n l = l - side\n\n line = move(line)\n j = len(field) - side + i\n l = j\n k = 0\n while l >= i:\n field[l] = line[k]\n l = l - side\n k = k + 1\n i = i + 1\n return field",
"def shift_rows(state):\n state[1][0], state[1][1], state[1][2], state[1][3] = state[1][1], state[1][2], state[1][3], state[1][0]\n state[2][0], state[2][1], state[2][2], state[2][3] = state[2][2], state[2][3], state[2][0], state[2][1]\n state[3][0], state[3][1], state[3][2], state[3][3] = state[3][3], state[3][0], state[3][1], state[3][2]",
"def TransformLeftMovement(field):\n i = 0\n side = int(math.sqrt(len(field)))\n while i < len(field):\n j = (i + side)\n line = []\n for x in range(i, j):\n line.append(field[x])\n\n line = move(line)\n k = 0\n for x in range(i, j):\n field[x] = line[k]\n k = k + 1\n i = i + side\n return field",
"def inv_shift_rows(state):\n state[1][0], state[1][1], state[1][2], state[1][3] = state[1][3], state[1][0], state[1][1], state[1][2]\n state[2][0], state[2][1], state[2][2], state[2][3] = state[2][2], state[2][3], state[2][0], state[2][1]\n state[3][0], state[3][1], state[3][2], state[3][3] = state[3][1], state[3][2], state[3][3], state[3][0]",
"def move(self):\n x = y = z = 0.0\n for cell in self.cells:\n x += (cell.x)#*n\n y += (cell.y)#*n\n z += (cell.z)#*n\n np = float(len(self.cells))\n med = numpy.array([x/np,y/np,z/np])\n \n dists = []\n for cell in self.cells:\n d = (cell.x-self.x)**2+(cell.y-self.y)**2+(cell.z-self.z)**2\n d = numpy.sqrt(d)\n dists.append(d)\n #md = (cell.x-med[0])**2+(cell.y-med[1])**2+(cell.z-med[2])**2\n #dists[-1] = (dists[-1]+md)/2\n cell = self.cells[numpy.argmin(dists)]\n cc = numpy.array([cell.x, cell.y, cell.z])\n \n t = self.t\n if abs(self.dnp) * ( self.np-self.np_req) > 0:\n t = self.tr\n self.dcenter = (1-t)*(med-self.center + self.u*(cc-med))\n self.x,self.y,self.z = self.center = self.center + self.dcenter",
"def _move_in_one_more_block():\n with tik_inst.for_range(0, sub_h_align_block_size) as sub_h_idx:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block * sub_h_idx],\n src[w_offset + w_size * sub_h_idx], 0, 1, sub_w_block, 0, 0)\n # in order to avoid dirty data when multiple core\n with tik_inst.for_range(0, data_cnt_one_block) as sub_h_idx_1:\n tik_inst.data_move(dst[sub_w_block * data_cnt_one_block *\n (sub_h_align_block_size + sub_h_idx_1)],\n src[w_offset +\n w_size * (sub_h_size - data_cnt_one_block + sub_h_idx_1)],\n 0, 1, sub_w_block, 0, 0)",
"def ShiftFrame(Frame, PixShift):\n \n import numpy as np\n \n F, R, C = Frame.shape\n \n if F > 1:\n msg = f\"'Frame' must be a 2D frame with shape (1, R, C) but has shape\"\\\n + f\" ({F}, {R}, {C}).\"\n \n raise Exception(msg)\n \n # Initialise ShiftedFrame:\n ShiftedFrame = np.zeros((1, R, C), dtype='uint')\n #ShiftedFrame = np.empty_like(Frame, dtype='uint') # this creates 42,932\n # unique values for some reason!\n \n #unique = UniqueItems(Nda=Frame, NonZero=False)\n #print(f'\\n---> There are {len(unique)} unique items in Frame')\n #unique = UniqueItems(Nda=ShiftedFrame, NonZero=False)\n #print(f'\\n---> There are {len(unique)} unique items in the initialised',\n # f'ShiftedFrame: {unique[:11]}...')\n \n di, dj, dk = PixShift\n \n ##ShiftedFrame[0, dj:, di:] = Frame[0, :-(1+dj), :-(1+di)]\n ##ShiftedFrame[0, :-(1+dj), :-(1+di)] = Frame[0, dj:, di:]\n #ShiftedFrame[0, :R-dj, :C-di] = Frame[0, dj:, di:]\n \n if di > 0 and dj > 0:\n ShiftedFrame[0, dj:, di:] = Frame[0, :-dj, :-di]\n \n elif di < 0 and dj < 0:\n ShiftedFrame[0, :dj, :di] = Frame[0, -dj:, -di:]\n \n elif di > 0 and dj < 0:\n ShiftedFrame[0, :dj, di:] = Frame[0, -dj:, :-di]\n \n elif di < 0 and dj > 0:\n ShiftedFrame[0, dj:, :di] = Frame[0, :-dj, -di:]\n \n elif di == 0 and dj > 0:\n ShiftedFrame[0, dj:, :] = Frame[0, :-dj, :]\n \n elif di == 0 and dj < 0:\n ShiftedFrame[0, :dj, :] = Frame[0, -dj:, :]\n \n elif di > 0 and dj == 0:\n ShiftedFrame[0, :, di:] = Frame[0, :, :-di]\n \n elif di < 0 and dj == 0:\n ShiftedFrame[0, :, :di] = Frame[0, :, -di:]\n \n elif di == 0 and dj == 0:\n ShiftedFrame[0] = Frame[0]\n \n #unique = UniqueItems(Nda=ShiftedFrame, NonZero=False)\n #print(f'\\n---> There are {len(unique)} unique items in the ShiftedFrame',\n # 'after shifting.')\n \n return ShiftedFrame",
"def yank(self):\r\n self.block.bucket_array.yank_cell(self)",
"def move_element(self,n_a,n_b):\n self.element_array.insert(n_b,self.element_array.pop(n_a))",
"def make_move(self, move):\n self.board[int(move) - 1] = self.nplayer",
"def Repeater(arr,n):\n new_arr = np.zeros((arr.shape[0]*n,arr.shape[1]),dtype=object)\n for i in range(0,arr.shape[0]):\n new_row = np.tile(arr[i,:],(n,1))\n new_arr[i*n:(i+1)*n,:] = new_row\n return new_arr",
"def move(self, direction):\n original_grid = []\n for row in self._grid:\n original_row = list(row)\n original_grid.append(original_row)\n steps = 0\n if direction == UP or direction == DOWN:\n steps = self._grid_height\n elif direction == LEFT or direction == RIGHT:\n steps = self._grid_width\n to_move = []\n for initial_cell in self._initial_cells[direction]:\n for step in range(steps):\n new_row = initial_cell[0] + step * OFFSETS[direction][0]\n new_column = initial_cell[1] + step * OFFSETS[direction][1]\n to_move.append(self._grid[new_row][new_column])\n to_move = merge(to_move)\n row = initial_cell[0]\n column = initial_cell[1]\n for step in range(steps):\n self._grid[row + OFFSETS[direction][0] * step][column + OFFSETS[direction][1] * step] = to_move[step]\n to_move = []\n if original_grid != self._grid:\n self.new_tile()",
"def Forward(Fin, z, sizenew, Nnew ):\n if z <= 0:\n raise ValueError('Forward does not support z<=0')\n Fout = Field.begin(sizenew, Fin.lam, Nnew, Fin._dtype)\n \n field_in = Fin.field\n field_out = Fout.field\n \n field_out[:,:] = 0.0 #default is ones, clear\n \n old_size = Fin.siz\n old_n = Fin.N\n new_size = sizenew #renaming to match cpp code\n new_n = Nnew\n\n on2 = int(old_n/2)\n nn2 = int(new_n/2) #read \"new n over 2\"\n dx_new = new_size/(new_n-1)\n dx_old = old_size/(old_n-1)\n #TODO again, dx seems better defined without -1, check this\n \n R22 = _np.sqrt(1/(2*Fin.lam*z))\n\n X_new = _np.arange(-nn2, new_n-nn2) * dx_new\n Y_new = X_new #same\n X_old = _np.arange(-on2, old_n-on2) * dx_old\n Y_old = X_old #same\n for i_new in range(new_n):\n x_new = X_new[i_new]\n \n P1 = R22*(2*(X_old-x_new)+dx_old)\n P3 = R22*(2*(X_old-x_new)-dx_old)\n Fs1, Fc1 = _fresnel(P1)\n Fs3, Fc3 = _fresnel(P3)\n for j_new in range(new_n):\n y_new = Y_new[j_new]\n \n P2 = R22*(2*(Y_old-y_new)-dx_old)\n P4 = R22*(2*(Y_old-y_new)+dx_old)\n Fs2, Fc2 = _fresnel(P2)\n Fs4, Fc4 = _fresnel(P4)\n \n C4C1=_np.outer(Fc4, Fc1) #out[i, j] = a[i] * b[j] \n C2S3=_np.outer(Fc2, Fs3) #-> out[j,i] = a[j]*b[i] here\n C4S1=_np.outer(Fc4, Fs1)\n S4C1=_np.outer(Fs4, Fc1)\n S2C3=_np.outer(Fs2, Fc3)\n C2S1=_np.outer(Fc2, Fs1)\n S4C3=_np.outer(Fs4, Fc3)\n S2C1=_np.outer(Fs2, Fc1)\n C4S3=_np.outer(Fc4, Fs3)\n S2S3=_np.outer(Fs2, Fs3)\n S2S1=_np.outer(Fs2, Fs1)\n C2C3=_np.outer(Fc2, Fc3)\n S4S1=_np.outer(Fs4, Fs1)\n C4C3=_np.outer(Fc4, Fc3)\n C4C1=_np.outer(Fc4, Fc1)\n S4S3=_np.outer(Fs4, Fs3)\n C2C1=_np.outer(Fc2, Fc1)\n \n Fr = 0.5 * field_in.real\n Fi = 0.5 * field_in.imag\n Temp_c = (Fr * (C2S3 + C4S1 + S4C1 + S2C3\n - C2S1 - S4C3 - S2C1 - C4S3)\n + Fi * (-S2S3 + S2S1 + C2C3 - S4S1\n - C4C3 + C4C1 + S4S3 - C2C1)\n + 1j * Fr *(-C4C1 + S2S3 + C4C3 - S4S3\n + C2C1 - S2S1 + S4S1 - C2C3)\n + 1j * Fi*(C2S3 + S2C3 + C4S1 + S4C1\n - C4S3 - S4C3 - C2S1 - S2C1))\n field_out[j_new, i_new] = Temp_c.sum() #complex elementwise sum\n Fout._IsGauss=False\n return Fout",
"def move(self, direction):\r\n # replace with your code\r\n row_dir = OFFSETS[direction][0]\r\n col_dir = OFFSETS[direction][1]\r\n \r\n if row_dir == 0:\r\n new_cells = self._cells\r\n new_dir = col_dir\r\n else:\r\n new_tuples = zip(*self._cells)\r\n new_cells = [list(item) for item in new_tuples]\r\n new_dir = row_dir\r\n \r\n tmp_cells = []\r\n for lists in new_cells:\r\n lists = lists[::new_dir]\r\n merge_lists = merge(lists)\r\n tmp_cells.append(merge_lists[::new_dir])\r\n \r\n if row_dir == 0:\r\n self._cells = tmp_cells\r\n else:\r\n new_tuples = zip(*tmp_cells)\r\n new_cells = [list(item) for item in new_tuples]\r\n self._cells = new_cells\r\n \r\n self.new_tile()",
"def _extend_contiguous_traj_field(self, run_idx, traj_idx, field_path, field_data):\n\n traj_grp = self.h5['{}/{}/{}/{}'.format(RUNS, run_idx, TRAJECTORIES, traj_idx)]\n field = traj_grp[field_path]\n\n # make sure this is a feature vector\n assert len(field_data.shape) > 1, \\\n \"field_data must be a feature vector with the same number of dimensions as the number\"\n\n # of datase new frames\n n_new_frames = field_data.shape[0]\n\n # check the field to make sure it is not empty\n if all([i == 0 for i in field.shape]):\n\n # check the feature shape against the maxshape which gives\n # the feature dimensions for an empty dataset\n assert field_data.shape[1:] == field.maxshape[1:], \\\n \"field feature dimensions must be the same, i.e. all but the first dimension\"\n\n # if it is empty resize it to make an array the size of\n # the new field_data with the maxshape for the feature\n # dimensions\n feature_dims = field.maxshape[1:]\n field.resize( (n_new_frames, *feature_dims) )\n\n # set the new data to this\n field[0:, ...] = field_data\n\n else:\n # make sure the new data has the right dimensions against\n # the shape it already has\n assert field_data.shape[1:] == field.shape[1:], \\\n \"field feature dimensions must be the same, i.e. all but the first dimension\"\n\n\n # append to the dataset on the first dimension, keeping the\n # others the same, these must be feature vectors and therefore\n # must exist\n field.resize( (field.shape[0] + n_new_frames, *field.shape[1:]) )\n # add the new data\n field[-n_new_frames:, ...] = field_data",
"def realign_image(arr, shift, angle=0):\n # if both shifts are integers, do circular shift; otherwise perform Fourier shift.\n if np.count_nonzero(np.abs(np.array(shift) - np.round(shift)) < 0.01) == 2:\n temp = np.roll(arr, int(shift[0]), axis=0)\n temp = np.roll(temp, int(shift[1]), axis=1)\n temp = temp.astype('float32')\n else:\n temp = fourier_shift(np.fft.fftn(arr), shift)\n temp = np.fft.ifftn(temp)\n temp = np.abs(temp).astype('float32')\n return temp",
"def move(self,move):\n for x in range(len(self.coord)):\n self.coord[x] = np.array([y+np.array(move) for y in self.coord[x]])\n return self",
"def shift(image,shift_x,shift_y):\n return np.roll(np.roll(image,shift_y,axis=0),shift_x,axis=1)",
"def offsetElements(self, i):\n\n #iterate over each tile and subtract\n #if the value is -1, indicating a blank tile, leave it as that\n for y in range(0, len(self.array)):\n for x in range(0, len(self.array[0])):\n if self.array[y][x] != -1:\n self.array[y][x] -= i",
"def rgbArray_move(self, rgbList, delay):\n # res\n\n res = self.rgbArrayOfs_move(0,rgbList,delay)\n return res",
"def DELAY(A, n):\r\n At = pivot_table(A)\r\n res = At.shift(n)\r\n res = stack_table(res)\r\n return res",
"def piecewise_transform(image, numcols=5, numrows=5, warp_left_right=10, warp_up_down=10, order=1):\n\n rows, cols = image.shape[0], image.shape[1]\n\n numcols = numcols\n numrows = numrows\n\n src_cols = np.linspace(0, cols, numcols, dtype=int)\n src_rows = np.linspace(0, rows, numrows, dtype=int)\n src_rows, src_cols = np.meshgrid(src_rows, src_cols)\n src = np.dstack([src_cols.flat, src_rows.flat])[0]\n\n src_rows_new = np.ndarray.transpose(src_rows)\n src_cols_new = np.ndarray.transpose(src_cols)\n # src_new = np.dstack([src_cols_new.flat, src_rows_new.flat])[0]\n\n dst_cols = np.ndarray(src_cols.shape)\n dst_rows = np.ndarray(src_rows.shape)\n for i in range(0, numcols):\n for j in range(0, numrows):\n if src_cols[i, j] == 0 or src_cols[i, j] == cols:\n dst_cols[i, j] = src_cols[i, j]\n else:\n dst_cols[i, j] = src_cols[i, j] + np.random.uniform(-1, 1) * warp_left_right\n\n if src_rows[i, j] == 0 or src_rows[i, j] == rows:\n dst_rows[i, j] = src_rows[i, j]\n else:\n dst_rows[i, j] = src_rows[i, j] + np.random.uniform(-1, 1) * warp_up_down\n\n dst = np.dstack([dst_cols.flat, dst_rows.flat])[0]\n\n # dst_rows_new = np.ndarray.transpose(dst_rows)\n # dst_cols_new = np.ndarray.transpose(dst_cols)\n # dst_new = np.dstack([dst_cols_new.flat, dst_rows_new.flat])[0]\n\n tform = transform.PiecewiseAffineTransform()\n tform.estimate(src, dst)\n\n img_new = transform.warp(image, tform, output_shape=(rows, cols), order=order, preserve_range=True)\n img_new = img_new.astype(image.dtype)\n \n return img_new",
"def move(self, direction):\n new_grid = []\n # get the indices of specific direction\n new_indices = self._grid_indices[direction]\n for cell in new_indices:\n lst = self.traversed_list(cell, direction)\n merged_list = merge(lst)\n new_grid.append(merged_list)\n \n adjusted_grid = adjust_grid(new_grid,direction)\n if self.is_changed(adjusted_grid):\n self.update_grid(adjusted_grid)\n self.new_tile()"
] | [
"0.73844004",
"0.6082966",
"0.58029294",
"0.5760416",
"0.5458749",
"0.5457597",
"0.54504377",
"0.5396676",
"0.5381811",
"0.53601795",
"0.51622343",
"0.508853",
"0.50696003",
"0.5024297",
"0.5003249",
"0.49621084",
"0.49588436",
"0.49356058",
"0.4930291",
"0.49255875",
"0.49201998",
"0.48937675",
"0.48927703",
"0.48697072",
"0.48549896",
"0.48480734",
"0.48192695",
"0.47968262",
"0.47925448",
"0.47841445"
] | 0.72734666 | 1 |
Note the camelcase name and unused variable. Bad bad bad. | def camelCaseFunc():
unused = 1 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def check_for_unused_names(self):\n for s in self.unused_names:\n self.warning(\"'%s' is unused.\"%s)\n\n# warns for param that specified with -c (but also if name gets defined in __main__,\n# e.g. by default_density=global_params.default_density in a script file\n## for name in self.params():\n## if name in self.context:\n## self.warning(\"'%s' still exists in global_params.context\"%name)\n\n # detect duplicate param value that wasn't used (e.g. specified with after script)\n for name,val in self.params().items():\n if name in self.context:\n if self.context[name]!=self.inspect_value(name):\n self.warning(\"'%s=%s' is unused.\"%(name,self.context[name]))",
"def not_capitalized(): # noqa: D416",
"def nice_name():\n\n pass",
"def name():\n\n pass",
"def unbound(name):",
"def test_instance_vars_have_valid_names(question):\n instance = question[\"instance\"]\n for name in instance.get(\"variables\", {}).keys():\n assert CAMEL_CASE_PATTERN.match(\n name\n ), \"variable {} not slouchingCamelCase\".format(name)",
"def name():\n pass",
"def name():\n pass",
"def test_var_names(var_name):\n assert isinstance(var_name, str)\n if standard_names.is_valid_name(var_name):\n standard_names.StandardName(var_name)\n else:\n warnings.warn(\"not a valid standard name: {name}\".format(name=var_name))",
"def _NiceNameToPreventCompilerErrors(self, attrname):\n # only emit the rhs of a multi part name e.g. undo.UndoItem will appear only as UndoItem\n if attrname.find(\".\") != -1:\n attrname = attrname.split(\".\")[-1] # take the last\n # Prevent compiler errors on the java side by avoiding the generating of java keywords as attribute names\n if attrname in javakeywords:\n attrname = \"_\" + attrname\n return attrname",
"def test_get_name_of_variable(self):\n name = Code()\n self.assertEqual(str(name), 'name')",
"def name(self) -> str: # pragma: no cover",
"def var_name ( self , name ) :\n if name in self.__var_names and not NameDuplicates.allowed() :\n self.warning ( 'The variable name \"%s\" is already defined!' % name )\n \n self.__var_names.add ( name )\n self.__local_names.add ( name )\n return name",
"def lower_case_really():",
"def name(self):",
"def name(self):",
"def name(self):",
"def name(self):",
"def __init__(self):\n self.__name = 'name'",
"def name(self):\n ...",
"def __getattribute__(self, name):\n if name in ('_special_names', '__dict__'):\n return super().__getattribute__(name)\n if hasattr(self, '_special_names'):\n if name in self._special_names:\n raise AttributeError(\n f\"{name} is a reserved variable name and it cannot be read\")\n return super().__getattribute__(name)",
"def dummy(self):\n pass",
"def verify_naming(self, reserved):\n for w in reserved:\n if w in self.decisions:\n raise ParseError('Duplicate variable/block name \"{}\"'.format(w))",
"def __init__(self):\n FooBar = None\n Foo = None\n FOO = None\n foo_bar = None",
"def var(self, name):\n raise NotImplementedError",
"def my_name(self):\n# different block has different namespace\n# local namesapce for a function is created when the function is called, \n# is deleted when the function exit or expception happens and not be handled\n# by the function.\n local_name = 'a'\n my_life = \"alive\"\n print local_name",
"def name(self, name):\n pass",
"def verif_unused(sv):\r\n if Unused in sv.Object and sv.Object[Unused].value: # check presence and integrity of unused list\r\n unusedlist=[applied (x, Unused) for x in sv.Object[Unused].value]\r\n for nam in unusedlist: # check each unused declaration\r\n nod=sv.Object[nam]\r\n if sv.Namedpinlist.get(nam)==[nod.effects]: continue # pin is just named\r\n elif applied(nam, Output):\r\n if len(nod.effects)==1: # only effect is output list\r\n if len(nod.causes)<=2: continue\r\n if len(nod.causes)<=4 and Faux in nod.causes and Ewent in nod.causes: continue # allow 'take event'\r\n elif nod.causes or nod.effects: # object should have no cause and no effect\r\n print(Err_unused_obj) \r\n print(str(nam))\r\n sv.Current_clause=None, None, None\r\n raise ReferenceError",
"def variable(self):",
"def __init__(self, a):\n self.__name__ = a"
] | [
"0.6454768",
"0.63859814",
"0.61285913",
"0.6127803",
"0.5935644",
"0.59143764",
"0.58966666",
"0.58966666",
"0.58946407",
"0.58621436",
"0.5839409",
"0.5818854",
"0.5737015",
"0.5730995",
"0.57293755",
"0.57293755",
"0.57293755",
"0.57293755",
"0.57196563",
"0.57148516",
"0.5700287",
"0.56968695",
"0.56788254",
"0.56771505",
"0.567612",
"0.5672481",
"0.56635886",
"0.5631864",
"0.560034",
"0.55763716"
] | 0.6955739 | 0 |
Whether or not the Window supports user resizing | def resizable(self):
return self._frame._root.resizable() == '1 1' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def IsResizeable(self):\r\n \r\n return self.HasFlag(self.optionResizable)",
"def ev_windowsizechanged(self, event: WindowResized) -> None:",
"def isSelectionResizing(self):\n return self.resizing",
"def ev_windowsizechanged(self, event: tcod.event.WindowResized) -> T | None:",
"def ev_windowresized(self, event: WindowResized) -> None:",
"def AuiManager_HasLiveResize(manager):\r\n\r\n # With Core Graphics on Mac, it's not possible to show sash feedback,\r\n # so we'll always use live update instead.\r\n \r\n if wx.Platform == \"__WXMAC__\":\r\n return True\r\n else:\r\n return (manager.GetAGWFlags() & AUI_MGR_LIVE_RESIZE) == AUI_MGR_LIVE_RESIZE",
"def check_resize(self):\n yx = self.screen.getmaxyx()\n if self.current_yx != yx:\n self.current_yx = yx\n self.resize(yx)",
"def __window_resizeTo(self, iWidth, iHeight):\n pass",
"def check_window_size():\n \n wight = 870\n height = 519\n \n window = win32gui.FindWindow(MINECRAFT_CLASS_NAME, MINECRAFT_TITLE + MINECRAFT_VERSION)\n x0, y0, x1, y1 = win32gui.GetWindowRect(window)\n # x0 and y0 are initial points, upper left corner and lower left corner\n # then we need the difference between upper left corner and upper right corner to get the wight and\n # the difference between lower left corner and lower right corner to get the height\n \n w = x1 - x0\n h = y1 - y0\n \n if w is not wight or h is not height:\n win32gui.MoveWindow(window, x0, y0, wight, height, True)",
"def ev_windowmaximized(self, event: WindowEvent) -> None:",
"def ev_windowresized(self, event: tcod.event.WindowResized) -> T | None:",
"def ev_windowminimized(self, event: WindowEvent) -> None:",
"def get_window_size(self):\n raise NotImplementedError",
"def HasMaximizeButton(self):\r\n \r\n return self.HasFlag(self.buttonMaximize)",
"def resize_x(self) -> bool:\n raise NotImplementedError",
"def ev_windowmaximized(self, event: tcod.event.WindowEvent) -> T | None:",
"def getwinsize(self):",
"def ev_windowminimized(self, event: tcod.event.WindowEvent) -> T | None:",
"def fullscreen(self) -> bool:\n return bool(self.tk_ref.wm_attributes('-fullscreen'))",
"def __window_resizeBy(self, xDelta, yDelta):\n pass",
"def IsMaximized(self):\r\n \r\n return self.HasFlag(self.optionMaximized)",
"def _get_window_width(self):",
"def sizeHint( self ):\n return self.window_size",
"def IsFixed(self):\r\n \r\n return not self.HasFlag(self.optionResizable)",
"def handleResize(self):\n pass",
"def scale(self, _: Application) -> bool:\n return False",
"def resize_y(self) -> bool:\n raise NotImplementedError",
"def CanUseModernDockArt(self):\r\n\r\n if not _winxptheme:\r\n return False\r\n\r\n # Get the size of a small close button (themed)\r\n hwnd = self._frame.GetHandle()\r\n hTheme = winxptheme.OpenThemeData(hwnd, \"Window\")\r\n\r\n if not hTheme:\r\n return False\r\n\r\n return True",
"def Resizable(self, resizable=True):\r\n \r\n return self.SetFlag(self.optionResizable, resizable)",
"def maximize_option():\n Width=MaxWidth\n Height=MaxHeight - WinTitle -WinBorder\n PosX=LeftPadding\n PosY=TopPadding\n move_active(PosX,PosY,Width,Height)\n raise_window(\":ACTIVE:\")"
] | [
"0.7450744",
"0.67073464",
"0.6607859",
"0.65714055",
"0.6563378",
"0.6560024",
"0.64793974",
"0.6461006",
"0.63823223",
"0.63217413",
"0.624592",
"0.62382627",
"0.6228985",
"0.61842704",
"0.6160696",
"0.6158254",
"0.61509866",
"0.61256456",
"0.610373",
"0.60751265",
"0.60684496",
"0.6063032",
"0.6051795",
"0.6042474",
"0.6041099",
"0.6008556",
"0.5990024",
"0.5957024",
"0.59343606",
"0.5909357"
] | 0.7568574 | 0 |
The list of all turtles attached to this Window. This attribute may not be altered directly. | def turtles(self):
return self._turtles[:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def turtles(self):\n return self._turtles",
"def turbines(self):\n return self.turbine_map.turbines",
"def getturtle(self):\n return self",
"def thermostats(self):\n\n return self._thermostats",
"def lights(self):\n return list(self.GetLights())",
"def get_light_list(self):\n return self.light_array",
"def lights(self) -> List[dict]:\n return self.items_by_domain(\"light\")",
"def swing_list(self):\n return self._swing_list",
"def graphicsItems(self):\n return self.ctrl.getGraphicsItems()",
"def getAllTriStimulus(self):\n return self.tristimulus",
"def terminals(self) -> AbstractSet[Terminal]:\n return self._terminals",
"def wires(self):\n return [o.wires for o in self.obs]",
"def getListOfAllInstantiatedElements(self):\n return _libsbml.Submodel_getListOfAllInstantiatedElements(self)",
"def get_triples(self):\n return [\n triple\n for uid, cuds_object in self._registry.items()\n for triple in cuds_object.get_triples()\n ]",
"def _all_subnodes(self):\n return self.__dict__.values()",
"def listglobal(self):\n return list(self.attributes.keys())",
"def items(self) -> List[RadioStation]:\n return self._items",
"def terminals(self) -> List[Terminal]:\n return [terminal for prim in self.primitives for terminal in prim._terminals]",
"def get_all_thermals(self):\n return self._thermal_list",
"def trios(self):\n return self._trios",
"def byteruns(self):\n return self._byteruns",
"def drawables(self):\n\treturn self._Widget__w['drawables']",
"def getRaceList(self):\n\t\tl = []\n\t\tfor r in self.races:\n\t\t\tl.append(r.name)\n\t\treturn l",
"def get_symbols_list(self):\n return self.symbols_list",
"def items(self):\n return self.root.items()",
"def getAll(self):\n return self.__lst",
"def getTouchdowns(self):\n return self.touchdowns",
"def _get_all_spectra(self):\n pass",
"def get_rings(self):\n return iter(self)",
"def list(self):\n return self.cell.objects+self.cell.tempObjects"
] | [
"0.8411543",
"0.6130211",
"0.5962633",
"0.59047854",
"0.5882312",
"0.5679309",
"0.56639826",
"0.5651566",
"0.5635582",
"0.5587322",
"0.5562676",
"0.5513544",
"0.5510087",
"0.5497294",
"0.54633754",
"0.54598933",
"0.5458061",
"0.5453143",
"0.5407893",
"0.53814805",
"0.53572404",
"0.5338319",
"0.53330684",
"0.5332764",
"0.5330036",
"0.53272617",
"0.5320306",
"0.53073496",
"0.52968353",
"0.5290663"
] | 0.8240958 | 1 |
The list of all pens attached to this Window. This attribute may not be altered directly. | def pens(self):
return self._pencils[:] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def drawables(self):\n\treturn self._Widget__w['drawables']",
"def getPixels(self):\n\t\treturn self.strip.ledsColorBuffer",
"def swing_list(self):\n return self._swing_list",
"def getPixelsBuffer(self):\n\t\treturn self.leds",
"def get_list_powers(self):\r\n return self.ps",
"def getMenuItemPixels(cls):\n return cls.menuItemPixels",
"def graphicsItems(self):\n return self.ctrl.getGraphicsItems()",
"def window_handles(self):\n pass",
"def children_list(self):\n return [\n # self.notify,\n # self.snap_multiplier,\n # self.range_low, self.range_high,\n # self.activity_threshold\n ]",
"def make_list(self):\n return list(self.widget_dict.values())",
"def prvs(self): \n return self._link_reg.prvs",
"def swing_list(self):\n return None",
"def gpio_properties(self):\n res = self._dll.JLINK_EMU_GPIO_GetProps(0, 0)\n if res < 0:\n raise errors.JLinkException(res)\n\n num_props = res\n buf = (structs.JLinkGPIODescriptor * num_props)()\n res = self._dll.JLINK_EMU_GPIO_GetProps(ctypes.byref(buf), num_props)\n if res < 0:\n raise errors.JLinkException(res)\n\n return list(buf)",
"def colors(self):\r\n\t\treturn self._colors",
"def listdimension(self):\n return list(self.dimensions.keys())",
"def pids(self):\r\n return copy(self._pids)",
"def get_all_drawables(self): \n drawables = []\n if len(self.component_list) > 0:\n for c in self.component_list:\n drawables.append(c.get_drawables())\n return drawables",
"def port_list(self):\n return self._port_list",
"def power_pumps(self):\n return self._link_reg.power_pumps",
"def pumps(self): \n return self._link_reg.pumps",
"def get_components_drawables(self):\n # print self.component_list\n print len(self.component_list)\n for c in self.component_list:\n return c.get_drawables()",
"def colors(self):\n return self[\"colors\"]",
"def colors(self):\n return self[\"colors\"]",
"def colors(self):\n return self._colors",
"def knobs(self):\n return self.Knobs(self)",
"def get_Pbs(self):\r\n return self.Pbs",
"def GetAttributes(self, pane):\r\n\r\n attrs = []\r\n attrs.extend([pane.window, pane.frame, pane.state, pane.dock_direction,\r\n pane.dock_layer, pane.dock_pos, pane.dock_row, pane.dock_proportion,\r\n pane.floating_pos, pane.floating_size, pane.best_size,\r\n pane.min_size, pane.max_size, pane.caption, pane.name,\r\n pane.buttons, pane.rect, pane.icon, pane.notebook_id,\r\n pane.transparent, pane.snapped, pane.minimize_mode])\r\n\r\n return attrs",
"def ppix(self):\n return self._ppix",
"def get_light_list(self):\n return self.light_array",
"def lights(self):\n return list(self.GetLights())"
] | [
"0.6194239",
"0.5969787",
"0.59447014",
"0.58146423",
"0.58107215",
"0.5731491",
"0.5615673",
"0.557614",
"0.5560097",
"0.55471444",
"0.5541459",
"0.5525621",
"0.54055196",
"0.53989977",
"0.5346334",
"0.53336155",
"0.53113025",
"0.5300297",
"0.52949387",
"0.52927554",
"0.5269405",
"0.52664447",
"0.52664447",
"0.5252926",
"0.5240103",
"0.52373326",
"0.5233286",
"0.51972896",
"0.51836956",
"0.5163817"
] | 0.7543301 | 0 |
Add a turtle to this window. | def _addTurtle(self,turt):
assert (type(turt) == Turtle), "Parameter %s is not a valid Turtle object" % `turt`
self._turtles.append(turt) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_new(event):\n t = turtle.Turtle()\n screen_w, screen_h = t.screen._window_size()\n t.goto(event.x - screen_w//2, screen_h //2 - event.y)",
"def add_body(self):\r\n new_turtle = generate_turtle()\r\n new_turtle.goto(self.all_turtles[-1].position())\r\n self.all_turtles.append(new_turtle)",
"def setTurtle(t):\r\n t.pu()\r\n t.goto(initialCoordinates())",
"def init_turtle():\n turtle.up()\n turtle.home()",
"def initialize(turtle_shape, bg_color, turtle_color, turtle_speed):\n turtle_instance = turtle.Turtle()\n turtle_instance.shape(turtle_shape)\n turtle.bgcolor(bg_color)\n turtle_instance.color(turtle_color)\n turtle_instance.speed(turtle_speed)\n return turtle_instance",
"def make_window(colr, ttle):\n w = turtle.Screen()\n w.bgcolor(colr)\n w.title(ttle)\n return w",
"def make_window(colr, ttle):\n w = turtle.Screen()\n w.bgcolor(colr)\n w.title(ttle)\n w.setup(width=1800, height=600)\n return w",
"def cool_turtle():\n # Make the TurtleWindow.\n window = rg.TurtleWindow()\n\n # Make the SimpleTurtle.\n cool_turtle = rg.SimpleTurtle('turtle')\n cool_turtle.pen = rg.Pen('forest green', 1) # Try thickness 5 too\n cool_turtle.speed = 1 # Slow\n\n # Move the SimpleTurtle to her starting position.\n start_at = rg.Point(100, -50)\n cool_turtle.pen_up()\n cool_turtle.go_to(start_at)\n cool_turtle.pen_down()\n\n # Set up some parameters that control the nature of the shape drawn.\n size = 100 # Try 150 too\n angle = 1 # Try 20 too\n iterations = 360 # Try 90 too\n\n # Store the animation speed (to reset it later).\n tracer_n, tracer_d = window.tracer(), window.delay()\n\n # Make the animation go much faster.\n # First number: bigger means faster.\n # Second number: bigger means slower.\n window.tracer(5, 5)\n\n for _ in range(iterations):\n cool_turtle.right(angle)\n cool_turtle.draw_square(size)\n\n # Reset the animation to its original speed.\n window.tracer(tracer_n, tracer_d)\n\n window.close_on_mouse_click()",
"def _drawturtle(self):\n screen = self.screen\n shape = screen._shapes[self.Myturtle.shapeIndex]\n ttype = shape._type\n titem = self.Myturtle._item\n if self._shown and screen._updatecounter == 0 and screen._tracing > 0:\n self._hidden_from_screen = False\n tshape = shape._data\n if ttype == \"polygon\":\n if self._resizemode == \"noresize\": w = 1\n elif self._resizemode == \"auto\": w = self._pensize\n else: w =self._outlinewidth\n shape = self._polytrafo(self._getshapepoly(tshape))\n fc, oc = self._fillcolor, self._pencolor\n screen._drawpoly(titem, shape, fill=fc, outline=oc,\n width=w, top=True)\n elif ttype == \"image\":\n screen._drawimage(titem, self._position, tshape)\n elif ttype == \"compound\":\n for item, (poly, fc, oc) in zip(titem, tshape):\n poly = self._polytrafo(self._getshapepoly(poly, True))\n screen._drawpoly(item, poly, fill=self._cc(fc),\n outline=self._cc(oc), width=self._outlinewidth, top=True)\n else:\n if self._hidden_from_screen:\n return\n if ttype == \"polygon\":\n screen._drawpoly(titem, ((0, 0), (0, 0), (0, 0)), \"\", \"\")\n elif ttype == \"image\":\n screen._drawimage(titem, self._position,\n screen._shapes[\"blank\"]._data)\n elif ttype == \"compound\":\n for item in titem:\n screen._drawpoly(item, ((0, 0), (0, 0), (0, 0)), \"\", \"\")\n self._hidden_from_screen = True",
"def make_turtle(color, size):\n t = turtle.Turtle()\n t.color(color)\n t.pensize(size)\n return t",
"def _spawn_turtle(self, trt_x, trt_y, name=None):\n\n\t\tif name is None or name == \"\":\n\t\t\tname = self._create_unique_turtle_name()\n\t\telif self._has_turtle(name):\n\t\t\treturn \"\"\n\n\t\tturtle = Turtle(name, Point(trt_x, trt_y))\n\t\tself._turtles[name] = turtle\n\n\t\trospy.loginfo(\"New turtle [%s] at x=[%d], y=[%d]\", name, trt_x, trt_y)\n\n\t\treturn name",
"def make_turtle(colr, sz):\n t = turtle.Turtle()\n t.color(colr)\n t.pensize(sz)\n return t",
"def turtle(self,turtleType):\n if self.turtleType == turtleType:\n return\n if self.turtleType and self.turtleType != PLAYER:\n self.mc.removeEntity(self.turtleId)\n self.turtleType = turtleType\n if turtleType == PLAYER:\n self.turtleId = None\n elif turtleType:\n self.turtleId = self.mc.spawnEntity(turtleType,\n self.position.x,self.position.y,self.position.z,\n \"{NoAI:1}\")\n self.setEntityCommands()\n self.positionOut()\n self.directionOut()",
"def turtle_setup():\n # ___ ___ _ _ ___ _____ __ __ ___ ___ ___ _____ __\n # | \\ / _ \\ | \\| |/ _ \\_ _| | \\/ |/ _ \\| \\_ _| __\\ \\ / /\n # | |) | (_) | | .` | (_) || | | |\\/| | (_) | |) | || _| \\ V /\n # |___/ \\___/ |_|\\_|\\___/ |_| |_| |_|\\___/|___/___|_| |_|\n # _____ _ _ ___ ___ ___ _ _ _ _ ___ _____ ___ ___ _ _\n # |_ _| || |_ _/ __| | __| | | | \\| |/ __|_ _|_ _/ _ \\| \\| |\n # | | | __ || |\\__ \\ | _|| |_| | .` | (__ | | | | (_) | .` |\n # |_| |_||_|___|___/ |_| \\___/|_|\\_|\\___| |_| |___\\___/|_|\\_|\n #\n # Create the turtle graphics screen and set a few basic properties.\n screen = turtle.Screen()\n screen.setup( WIDTH, HEIGHT, MARGIN, MARGIN )\n screen.bgcolor( \"SkyBlue\" )\n\n # Create two turtles, one for drawing and one for writing.\n artist = turtle.Turtle()\n writer = turtle.Turtle()\n\n # Change the artist turtle's shape so the artist and writer are distinguishable.\n artist.shape( \"turtle\" )\n\n # Make the animation as fast as possible and hide the turtles.\n if DRAW_FAST:\n screen.delay( 0 )\n artist.hideturtle()\n artist.speed( \"fastest\" )\n writer.hideturtle()\n writer.speed( \"fastest\" )\n\n # Set a few properties of the writing turtle useful since it will only be writing.\n writer.setheading( 90 ) # Straight up, which makes it look sort of like a cursor.\n writer.penup() # A turtle's pen does not have to be down to write text.\n writer.setposition( 0, HEIGHT // 2 - FONT_SIZE * 2 ) # Centered at top of the screen.\n\n return screen, artist, writer",
"def make_turtle(color, size):\n t = turtle.Turtle()\n t.color(color)\n t.pensize(size)\n t.hideturtle() # do not show turtle\n t.speed(0) # 0 - 10 scale, 0 is fastest\n return t",
"def draw_petal():\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)",
"def draw_petal():\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)\n turtle.forward(30)\n turtle.left(45)\n turtle.forward(30)\n turtle.left(135)",
"def add_donut(self):\n self.scenes[self.current_scene].add_object(Donut())\n self.redraw()",
"def make_window(color, title):\n w = turtle.Screen()\n w.bgcolor(color)\n w.title(title)\n return w",
"def draw_shape(self, r=0, g=0, b=0): # black is the default color\r\n turtles= turtle.Turtle()\r\n turtles.speed(0) # Makes the turtle speed up\r\n turtles.color(r, g, b)\r\n turtles.showturtle()\r\n turtles.penup()\r\n turtles.pendown()\r\n\r\n # draws the Shape to the screen\r\n\r\n for i in range(self.num_sides):\r\n turtles.forward(self.side_length)\r\n turtles.left(360/(self.num_sides))\r\n turtles.hideturtle()",
"def _prepare_turtle():\n turtle.setup(width=screen_width)\n turtle.shape(turtle_shape)\n turtle.title(title)",
"def init():\n turtle.setworldcoordinates(-WINDOW_WIDTH / 2, -WINDOW_WIDTH / 2,\n WINDOW_WIDTH / 2, WINDOW_HEIGHT / 2)\n\n turtle.up()\n turtle.setheading(0)\n turtle.title('squares')\n pass",
"def up():\n turtleTmp.penup()",
"def screen_setup(screen_size):\n window = turtle.Screen()\n window.bgcolor(\"black\")\n window.title(\"Maze Game\")\n window.setup(screen_size, screen_size)",
"def main():\n tortue_1 = turtle.Turtle()\n tortue_1.shape(\"turtle\")\n tortue_1.color(\"aquamarine4\")\n longueur = 200\n largeur = 200\n nbre_carres = 3\n angle_entre_carres = 15\n for i in range(nbre_carres):\n trace_rectangle(tortue_1, longueur, largeur)\n tortue_1.left(angle_entre_carres * (i + 1))\n\n turtle.exitonclick() # Empêche la fenêtre de se fermer automatiquement à la fin du tracé",
"def draw_flower():\n turtle.right(45)\n draw_petal()\n turtle.right(90)\n draw_petal()\n turtle.right(90)\n draw_petal()\n turtle.right(90)\n draw_petal()\n turtle.right(135)\n turtle.forward(150)",
"def drawRectangle_1():\n Lucia.color(\"green\",\"yellow\") # Sets the pen color to green and fill color to yellow\n Lucia.seth(90) # Set the initial orientation of the turtle to 0 degrees\n Lucia.begin_fill()\n Lucia.forward(50) # Move the turtle forward by 50 units in the direction that it was pointing\n Lucia.left(90) # Turn the turtle left by 90 degrees relative to the direction it was pointing\n Lucia.forward(100) # Move the turtle forward by 100 units\n Lucia.left(90)\n Lucia.forward(50)\n Lucia.left(90)\n Lucia.forward(100)\n Lucia.left(90) # Make sure the turtle is oriented back to its initial orientation\n Lucia.end_fill()",
"def shapes():\r\n turtle.up()\r\n turtle.forward(500)\r\n turtle.down()\r\n draw_hexagon()\r\n draw_square()\r\n draw_triangle()",
"def __init__(self):\r\n pen.up()\r\n pen.setheading(0)\r\n pen.hideturtle()\r\n turtle.title(\"My name\")\r\n pen.speed(0)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)",
"def main():\n # Your code here\n draw_graph(turtle, -500, -200, 0)"
] | [
"0.68280035",
"0.6453635",
"0.6225176",
"0.6220096",
"0.5967827",
"0.59463257",
"0.5936159",
"0.59193295",
"0.5886304",
"0.58368546",
"0.5714807",
"0.5633327",
"0.5586595",
"0.55554694",
"0.552829",
"0.5502977",
"0.5502977",
"0.5468977",
"0.5448978",
"0.5388961",
"0.5375666",
"0.53516215",
"0.5297699",
"0.5274558",
"0.52661216",
"0.5258611",
"0.5241604",
"0.5207058",
"0.5205266",
"0.5188304"
] | 0.7149174 | 0 |
Add a pen to this window. | def _addPen(self,pen):
    assert (type(pen) == Pen), "Parameter %s is not a valid graphics pen" % `pen`
self._pencils.append(pen) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pen(self, width=None, rgb=None, alpha=None):\n self.call('pen', width, rgb, alpha)",
"def pen(self, pen=None, **pendict):\n _pd = {\"shown\" : self._shown,\n \"pendown\" : self._drawing,\n \"pencolor\" : self._pencolor,\n \"fillcolor\" : self._fillcolor,\n \"pensize\" : self._pensize,\n \"speed\" : self._speed,\n \"resizemode\" : self._resizemode,\n \"stretchfactor\" : self._stretchfactor,\n \"outline\" : self._outlinewidth,\n \"tilt\" : self._tilt\n }\n\n if not (pen or pendict):\n return _pd\n\n if isinstance(pen, dict):\n p = pen\n else:\n p = {}\n p.update(pendict)\n\n _p_buf = {}\n for key in p:\n _p_buf[key] = _pd[key]\n\n if self.undobuffer:\n self.undobuffer.push((\"pen\", _p_buf))\n\n newLine = False\n if \"pendown\" in p:\n if self._drawing != p[\"pendown\"]:\n newLine = True\n if \"pencolor\" in p:\n if isinstance(p[\"pencolor\"], tuple):\n p[\"pencolor\"] = self._colorstr((p[\"pencolor\"],))\n if self._pencolor != p[\"pencolor\"]:\n newLine = True\n if \"pensize\" in p:\n if self._pensize != p[\"pensize\"]:\n newLine = True\n if newLine:\n self._newLine()\n if \"pendown\" in p:\n self._drawing = p[\"pendown\"]\n if \"pencolor\" in p:\n self._pencolor = p[\"pencolor\"]\n if \"pensize\" in p:\n self._pensize = p[\"pensize\"]\n if \"fillcolor\" in p:\n if isinstance(p[\"fillcolor\"], tuple):\n p[\"fillcolor\"] = self._colorstr((p[\"fillcolor\"],))\n self._fillcolor = p[\"fillcolor\"]\n if \"speed\" in p:\n self._speed = p[\"speed\"]\n if \"resizemode\" in p:\n self._resizemode = p[\"resizemode\"]\n if \"stretchfactor\" in p:\n sf = p[\"stretchfactor\"]\n if isinstance(sf, (int, float)):\n sf = (sf, sf)\n self._stretchfactor = sf\n # if \"shearfactor\" in p:\n # self._shearfactor = p[\"shearfactor\"]\n if \"outline\" in p:\n self._outlinewidth = p[\"outline\"]\n if \"shown\" in p:\n self._shown = p[\"shown\"]\n if \"tilt\" in p:\n self._tilt = p[\"tilt\"]\n \n self._update()",
"def SetConnectionPen(self, pen):\r\n\r\n self._dottedPen = pen\r\n self._dirty = True",
"def SetPen(*args):\n return _gdi_.GraphicsContext_SetPen(*args)",
"def SetPen(*args, **kwargs):\n return _gdi_.DC_SetPen(*args, **kwargs)",
"def setPen(self, *args, **kwargs):\n if kwargs == {} and (args == () or args == ('default',)):\n self.opts['pen'] = fn.mkPen(getConfigOption('foreground'))\n else:\n self.opts['pen'] = fn.mkPen(*args, **kwargs)\n\n self.picture = None\n self.update()",
"def SetPen(*args, **kwargs):\n return _gdi_.PseudoDC_SetPen(*args, **kwargs)",
"def penup(self):\n if not self._drawing:\n return\n self.pen(pendown=False)",
"def set_pen_color(self, color: tuple) -> Rectangle:\n self.pen.color = color\n return self",
"def test_set_pen(self):\n painter = biotracker.QPainter()\n painter.setPen(100, 50, 30, 33)\n self.assertEqual(\"p(100,50,30,33)\", painter.to_msg())",
"def SetBorderPen(self, pen):\r\n\r\n self._borderPen = pen\r\n self.RefreshSelected()",
"def pensize(self, width):\n self._penwidth = width",
"def __init__(self, *args, **kwargs):\n _gdi_.GraphicsPen_swiginit(self,_gdi_.new_GraphicsPen(*args, **kwargs))",
"def setPenColor( self, color ):\n self._penColor = QColor(color)\n self.setDirty()",
"def CreatePen(*args, **kwargs):\n return _gdi_.GraphicsRenderer_CreatePen(*args, **kwargs)",
"def CreatePen(*args, **kwargs):\n return _gdi_.GraphicsContext_CreatePen(*args, **kwargs)",
"def draw(self, renderer):\n renderer.drawRect(pyui.colors.black, self.windowRect)\n renderer.drawText( \"Strokes: %d\" % len(self.strokes), (650,50), pyui.colors.white)\n for start, end, color in self.strokes:\n renderer.drawLine(start[0], start[1], end[0], end[1], color)",
"def add_draw(self, draw):\n self.draws.append(draw)",
"def add_brush(self, item: 'Solid') -> None:\n self.brushes.append(item)",
"def draw(self, win):\n self.rect.draw(win)\n self.text.draw(win)",
"def test_set_pen_noalpha(self):\n painter = biotracker.QPainter()\n painter.setPen(100, 50, 30)\n self.assertEqual(\"p(100,50,30,255)\", painter.to_msg())",
"def setSymbol(self, \n symbolStyle=None, \n brushColor=None, brushStyle=None, \n penColor=None, penWidth=None, penStyle=None, \n symbolHeight=None, symbolWidth=None):\n for item in self.__selectedCurves:\n oldSymbol = item.symbol()\n if symbolStyle is None:\n symbolStyle = oldSymbol.style()\n if brushColor is None:\n brushColor = oldSymbol.brush().color()\n if brushStyle is None:\n brushStyle = oldSymbol.brush().style()\n if penColor is None:\n penColor = oldSymbol.pen().color()\n if penWidth is None:\n penWidth = oldSymbol.pen().width()\n if penStyle is None:\n penStyle = oldSymbol.pen().style()\n if symbolHeight is None:\n symbolHeight = oldSymbol.size().height()\n if symbolWidth is None:\n symbolWidth = oldSymbol.size().width()\n pen = QtGui.QPen(penColor, penWidth, penStyle)\n symbol = Qwt.QwtSymbol(symbolStyle, oldSymbol.brush(), pen, QtCore.QSize(width, height)) \n item.setSymbol(symbol)\n self.replot()",
"def draw_but(self, window):\n # draws the rectangular button\n p1 = graphics.Point(self.cen_point_x - self.width / 2, \n self.cen_point_y - self.height / 2)\n p2 = graphics.Point(self.cen_point_x + self.width / 2, \n self.cen_point_y + self.height / 2)\n self.button = graphics.Rectangle(p1, p2)\n self.button.setOutline(\"Orange\")\n self.button.draw(window)\n \n # draws the text on the button\n self.text.draw(window)",
"def draw_s(self):\r\n pen.down()\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(20)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.left(90)\r\n pen.forward(50)",
"def draw(self):\n if self.is_clicked:\n pg.draw.circle(self.window, self.color, (self.x, self.y), self.r, 0)\n else:\n pg.draw.circle(self.window, self.color, (self.x, self.y), self.r, 1)",
"def __init__(self, *args, **kwargs):\n _gdi_.Pen_swiginit(self,_gdi_.new_Pen(*args, **kwargs))",
"def paint(self):\r\n self.win.bkgd(\" \", COLOR_PAIR[\"con_text\"])",
"def draw_a(self):\r\n pen.down()\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.right(90)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(20)\r\n pen.right(90)\r\n pen.down()\r\n pen.forward(40)\r\n pen.up()\r\n pen.left(90)\r\n pen.forward(20)\r\n pen.left(90)\r\n pen.forward(50)",
"def draw_n(self):\r\n pen.down()\r\n pen.left(90)\r\n pen.forward(40)\r\n pen.right(135)\r\n pen.forward(1.414*40)\r\n pen.left(135)\r\n pen.forward(40)\r\n pen.up()\r\n pen.back(40)\r\n pen.right(90)\r\n pen.back(40)\r\n pen.forward(50)",
"def pencolor(self, *args):\n if args:\n color = self._colorstr(args)\n if color == self._pencolor:\n return\n self.pen(pencolor=color)\n else:\n return self._color(self._pencolor)"
] | [
"0.6879283",
"0.6544037",
"0.6461168",
"0.63628376",
"0.629559",
"0.62903273",
"0.6202111",
"0.61660004",
"0.61124593",
"0.5987985",
"0.5954313",
"0.59414625",
"0.58582664",
"0.5821973",
"0.57936865",
"0.5761476",
"0.5666378",
"0.55963165",
"0.55450845",
"0.54638934",
"0.5450481",
"0.5449085",
"0.5416598",
"0.53866947",
"0.53828204",
"0.53809124",
"0.53408736",
"0.53077817",
"0.52982634",
"0.52855396"
] | 0.7343928 | 0 |
Remove a pen from this window. | def _removePen(self,pen):
if pen in self._pencils:
self._pencils.remove(pen) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __del__(self):\n self._screen._removePen(self)\n del self._turtle",
"def remove(self) -> None:\n self.map.remove_brush(self)",
"def removePick(self):\n self.pnt = None\n vtkRenWin.delMarker(self.renWin)",
"def remove_brush(self, brush: 'Solid') -> None:\n try:\n self.brushes.remove(brush)\n except ValueError:\n pass # Already removed.",
"def penup(self):\n if not self._drawing:\n return\n self.pen(pendown=False)",
"def delwin(self):\n\t\tfor c in self.components:\n\t\t\tc.delwin()\n\t\tself.win = None",
"def RemoveShape(self, *args):\n return _XCAFDoc.XCAFDoc_ShapeTool_RemoveShape(self, *args)",
"def remove_curve(self, name):\n self._curve_reg.__delitem__(name)",
"def remove_drawing_rect(self):\n self.drawing_rect = QPolygonF()\n if self.connecting_rect:\n self.connecting_rect.setVisible(False)\n self.connecting_rect = None\n self.first_draw = True",
"def delete_current_shape(self):\n print(\"deleting shape!\")\n self.shapes.remove(self.current_shape)\n self.current_shape = None\n self.changed()",
"def remove_animation(attr):\n pm.cutKey(attr, clear=True)",
"def undraw(self):\n \n if not self.canvas: return\n if not self.canvas.isClosed():\n #self.canvas.delete(self.id)\n _tkExec(self.canvas.delete, self.id)\n if self.canvas.autoflush:\n #_root.update()\n _tkCall(_root.update)\n pass\n self.canvas = None\n self.id = None",
"def removeDisplayOverrides(self, primPath):\n with self.editInPrimStateLayer():\n self._stage.RemovePrim(primPath)",
"def removeScene(self):\n del self.scene, self.imgPixmapItem",
"def OnRemoveAutomation(self, event, automation):\n\n self.app.RemoveAutomation(automation)\n for child in self.GetChildren():\n child.Destroy()\n\n self.Draw()",
"def __del__(self):\n self.clear()\n self._screen._removeTurtle(self)\n del self._turtle",
"def erase_plot(self, line_position=0):\n self.axplot.lines.pop(line_position).remove\n self.fig.canvas.draw()\n return",
"def remove_canvas(self,):\r\n # reset plot view beofre change\r\n self.canvas.toolbar.home()\r\n # remove widgets from canvas_vlayout\r\n self.canvas_vlayout.removeWidget(self.toolbar)\r\n self.toolbar.close()\r\n self.canvas_vlayout.removeWidget(self.canvas)\r\n self.canvas.close()",
"def remove_button(self):\n self.scene.remove_child(self.toggle_button_el)",
"def __remove_brick(self, g_object):\n if type(g_object) == GRect:\n self.__window.remove(g_object)\n self.__bricks_total -= 1\n self.__score += 1\n self.__set_record_board()",
"def removeTooltip(self): \n if self.tooltipWindow:\n self.window.remove_child(self.tooltipWindow) \n self.tooltipWindow.destroy ()\n self.tooltipWindow = None",
"def pointer_clear_focus(self) -> None:\n\n return lib.wlr_seat_pointer_clear_focus(self._ptr)",
"def cleanup(self):\r\n\r\n # Remove strip from window.\r",
"def del_curve(self, key):\n del self[key]\n del self._labels[key]",
"def remove(self):\n\n\t\t\t\tself.parent.thing.remove_sheet(self.thing)\n\t\t\t\tdel self.parent[self.label]",
"def remove_piece(self, piece):\n\n self._active_pieces[piece.get_color()].remove(piece)",
"def pop_focus(self):\n self._focus.pop()",
"def delX(self):\n del self.components[0]",
"def delX(self):\n del self.components[0]",
"def cog_unload(self):\n self._get_sketch_prompt.cancel()"
] | [
"0.6923281",
"0.6495933",
"0.62122864",
"0.616254",
"0.60080194",
"0.590251",
"0.56725013",
"0.56136733",
"0.5546344",
"0.55072165",
"0.5487803",
"0.54865557",
"0.5449742",
"0.5432165",
"0.5357028",
"0.53393936",
"0.53330404",
"0.52942204",
"0.52776873",
"0.5274253",
"0.5271509",
"0.5266129",
"0.5238867",
"0.5234722",
"0.5217645",
"0.52117854",
"0.5207225",
"0.5206207",
"0.5206207",
"0.52000946"
] | 0.74714136 | 0 |
Sets the maximum size for this window. Any attempt to resize a dimension beyond the maximum size will fail. | def setMaxSize(self,width,height):
assert (type(width) == int), "width %s is not an int" % `width`
assert (width > 0), "width %s is negative" % `width`
assert (type(height) == int), "height %s is not an int" % `height`
assert (height > 0), "height %s is negative" % `height`
self._frame._root.maxsize(width,height) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_max_size(self, size):\n # The hard Qt limit is 16777215 (which is 2**24 - 1) and will\n # print warnings to the shell if we attemp to set a max size\n # over that amount. This can be attempted when a QtMainWindow\n # has a central widget size equal to max size, and it also has\n # a menu bar and other components. Clipping the max size like\n # this will not have an effect on layout computation and thus\n # is relatively safe.\n max_width, max_height = size\n max_width = min(max_width, 16777215)\n max_height = min(max_height, 16777215)\n self.widget.setMaximumSize(max_width, max_height)",
"def set_max_size(self, width: int, height: int):\n self.tk_ref.maxsize(width=width, height=height)",
"def setmaxsize(self, maxsize):\n self.maxsize = maxsize",
"def resize_to_maximum(self):\n if self.initialized:\n max_size = self._compute_maximum_size()\n self.set_max_size(max_size)\n self.resize(max_size)",
"def maximum_size(self, maximum_size):\n\n self._maximum_size = maximum_size",
"def maxsize(self, maxsize):\n self.shape = (int(maxsize), ) + self.shape[1:]\n self.clear()",
"def update_maximum_size(self):\n if self.initialized:\n max_size = self._compute_maximum_size()\n self.set_max_size(max_size)",
"def SetWindowSize(self, size):\n self.WINDOW_SIZE = size",
"def set_maxSize(self, maxSize):\n if self.__log:\n self.__logger.info(f\"Setting max size to {maxSize}\")\n self.__maxSize = maxSize # Set max size\n self.__handle_cache_size() # Adapt to new changes",
"def _maximum_size_changed(self):\n self.update_maximum_size()",
"def max_size(self):\n max_size = self.widget.maximumSize()\n return Size(max_size.width(), max_size.height())",
"def _set_size(self):\n if self.width_key is not None:\n width = config.get(self.width_key)\n height = config.get(self.height_key)\n self.window.resize(width, height)",
"def setWindowSize(self, value):\n return self._set(windowSize=value)",
"def resize(self, size):\n self.widget.resize(*size)",
"def MaxSize1(self, size):\r\n\r\n self.max_size = size\r\n return self",
"def resize(self):\n h, w = self.win.getmaxyx()\n self.maxh, self.maxw = h, w\n if w == 0 or h == 2:\n return\n self.win.resize(h, w)\n self.lpane.do_resize(h, w)\n self.rpane.do_resize(h, w)\n self.statusbar.resize(h, w)\n self.tabbar.resize(1,w)\n self.regenerate()\n self.display()",
"def set_maxItemSize(self, maxItemSize):\n if self.__log:\n self.__logger.info(f\"Setting max item size to {maxItemSize}\")\n self.__maxItemSize = maxItemSize\n self.__handle_cache_size()",
"def maximize(self):\n lib.SDL_MaximizeWindow(self._ptr)",
"def SetMaxArea(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_ShapeDivideArea_SetMaxArea(self, *args)",
"def __window_resizeTo(self, iWidth, iHeight):\n pass",
"def SetMaxArea(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_FaceDivideArea_SetMaxArea(self, *args)",
"def set_max_chunk_height(self, val=256):\n self._max_chunk_height = val",
"def _set_maximum(self):\n self._level_gen.maximum_length = self._maximum_length_spinbox.value()\n self._refresh_view()",
"def set_maximum(self, max_value):\n\n self._progress.setMaximum(max_value)",
"def set_max_position_size(\n self,\n asset=None,\n max_shares=None,\n max_notional=None,\n on_error='fail'):\n control = MaxPositionSize(asset=asset,\n max_shares=max_shares,\n max_notional=max_notional,\n on_error=on_error)\n self.register_trading_control(control)",
"def set_igv_window_size(self, width=800, height=600):\n self.set_igv_window_width(width)\n self.set_igv_window_height(height)",
"def message_box_size_limit(self, message_box_size_limit: ConfigNodePropertyInteger):\n\n self._message_box_size_limit = message_box_size_limit",
"def setMinSize(self,width,height):\n assert (type(width) == int), \"width %s is not an int\" % `width`\n assert (width > 0), \"width %s is negative\" % `width`\n assert (type(height) == int), \"height %s is not an int\" % `height`\n assert (height > 0), \"height %s is negative\" % `height`\n self._frame._root.minsize(width,height)",
"def SetMinMaxSize(self, size: (int, int)):\r\n # TODO: if the resultset have less than 400px we don't want \r\n # the space need for the vertcal scrollbar\r\n sbh = wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_X)\r\n self.SetMaxClientSize((size[0] - sbh, size[1]))\r\n self.SetMinClientSize((size[0] - sbh, size[1]))",
"def setMaxValue(self, max_value):\r\n\t\tself.MaxValue = max_value"
] | [
"0.8343203",
"0.808209",
"0.7743762",
"0.7440697",
"0.7391813",
"0.71794146",
"0.7152232",
"0.70025915",
"0.6995238",
"0.69787806",
"0.6910393",
"0.6805439",
"0.67991096",
"0.6738618",
"0.67204857",
"0.66982836",
"0.667508",
"0.66253215",
"0.6608448",
"0.6564544",
"0.65028757",
"0.64696825",
"0.6448357",
"0.6434331",
"0.641322",
"0.64088196",
"0.6398256",
"0.6396454",
"0.63957804",
"0.6385469"
] | 0.81691533 | 1 |
Sets the minimum size for this window. Any attempt to resize a dimension below the minimum size will fail. | def setMinSize(self,width,height):
assert (type(width) == int), "width %s is not an int" % `width`
assert (width > 0), "width %s is negative" % `width`
assert (type(height) == int), "height %s is not an int" % `height`
assert (height > 0), "height %s is negative" % `height`
self._frame._root.minsize(width,height) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def SetMinimumPaneSize(self, minSize):\n self._minimumPaneSize = minSize",
"def set_min_size(self, size):\n self.widget.setMinimumSize(*size)",
"def set_min_size(self, width: int, height: int):\n self.tk_ref.minsize(width=width, height=height)",
"def SetMinSize(self, s):\r\n\r\n self.min_size = wx.Size(*s)",
"def minimum_size(self, minimum_size):\n\n self._minimum_size = minimum_size",
"def setMinimumWidth( self, value ):\n self._minimumWidth = value",
"def resize_to_minimum(self):\n if self.initialized:\n min_size = self._compute_minimum_size()\n self.set_min_size(min_size)\n self.resize(min_size)",
"def update_minimum_size(self):\n if self.initialized:\n min_size = self._compute_minimum_size()\n self.set_min_size(min_size)",
"def _minimum_size_changed(self):\n self.update_minimum_size()",
"def MinSize1(self, size):\r\n self.min_size = size\r\n return self",
"def _set_size(self):\n if self.width_key is not None:\n width = config.get(self.width_key)\n height = config.get(self.height_key)\n self.window.resize(width, height)",
"def min_size(self, size):\n\n self._min_size = size\n self._is_min_size_stale = False",
"def setMinimumHeight( self, value ):\n self._minimumHeight = value",
"def min_size(self):\n min_size = self.widget.minimumSize()\n return Size(min_size.width(), min_size.height())",
"def min_pixels(self, value) -> 'Size':\n raise_not_number(value)\n self.minimum = '{}px'.format(value)\n return self",
"def minimumSizeHint(self):\n return QSize(1490, 800)",
"def MinSize2(self, x, y):\r\n\r\n self.min_size = wx.Size(x, y)\r\n return self",
"def SetWindowSize(self, size):\n self.WINDOW_SIZE = size",
"def SetMinArea(self, *args):\n return _ShapeUpgrade.ShapeUpgrade_RemoveInternalWires_SetMinArea(self, *args)",
"def GetMinimumPaneSize(self):\n return self._minimumPaneSize",
"def minimumSizeHint(self):\n height = self._dayu_size * 1.2\n return QtCore.QSize(height, height / 2)",
"def GetMinSize(self):\r\n\r\n return self.min_size",
"def setMinW(self, w):\n return self._set(minW=w)",
"def SetInitialSize(self, size=None):\n\n if size is None:\n size = wx.DefaultSize\n\n wx.Control.SetInitialSize(self, size)",
"def _compute_minimum_size(self):\n # If the user has supplied an explicit minimum size, use that.\n computed_width, computed_height = self.minimum_size\n if computed_width != -1 and computed_height != -1:\n return Size(computed_width, computed_height)\n \n # Otherwise, try to compute a default from the central widget.\n widget = self.central_widget\n if widget is not None:\n\n # If the central widget is a container, we have it compute\n # the minimum size for us, otherwise, we use the size hint\n # of the widget as the value.\n if isinstance(widget, Container):\n min_width, min_height = widget.compute_min_size()\n else:\n min_width, min_height = widget.size_hint()\n\n # If the hug and resist clip policies of the widget are\n # weaker than the resize strength of the window, then\n # we ignore its value in that direction.\n if ((widget.hug_width not in STRONGER_THAN_RESIZE) and\n (widget.resist_clip_width not in STRONGER_THAN_RESIZE)):\n min_width = -1\n \n if ((widget.hug_height not in STRONGER_THAN_RESIZE) and\n (widget.resist_clip_height not in STRONGER_THAN_RESIZE)):\n min_height = -1 \n\n if computed_width == -1:\n computed_width = min_width\n\n if computed_height == -1:\n computed_height = min_height\n \n # We use the last resort values to replace any remaining \n # -1 values. This ensures the return value will be >= 0 \n # in both width and height\n if computed_width == -1 or computed_height == -1:\n default_width, default_height = self.minimum_size_default\n if computed_width == -1:\n computed_width = default_width\n if computed_height == -1:\n computed_height = default_height\n \n return Size(computed_width, computed_height)",
"def SetMinMaxSize(self, size: (int, int)):\r\n # TODO: if the resultset have less than 400px we don't want \r\n # the space need for the vertcal scrollbar\r\n sbh = wx.SystemSettings.GetMetric(wx.SYS_VSCROLL_X)\r\n self.SetMaxClientSize((size[0] - sbh, size[1]))\r\n self.SetMinClientSize((size[0] - sbh, size[1]))",
"def defaultWindowSize(self):\n self.resize(self.defaultWindowWidth, self.defaultWindowHeight)",
"def setMaxSize(self,width,height):\n assert (type(width) == int), \"width %s is not an int\" % `width`\n assert (width > 0), \"width %s is negative\" % `width`\n assert (type(height) == int), \"height %s is not an int\" % `height`\n assert (height > 0), \"height %s is negative\" % `height`\n self._frame._root.maxsize(width,height)",
"def set_max_size(self, size):\n # The hard Qt limit is 16777215 (which is 2**24 - 1) and will\n # print warnings to the shell if we attemp to set a max size\n # over that amount. This can be attempted when a QtMainWindow\n # has a central widget size equal to max size, and it also has\n # a menu bar and other components. Clipping the max size like\n # this will not have an effect on layout computation and thus\n # is relatively safe.\n max_width, max_height = size\n max_width = min(max_width, 16777215)\n max_height = min(max_height, 16777215)\n self.widget.setMaximumSize(max_width, max_height)",
"def minimum_size(self):\n return self._minimum_size"
] | [
"0.82620406",
"0.8125616",
"0.7964996",
"0.7790341",
"0.7647176",
"0.7619907",
"0.74496925",
"0.7417188",
"0.73141354",
"0.72229654",
"0.70047134",
"0.6895434",
"0.68838006",
"0.6880474",
"0.67831236",
"0.6776431",
"0.67747736",
"0.6735547",
"0.66872287",
"0.66674685",
"0.6595648",
"0.6565349",
"0.6561743",
"0.65322226",
"0.64010483",
"0.63585526",
"0.63574594",
"0.6317511",
"0.6278679",
"0.62656796"
] | 0.81438893 | 1 |
The heading of this turtle in degrees. Heading is measured counterclockwise from due east. | def heading(self):
return float(self._turtle.heading()) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def heading(self):\n x, y = self._orient\n result = round(math.atan2(y, x)*180.0/math.pi, 10) % 360.0\n result /= self._degreesPerAU\n return (self._angleOffset + self._angleOrient*result) % self._fullcircle",
"def raw_heading(self):\n\n self._heading = math.atan2(self._mag[X], self._mag[Y])\n\n if self._heading < 0:\n self._heading += 2*math.pi\n if self._heading > 2*math.pi:\n self._heading -= 2*math.pi\n\n self._heading_degrees = round(math.degrees(self._heading),2)\n\n return self._heading_degrees",
"def heading(self) -> float:\n return self._state[2]",
"def heading_idx(self):\n if self.heading > 0:\n idx = self.heading * 180\n else:\n idx = 360 + self.heading * 180\n return int(idx - 1)",
"def heading(self):\n\n self.update()\n\n truncate = [0,0,0]\n for i in range(X, Z+1):\n truncate[i] = math.copysign(min(math.fabs(self._accel[i]), 1.0), self._accel[i])\n try:\n pitch = math.asin(-1*truncate[X])\n roll = math.asin(truncate[Y]/math.cos(pitch)) if abs(math.cos(pitch)) >= abs(truncate[Y]) else 0\n # set roll to zero if pitch approaches -1 or 1\n\n self._tiltcomp[X] = self._mag[X] * math.cos(pitch) + self._mag[Z] * math.sin(pitch)\n self._tiltcomp[Y] = self._mag[X] * math.sin(roll) * math.sin(pitch) + \\\n self._mag[Y] * math.cos(roll) - self._mag[Z] * math.sin(roll) * math.cos(pitch)\n self._tiltcomp[Z] = self._mag[X] * math.cos(roll) * math.sin(pitch) + \\\n self._mag[Y] * math.sin(roll) + \\\n self._mag[Z] * math.cos(roll) * math.cos(pitch)\n self._tilt_heading = math.atan2(self._tiltcomp[Y], self._tiltcomp[X])\n\n if self._tilt_heading < 0:\n self._tilt_heading += 2*math.pi\n if self._tilt_heading > 2*math.pi:\n self._heading -= 2*math.pi\n\n self._tilt_heading_degrees = round(math.degrees(self._tilt_heading),2)\n return self._tilt_heading_degrees\n\n except Exception:\n return None",
"def current_heading():\n global current_pose\n while current_pose is None:\n pass\n x = current_pose.pose.orientation.x\n y = current_pose.pose.orientation.y\n z = current_pose.pose.orientation.z\n w = current_pose.pose.orientation.w\n\n t3 = +2.0 * (w * z + x * y)\n t4 = +1.0 - 2.0 * (y * y + z * z)\n yaw_z = math.atan2(t3, t4)\n heading = math.degrees(yaw_z) - 90\n if heading < 0:\n heading += 360\n return heading",
"def heading_at(self, longitudinal: float) -> float:\n raise NotImplementedError()",
"def getH(self):\n\t\thAngle = (math.atan2(self.y,self.x))/(2*math.pi)\n\t\tif self.y < 0:\n\t\t\thAngle = 1 + hAngle\t\n\t\treturn hAngle",
"def getPosHeading(self) :\n\t\treturn (self.avatarNP.getX(), self.avatarNP.getY(), \\\n\t\t\tself.avatarNP.getZ(), (self.avatarNP.getHpr()[0])%360)",
"def getHeadingTime(self) -> float:\n return self.timestep_cached_heading_tm",
"def __get_heading(self, robot_position, robot_yaw):\n abs_heading = math.atan2(self.pinger_loc[1] - robot_position[1],\n self.pinger_loc[0] - robot_position[0])\n return self.normalize(\n abs_heading - robot_yaw + random.gauss(0, self.noise))",
"def get_EUL_Heading(self):\n eul_raw = self.i2c.mem_read(2, self.addr, OUT_EUL_HEADING_LSB)\n eul_heading = self.sign_val(((eul_raw[1]<<8) + eul_raw[0]))/16.0\n return eul_heading\n #print(eul_heading)",
"def get_heading(hunter_position, target_position):\n hunter_x, hunter_y = hunter_position\n target_x, target_y = target_position\n heading = atan2(target_y - hunter_y, target_x - hunter_x)\n heading = angle_trunc(heading)\n return heading",
"def get_heading(hunter_position, target_position):\n hunter_x, hunter_y = hunter_position\n target_x, target_y = target_position\n heading = atan2(target_y - hunter_y, target_x - hunter_x)\n heading = angle_trunc(heading)\n return heading",
"def Get_Heading(x1, y1, x2, y2):\n\n heading = 0\n dx = x2 - x1\n dy = y2 - y1\n\n if dx != 0:\n heading = (90 - math.degrees(math.atan2(dy,dx)) + 360) % 360\n\n elif dy > 0: heading = 0\n\n elif dy < 0: heading = 180\n\n return heading",
"def heading_difference(self, other_heading):\n diff = abs(self.heading - other_heading)\n if diff > 180:\n diff = 360 - diff\n return diff",
"def Get_Heading_Change(heading_last, heading_current):\n r = heading_current - heading_last + 180\n return (r % 360) - 180",
"def theta_deg(self):\n return self.theta * 180 / np.pi",
"def course(self) -> float:\n crab_angle = np.arctan2(self.velocity[1], self.velocity[0])\n return self.heading + crab_angle",
"def calculate_heading(self):\r\n self.radius = Bullet.side # for collision detection\r\n self.angle += self.boss.turretAngle\r\n self.mass = Bullet.mass\r\n self.vel = Bullet.vel\r\n\r\n # Designing a Bullet\r\n image = pygame.Surface((Bullet.side * 2, Bullet.side))\r\n image.fill((128, 128, 128))\r\n pygame.draw.rect(image, (252, 65, 3), (0, 0, int(Bullet.side * 1.5), Bullet.side))\r\n pygame.draw.circle(image, self.color, (int(self.side * 1.5), self.side // 2), self.side // 2)\r\n image.set_colorkey((128, 128, 128))\r\n\r\n # Converting bullet surface to image\r\n self.image0 = image.convert_alpha()\r\n self.image = pygame.transform.rotate(self.image0, self.angle)\r\n self.rect = self.image.get_rect()\r\n\r\n # Positioning of bullet\r\n self.dx = math.cos(degrees_to_radians(self.boss.turretAngle)) * self.vel\r\n self.dy = math.sin(degrees_to_radians(-self.boss.turretAngle)) * self.vel",
"def get_heading(self):\n return self.__heading",
"def get_heading(self):\n return self.__heading",
"def hp(self):\n return float(self.hp_angle)",
"def degrees(self) -> float:\n return math.degrees(self.radians)",
"def heading(self):\n return self._heading",
"def orientationToHeading(orientation):\n res = [0, 0, 0, 0]\n res[0] = orientation.x\n res[1] = orientation.y\n res[2] = orientation.z\n res[3] = orientation.w\n return tf.transformations.euler_from_quaternion(res)[2]",
"def getHeading(self, request, context):\n \n return droneconnect_pb2.Heading(heading = float(self.vehicle.heading))",
"def GetHeading(self):\n if self._imu_data == None:\n # No data yet.\n return 0\n\n if self._thread_state != 0:\n # IMU down.\n return None\n\n return self._imu_data['fusionPose'][2]",
"def _calc_cycle_delta_heading(self):\n first_frame = self._frames[0]\n last_frame = self._frames[-1]\n\n rot_start = self.get_frame_root_rot(first_frame)\n rot_end = self.get_frame_root_rot(last_frame)\n inv_rot_start = transformations.quaternion_conjugate(rot_start)\n drot = transformations.quaternion_multiply(rot_end, inv_rot_start)\n cycle_delta_heading = motion_util.calc_heading(drot)\n\n return cycle_delta_heading",
"def angle(self) -> float:\n ..."
] | [
"0.85323304",
"0.7825337",
"0.77631426",
"0.7465228",
"0.7281472",
"0.7165995",
"0.70578945",
"0.6940017",
"0.6924136",
"0.6804612",
"0.6803771",
"0.677972",
"0.6762503",
"0.6762503",
"0.67530805",
"0.6701139",
"0.66272557",
"0.65986854",
"0.6573453",
"0.6512578",
"0.65064764",
"0.65064764",
"0.64914626",
"0.6480391",
"0.6439444",
"0.64054114",
"0.63982445",
"0.6391681",
"0.6342559",
"0.62989235"
] | 0.87159824 | 0 |
Indicates whether the turtle's icon is visible. Drawing commands will still work while the turtle icon is hidden. There will just be no indication of the turtle's current location on the screen. | def visible(self):
return self._turtle.isvisible() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_visible(self):",
"def is_visible(self):\n return self._visible",
"def isVisible( self ):\n layer = self.layer()\n if ( layer and not layer.isVisible() ):\n return False\n# \n# if ( self.isIsolateHidden() ):\n# return False\n# \n return self._visible",
"def is_visible(self):\n return self.container['is_visible']",
"def is_visible(self):\n return self.proto.display_type == DISPLAY_TYPE.Visible.value",
"def is_ruler_visible(self):\n return self.container['is_ruler_visible']",
"def is_visible(self):\n return self.rect.x < self.screen_rect.width",
"def is_visible(self, path):\n return True",
"def is_visible(self):\n return self.real > 0",
"def isVisible(self):\n\t\treturn True",
"def is_visible(self):\n return self._currently_shown",
"def is_element_visible(self):\n if self.web_element.is_displayed():\n return True\n else:\n return False",
"def set_visible(self):\n\t\tself.hide()\n\t\tself.__sys_tray_icon.setVisible(True)",
"def is_visible(self, position, size=0):\n # return True\n size /= self.scale # size is in pixel\n in_x = (self.focus.x + self.offset.x / self.scale - size <=\n position.x <=\n self.focus.x - self.offset.x / self.scale + size)\n in_y = (self.focus.y + self.offset.y / self.scale - size <=\n position.y <=\n self.focus.y - self.offset.y / self.scale + size)\n # if name == \"earth\":\n # print(\"{:+e} {:+e} {}\".format(self.focus.y + self.offset2.y\n # , position.y, in_y))\n # print(\"{:+e} {:+e}\".format(self.focus.x, self.focus.y))\n return in_x and in_y",
"def visible(self):\n return self._visible",
"def visible(self):\n return self._visible",
"def get_visible(self):\n return self._visible",
"def is_visible(self):\n try:\n return self.element.is_displayed()\n except (NoSuchElementException,\n ElementNotVisibleException,\n StaleElementReferenceException):\n return False",
"def isShown(self):\n return self.shown",
"def inspectedNodeIsVisible(self):\n return self._inspected_node_is_visible",
"def is_visible ( self ):\n return not self.is_hidden and (\n self.priority is None or self.priority >= 0\n )",
"def is_alive(self):\r\n return self.visible",
"def show(self):\r\n if self.visible == 1 and time() - self.lastMotion > self.delay:\r\n self.visible = 2\r\n if self.visible == 2:\r\n self.deiconify()",
"def show(self):\r\n if self.visible == 1 and time() - self.lastMotion > self.delay:\r\n self.visible = 2\r\n if self.visible == 2:\r\n self.deiconify()",
"def IsShown(self):\r\n\r\n return self._shown",
"def show(self):\n if self.visible == 1 and time() - self.lastMotion > self.delay:\n self.visible = 2\n if self.visible == 2:\n self.deiconify()",
"def is_hidden():\n return False",
"def is_hidden():\n return False",
"def is_visible(self, url=''):\n return bool(url)",
"def IsHidden(self):\r\n\r\n return self._hidden"
] | [
"0.71561986",
"0.6992037",
"0.69445086",
"0.6929709",
"0.689857",
"0.6858378",
"0.67312574",
"0.6723745",
"0.6712027",
"0.66735023",
"0.6668584",
"0.66361713",
"0.66052437",
"0.6484988",
"0.648333",
"0.648333",
"0.64757276",
"0.6454804",
"0.631289",
"0.6297589",
"0.62900037",
"0.6249281",
"0.6192523",
"0.6192523",
"0.6188453",
"0.6182069",
"0.61766773",
"0.61766773",
"0.6164323",
"0.6163558"
] | 0.7854587 | 1 |
Indicates whether the turtle is in draw mode. All drawing calls are active if and only if this mode is True | def drawmode(self):
return self._turtle.isdown() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def isdrawn(self):\n return hasattr(self, 'drawn')",
"def setDrawingMode(self):\n pass",
"def get_drawing_mode(self) -> int:\n return self._drawing_mode",
"def draw(self, canvas) -> bool:\n return False",
"def _set_draw_mode(draw_mode):\n###############################################################################\n global _draw_mode\n _draw_mode = draw_mode",
"def conditionsAreMetForDrawing(self):\n\t\tcurrentController = self.controller.view().window().windowController()\n\t\tif currentController:\n\t\t\ttool = currentController.toolDrawDelegate()\n\t\t\ttextToolIsActive = tool.isKindOfClass_( NSClassFromString(\"GlyphsToolText\") )\n\t\t\thandToolIsActive = tool.isKindOfClass_( NSClassFromString(\"GlyphsToolHand\") )\n\t\t\tif not textToolIsActive and not handToolIsActive: \n\t\t\t\treturn True\n\t\treturn False",
"def draw (self, screen):\n drew = bool(self.draw_fn(self, screen, self.dirty))\n self.dirty = False\n return drew",
"def isdown(self):\n return self._drawing",
"def draw2DOutlineEnabled(self):\n\n opts = self.opts\n overlay = self.overlay\n\n return ((overlay.trimesh is not None) and\n (opts.outline or opts.vertexData is not None))",
"def GetDrawOption(self):\n return self._drawoption",
"def draw(self):\n return self._draw",
"def on_draw(self):\n return self._on_draw",
"def setPrimDrawMode(self, primPath, drawMode):\n prim = self._stage.GetPrimAtPath(primPath)\n if not primPath.IsValid():\n return False\n\n if drawMode == self.DrawMode.inherit:\n prim.RemoveProperty(self.drawModeAttribute)\n return True\n if drawMode == self.DrawMode.geometry:\n prim.GetAttribute(self.drawModeAttribute).Clear()\n return True\n if drawMode == self.DrawMode.boundingBox:\n prim.GetAttribute(self.drawModeAttribute).Set(Vt.Token(\"bounds\"))\n return True\n\n return False",
"def get_active(self):\n if hasattr(self, 'canvas'):\n return True\n else:\n return False",
"def can_draw(self,point):\n if point <= 0:\n return False\n else:\n return True",
"def _sketch_mode(self):\r\n self._mode_select(1)",
"def check_mode(self):\n if self.proximity.check_press():\n self.cycle_mode()\n return self.mode",
"def save_drawing_if_necessary(self):\n\n app_doc_data = AppDocData.instance()\n if app_doc_data.activeDrawing and app_doc_data.activeDrawing.modified:\n #if QMessageBox.Yes == QMessageBox.question(self, self.tr(\"Question\"),\n # self.tr(\"Do you want to save drawing?\"),\n # QMessageBox.Yes | QMessageBox.No):\n # self.actionSaveCliked()\n # return True\n if QMessageBox.Ignore == QMessageBox.question(self, self.tr('Continue?'),\n self.tr('Changes may not have been saved.'),\n QMessageBox.Ignore | QMessageBox.Cancel):\n return False\n return True",
"def draw (self):\n screen = self.screen\n dirty = False\n for display in self.displays:\n dirty |= display.draw(screen)\n return dirty",
"def is_graphic_driver(self):\n if self.class_id == \"0x03\":\n return True\n else:\n return False",
"def _isoff(self):\n return self.dp.state()==PyTango.DevState.OFF",
"def checkDraw(self) -> D:\n if self.board.positions.count(\" \") == 0:\n print(\"DRAW!\")\n return True",
"def drawCells(self):\r\n self.drawing = not self.drawing\r\n if self.drawing:\r\n self.draw_button['text'] = \"No Draw\"\r\n else:\r\n self.draw_button['text'] = \"Draw\"",
"def game_draw(self):\n pass",
"def is_rendering_enabled():\n return _rendering_enabled",
"def is_canvas(self):\n return self.canvas._isCanvas",
"def toggle_draw_axes(self):\n if self.draw_axes:\n self.draw_axes = False\n else:\n self.draw_axes = True\n self.redraw()",
"def toggle_draw_axes(self):\n if self.draw_axes:\n self.draw_axes = False\n else:\n self.draw_axes = True\n self.redraw()",
"def draw(_user_id):\n _board = boards[_user_id]\n return _board.can_claim_draw()",
"def draw(self):\n\n for row in self._board:\n for slot in row:\n if slot == 0:\n return False\n print \"It's a draw!\"\n return True"
] | [
"0.7248577",
"0.6596341",
"0.6579881",
"0.6529058",
"0.65262514",
"0.64363253",
"0.6411703",
"0.6403673",
"0.6296374",
"0.62232405",
"0.6160772",
"0.60842884",
"0.5986026",
"0.5977863",
"0.59161794",
"0.5741559",
"0.5738247",
"0.56827796",
"0.5679077",
"0.56439203",
"0.56351316",
"0.5596153",
"0.55085737",
"0.5489354",
"0.54806435",
"0.5447787",
"0.54397124",
"0.54397124",
"0.5431791",
"0.54225755"
] | 0.8096658 | 0 |
Deletes this turtle object. | def __del__(self):
self.clear()
self._screen._removeTurtle(self)
del self._turtle | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __del__(self):\n self._screen._removePen(self)\n del self._turtle",
"def delete(self):\n self.graph._del(handle=self.handle)",
"def remove(self):\n self.node.destroy()",
"def delete(self):\n\t\tself.canvas.delete('node_'+self.identifier)\n\t\tself.canvas.tag_unbind('node_'+self.identifier,\"<Any>\")",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def delete(self):\n self._vertex_list.delete()\n self._vertex_list = None",
"def delete(self):\n del self.shx.atoms[self.index]",
"def delete(self):\n # exit contains our clean up code\n self.exit()\n GenericAnimatedProp.GenericAnimatedProp.delete(self)",
"def remove(self):\r\n\t\tself._delete()",
"def delete(self) -> None:\n shutil.rmtree(self.path)",
"def __del__(self):\n try:\n self._frame._destroy()\n except:\n pass\n self._turtles = []\n self._pencils = []\n del self._frame",
"def destroy(self):\n for node in self.find_references():\n node.destroy()\n self._bld.RemoveObject(self.get_sobj())",
"def clear(self):\n self._turtle.clear()",
"def clear(self):\n self._turtle.clear()",
"def __del__(self) -> None:\n self.delete()",
"def delete(self):\n if self.shape is not None:\n self.shape.delete()\n if self in shared.obstacles:\n shared.obstacles.remove(self)",
"def destroy(self):\r\n self._obj.destroy()\r\n self._obj = None"
] | [
"0.72901684",
"0.69489294",
"0.6906722",
"0.68213683",
"0.68130356",
"0.68130356",
"0.68130356",
"0.68130356",
"0.68130356",
"0.68130356",
"0.68130356",
"0.68130356",
"0.68130356",
"0.68130356",
"0.68130356",
"0.68130356",
"0.68130356",
"0.68130356",
"0.6793991",
"0.6787353",
"0.6783469",
"0.67716306",
"0.67514575",
"0.6730611",
"0.6634641",
"0.6629618",
"0.6629618",
"0.65821636",
"0.65815085",
"0.65547854"
] | 0.79840356 | 0 |
Moves the turtle forward by the given amount. | def forward(self,distance):
assert (type(distance) in [int, float]), "parameter distance:%s is not a valid number" % `distance`
self._turtle.forward(distance) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def forward(self, amount):\n newX = self._x + round(amount * math.sin(math.radians(self._rotation)), 2)\n newY = self._y - round(amount * math.cos(math.radians(self._rotation)), 2)\n self.goto(newX, newY)",
"def advance(self, amount=1):\n self._current += amount\n self.redraw()",
"def advance_by(self, amount: float):\n if amount < 0:\n raise ValueError(\"cannot retreat time reference: amount {} < 0\"\n .format(amount))\n self.__delta += amount",
"def move_turtle(self):\n self.forward(self.move_speed)",
"def move_forward(self, distance):\r\n return self.move('forward', distance)",
"def advance(self, amount):\n raise NotImplementedError()",
"def forward(self):\n self.position += 1",
"def advance(self, amount=1):\n raise NotImplementedError()",
"def move_forward(self, distance):\n quad_offset = self.quad_offset_mapping['forward']\n client.moveByVelocityAsync(self.velocity * quad_offset[0], self.velocity * quad_offset[1],\n 0.15, distance/self.velocity).join()\n # if self.logging:\n # self.log_arr.append(\"forward\")",
"def advance(self, amount=1):\n self._current += amount\n if self._current - self._updateRate >= self._lastUpdated:\n self.redraw()\n # go to nearest multiple of updateRate less than current\n self._lastUpdated = (self._current // self._updateRate)*self._updateRate",
"def move(self,amount):\n self.positionx=self.positionx+self.amount\n return self.positionx",
"def advance(self):\n self.amount = self._nextAmount",
"def move_forward():\n pass",
"def backward(self, amount):\n newX = self._x - round(amount * math.sin(math.radians(self._rotation)), 2)\n newY = self._y + round(amount * math.cos(math.radians(self._rotation)), 2)\n self.goto(newX, newY)",
"def step_to(self, direction):\n s = self\n s.steps += s.speed\n s.physics.move_bomberman(self, direction)\n s.update_pos()",
"def forward(self, speed):\n self.controller.forward(speed)",
"def move_forward(self,length,draw=True):\r\n new_x = self.x + length * math.cos(math.radians(self.angle))\r\n new_y = self.y + length * math.sin(math.radians(self.angle))\r\n self.draw_tool.line(((self.x,self.y),(new_x,new_y)), fill=(0,0,0),width=2)\r\n self.x = new_x\r\n self.y = new_y",
"def move(self,amount):\n angle=self.dirction/180*math.pi\n self.postionx += amount*math.cos(angle)\n self.postiony += amount*math.sin(angle)",
"def forward(self, step):\r\n x = self.pos_x + math.cos(math.radians(self.rotation)) * step\r\n y = self.pos_y + math.sin(math.radians(self.rotation)) * step\r\n prev_brush_state = self.brush_on\r\n self.brush_on = True\r\n self.move(x, y)\r\n self.brush_on = prev_brush_state",
"def move_forward(self, speed):\n\n # Clamp the speed\n speed = clamp(delta_unit(speed), 0, delta_unit(Car.max_speed))\n\n # Appends the speed according to the direction\n rad = np.radians(self.direction)\n self.fx += speed * np.cos(rad)\n self.fy += speed * np.sin(rad)\n\n # Set marker to move\n self.moved = True",
"def move_forward(self, dist):\r\n self.send_command_without_response(f'forward {dist}')",
"def move_forward(self, val):\n val = val * 180 / math.pi\n print(\"gyro diff\", self.gyro - val)\n print(\"gyrof\", self.gyro)\n if math.fabs(self.gyro - val) > 0.6:\n if self.gyro - val > 0:\n self.om_right = self.om_right - 0.7\n self.om_left = self.om_left + 0.5\n self.set_speed(self.om_left, self.om_right)\n print(\"om_l\", self.om_left)\n print(\"om_r\", self.om_right)\n else:\n self.om_right = self.om_right + 0.3\n self.om_left = self.om_left - 0.5\n self.set_speed(self.om_left, self.om_right)\n print(\"om_l\", self.om_left)\n print(\"om_r\", self.om_right)\n else:\n self.om_right = 10\n self.om_left = 10",
"def forward(self, dist):\n start = (self.pos_x, self.pos_y)\n self.pos_x += dist * math.cos(math.radians(self.angle))\n self.pos_y += dist * math.sin(math.radians(self.angle))\n self._update_limits()\n end = (self.pos_x, self.pos_y)\n if self.pen_down:\n self.draw.line([start, end], fill=self.colour, width=self.width)",
"def move_by(self, increment):\n return self.move_to(self.position + increment)",
"def move_to(self, new_pos, pass_go=True):\r\n new_pos = new_pos % 40\r\n if self.pos > new_pos and pass_go:\r\n self.money += 200\r\n self.pos = new_pos",
"def increment(self, amount):\n pass",
"def move_by(cls, value):\n cls.set_position(cls._position + value)",
"def move_forward(self, steps):\n\t\tif self.movement <= steps:\n\t\t\tif self.heading == 0:\n\t\t\t\tself.grid_y -= steps\n\t\t\telif self.heading == 90:\n\t\t\t\tself.grid_x += steps\n\t\t\telif self.heading == 180:\n\t\t\t\tself.grid_y += steps\n\t\t\telif self.heading == 270:\n\t\t\t\tself.grid_x -= steps",
"def move(self):\r\n segments = len(self.all_turtles) - 1\r\n for i in range(len(self.all_turtles)):\r\n if segments == 0:\r\n self.all_turtles[segments].forward(MOVE_DISTANCE)\r\n else:\r\n new_x = self.all_turtles[segments - 1].xcor()\r\n new_y = self.all_turtles[segments - 1].ycor()\r\n self.all_turtles[segments].goto(new_x, new_y)\r\n segments -= 1",
"def go_forward(self, distance, speed=0.1):\n while (self._last_odom_msg == None):\n\t rospy.sleep(1.0)\n start = copy.deepcopy(self._last_odom_msg.pose.pose.position)\n rate = rospy.Rate(10)\n while self.distance_fn(self._last_odom_msg.pose.pose.position, start) < math.fabs(distance):\n direction = -1 if distance < 0 else 1\n self.move(direction * speed, 0)\n rate.sleep()"
] | [
"0.805098",
"0.7168281",
"0.71475154",
"0.7083541",
"0.6969918",
"0.6827667",
"0.682601",
"0.6692048",
"0.66343707",
"0.66327345",
"0.6570666",
"0.64380103",
"0.6395247",
"0.6392661",
"0.6286539",
"0.6284747",
"0.6278547",
"0.6266692",
"0.6242889",
"0.6207868",
"0.6171793",
"0.6100668",
"0.60597295",
"0.6040921",
"0.6037567",
"0.6027313",
"0.6005516",
"0.59614027",
"0.5951403",
"0.59485054"
] | 0.7646355 | 1 |
Moves the turtle backward by the given amount. | def backward(self,distance):
assert (type(distance) in [int, float]), "parameter distance:%s is not a valid number" % `distance`
self._turtle.backward(distance) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def backward(self, amount):\n newX = self._x - round(amount * math.sin(math.radians(self._rotation)), 2)\n newY = self._y + round(amount * math.cos(math.radians(self._rotation)), 2)\n self.goto(newX, newY)",
"def move_backward(self, distance):\r\n return self.move('back', distance)",
"def move_backward(self, dist):\r\n self.send_command_without_response(f'back {dist}')",
"def back(self, distance):\n self._go(-distance)",
"def move_backward(self, distance):\n quad_offset = self.quad_offset_mapping['backward']\n client.moveByVelocityAsync(self.velocity * quad_offset[0], self.velocity * quad_offset[1],\n 0.15, distance/self.velocity).join()\n # if self.logging:\n # self.log_arr.append(\"backward\")",
"def back(self, step):\r\n self.forward(-step)",
"def backward(self, speed):\n self.controller.reverse(speed)",
"def down(self, angle):\n self.up(-angle)",
"def backward(self, duration):\n self.set_motor(self.left_motor, 'right', 0.5)\n self.set_motor(self.right_motor, 'left', 0.5)\n time.sleep(duration)",
"def move_down(self):\n self.y -= 1",
"def down(self):\n self.move(0,-1)",
"def move_down(self, distance):\r\n return self.move('down', distance)",
"def moveDown():\n tt.right(90)\n tt.forward(60)\n tt.right(90)\n tt.forward(250)\n tt.right(180)",
"def move_down(self):\n self.move_step(1)",
"def lose(self, amount: int):\n self.win(-amount)\n return self ## fluent",
"def back(self):\n self.position -= 1",
"def move_backward():\n pass",
"def move_down(self):\n self.pitch_motor.step_forward()",
"def forward(self, amount):\n newX = self._x + round(amount * math.sin(math.radians(self._rotation)), 2)\n newY = self._y - round(amount * math.cos(math.radians(self._rotation)), 2)\n self.goto(newX, newY)",
"def go_backward(self):\n command = _build_robovac_command(RobovacModes.GO_BACKWARD, RobovacCommands.MOVE)\n message = self._build_command_user_data_message(command)\n\n self._send_packet(message, False)",
"def withdraw(self, amount):\n self.balance -= amount",
"def move_lift_down():\n return _move_lift(0.2)",
"def move_back(t,n):\n lt(t)\n bk(t, n)\n rt(t)",
"def backward(self, speed):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(speed)",
"def backward(self, speed):\n\n self.pwm_forward.ChangeDutyCycle(0)\n self.pwm_backward.ChangeDutyCycle(speed)",
"def down(self):\n if self.bottom == self.current:\n return\n else:\n self.current -= 1",
"def move_down(self):\n self.move_measurement(1)",
"def right_backward(self):\n self.right_motor.run_forever(speed_sp=-self.MAX_SPEED)",
"def backward(self, param):\n\t\tif param:\n\t\t\tself.linear_move(-1 * param * .3048)\n\t\telse:\n\t\t\tself.linear_move(-1 * riu.default_dist * .3048)",
"def move_down(self):\n client.moveByVelocityAsync(0, 0, -1, 0.3).join()\n # if self.logging:\n # self.log_arr.append(\"down\")"
] | [
"0.82581335",
"0.73676413",
"0.6859591",
"0.6773016",
"0.67576206",
"0.6722952",
"0.67022717",
"0.6687509",
"0.66345984",
"0.6610055",
"0.65762097",
"0.6467502",
"0.6444055",
"0.6443134",
"0.6412966",
"0.6340994",
"0.63096267",
"0.6303683",
"0.62736183",
"0.62425214",
"0.6214122",
"0.61998975",
"0.6198537",
"0.61974204",
"0.61974204",
"0.61464936",
"0.6139964",
"0.61381555",
"0.6136587",
"0.611242"
] | 0.78231084 | 1 |
The fill status of this pen. If the fill status is True, then the pen will fill the insides of any polygon or circle subsequently traced by its drawLine or drawCircle method. If the attribute changes, it only affects future draw commands, not past ones. Switching this attribute between True and False allows the pen to draw both solid and hollow shapes. | def fill(self):
return self._turtle.fill() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setFilled(self, fill):\n isFilled = fill\n repaint()",
"def GetFillAlpha(self):\n return self._attalpha[\"fill\"]",
"def fillcolor(self):\n return self._fillcolor",
"def setFill(self, fill):\n self.area_show = fill",
"def fill(self):\n return self[\"fill\"]",
"def fill(self):\n return self[\"fill\"]",
"def color(self):\n assert False, 'Pen does not have a color; use pencolor or fillcolor'",
"def isFilled(self):\n return self.isFilled",
"def fill(self) -> int:\n return self._fill_color",
"def filled(\n self,\n filled: FillReturn,\n fill_type: FillType,\n ax: figure | int = 0,\n color: str = \"C0\",\n alpha: float = 0.7,\n ) -> None:\n fig = self._get_figure(ax)\n color = self._convert_color(color)\n xs, ys = filled_to_bokeh(filled, fill_type)\n if len(xs) > 0:\n fig.multi_polygons(xs=[xs], ys=[ys], color=color, fill_alpha=alpha, line_width=0)",
"def write_fill(self, fill: FillFormat):\n if self.fill_type is not None:\n self._write_fill_type(fill)",
"def update_fill(self, event):\n if event.type == 'FILL':\n self.update_positions_from_fill(event)\n self.update_holdings_from_fill(event)",
"def update_fill(self, event):\n if event.type == 'FILL':\n self.update_positions_from_fill(event)\n self.update_holdings_from_fill(event)",
"def update_fill(self, event):\r\n\r\n if event.type == 'FILL':\r\n self.update_positions_from_fill(event)\r\n self.update_holdings_from_fill(event)",
"def fillcolor(self, *args):\n if args:\n color = self._colorstr(args)\n if color == self._fillcolor:\n return\n self.pen(fillcolor=color)\n else:\n return self._color(self._fillcolor)",
"def is_filled(self):\n return(self.order_master.amount==self.order_master.filled)",
"def fill_color(self) -> String:\r\n from apysc.type import value_util\r\n self._initialize_fill_color_if_not_initialized()\r\n fill_color: String = value_util.get_copy(value=self._fill_color)\r\n return fill_color",
"def getFillColor(self):\n return getColor() if (fillColor == None) else fillColor",
"def update_fill(self, event):\n if event.type == 'FILL':\n self.update_positions_from_fill(event)\n self.update_prices_from_fill(event)\n self.update_holdings_from_fill(event)",
"def fill(self, value):\n self.fill_color = value",
"def filled(self):\n return(self.order_master.filled)",
"def fill_color(self, fill_color=None):\n\n if fill_color is None:\n return self._fill_color\n else:\n self._fill_color = process_color(fill_color)",
"def fill(self, color):",
"def getFill(self):\n return self.area_show",
"def setPointFill(self, fill):\n for point in self.points:\n point.fill = fill",
"def set_green(self):\n self.fill= Cell.FILLED_COLOR_BG\n self.draw()",
"def SetFillAlpha(self, alpha):\n self._attalpha[\"fill\"] = alpha\n self.SetFillColorAlpha(self.GetFillColor(), alpha)",
"def show(self):\n stroke(*self.status.value)\n fill(*self.status.value)\n circle((self.position.x, self.position.y), radius = 7)",
"def getPointFill(self):\n l = [point.fill for point in self.points]\n if l.count(l[0]) == len(l):\n return l[0]\n else:\n raise ValueError(\"The fill attributes of the points must be the same otherwise it makes no sense.\")",
"def _switch(self):\n self.fill= not self.fill"
] | [
"0.68982977",
"0.6480413",
"0.64142907",
"0.61500674",
"0.60670793",
"0.60670793",
"0.60242504",
"0.5965748",
"0.593573",
"0.59075147",
"0.5873632",
"0.57370543",
"0.57370543",
"0.5725248",
"0.5699221",
"0.553684",
"0.55129635",
"0.549395",
"0.5492293",
"0.54508805",
"0.5449537",
"0.5435158",
"0.5419864",
"0.5395306",
"0.5337515",
"0.5331456",
"0.5316708",
"0.5293443",
"0.52748305",
"0.5247344"
] | 0.6480622 | 1 |
The pen color of this pen. The pen color is used for drawing lines and circles. All subsequent draw commands draw using this color. If the color changes, it only affects future draw commands, not past ones. This color is only used for lines and the border of circles. It is not the color used for filling in solid areas (if the ``fill`` attribute is True). See the attribute ``fillcolor`` for solid shapes. | def pencolor(self):
return self._pencolor | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def penColor( self ):\n return self._penColor",
"def color(self):\n assert False, 'Pen does not have a color; use pencolor or fillcolor'",
"def pencolor(self, *args):\n if args:\n color = self._colorstr(args)\n if color == self._pencolor:\n return\n self.pen(pencolor=color)\n else:\n return self._color(self._pencolor)",
"def set_pen_color(self, color: tuple) -> Rectangle:\n self.pen.color = color\n return self",
"def setPenColor( self, color ):\n self._penColor = QColor(color)\n self.setDirty()",
"def color(self):\n return self.__color",
"def color(self):\n return self._color",
"def color(self):\n return self._color",
"def get_color(self):\n\n return self._color",
"def get_color(self):\n\n return self.color",
"def get_color(self):\r\n return self.__color",
"def get_color(self):\n return self._color",
"def get_color(self):\n return self._color",
"def getColor(self):\n return self.__color",
"def getColor(self):\n return self.__color",
"def getColor(self):\n return self.__color",
"def get_color(self):\r\n return self._color",
"def get_color(self):\n return self.color",
"def getColor(self):\n return self.color",
"def line_color(self) -> String:\r\n from apysc.type import value_util\r\n self._initialize_line_color_if_not_initialized()\r\n line_color: String = value_util.get_copy(value=self._line_color)\r\n return line_color",
"def getColor(self):\r\n return self.color",
"def get_color(self) -> str:\n return self.color",
"def color(self):\n return self.COLOR",
"def color(self):\n return self['color']",
"def get_color(self):\r\n if self.color:\r\n return \"RED\"\r\n else:\r\n return \"BLACK\"",
"def get_color(self) -> str:\r\n return self.color",
"def stroke_style(self, color=None):\n self._impl.stroke_style(color)",
"def color(self) -> Optional[str]:\n return self.colour",
"def color(self):\n if \"color\" in self._prop_dict:\n return self._prop_dict[\"color\"]\n else:\n return None",
"def get_color(self, point):\n return self._color.dup()"
] | [
"0.81438965",
"0.7675859",
"0.7551927",
"0.6896309",
"0.67128396",
"0.6509215",
"0.64415",
"0.64415",
"0.64305776",
"0.6405034",
"0.6389262",
"0.6373281",
"0.6373281",
"0.6361102",
"0.6361102",
"0.6361102",
"0.6358667",
"0.63095975",
"0.62246627",
"0.62037814",
"0.6115269",
"0.6089257",
"0.6041361",
"0.6026057",
"0.6005723",
"0.5990957",
"0.5971471",
"0.5970854",
"0.593137",
"0.59016216"
] | 0.7982437 | 1 |
The fill color of this turtle. The fill color is used for filling in solid shapes. If the ``fill`` attribute is True, all subsequent draw commands fill their insides using this color. If the color changes, it only affects future draw commands, not past ones. This color is only used for filling in the insides of solid shapes. It is not the color used for the shape border. See the attribute ``pencolor`` for the border color. | def fillcolor(self):
return self._fillcolor | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def fill_color(self, fill_color=None):\n\n if fill_color is None:\n return self._fill_color\n else:\n self._fill_color = process_color(fill_color)",
"def fillcolor(self, *args):\n if args:\n color = self._colorstr(args)\n if color == self._fillcolor:\n return\n self.pen(fillcolor=color)\n else:\n return self._color(self._fillcolor)",
"def fill_color(self) -> String:\r\n from apysc.type import value_util\r\n self._initialize_fill_color_if_not_initialized()\r\n fill_color: String = value_util.get_copy(value=self._fill_color)\r\n return fill_color",
"def setFill(self, color):\n self._reconfig(\"fill\", color)",
"def set_fill_color(self, color: tuple) -> Rectangle:\n self.fill.color = color\n return self",
"def getFillColor(self):\n return getColor() if (fillColor == None) else fillColor",
"def fill(self) -> int:\n return self._fill_color",
"def sparkline_fill_color(self, sparkline_fill_color):\n\n self._sparkline_fill_color = sparkline_fill_color",
"def setFillColor(self, color):\n fillColor = color\n repaint()",
"def fill(self):\n return self._turtle.fill()",
"def fill(self, value):\n self.fill_color = value",
"def setFilled(self, fill):\n isFilled = fill\n repaint()",
"def fill(self, color):",
"def fill_style(self, color=None):\n self._impl.fill_style(color)",
"def write_fill(self, fill: FillFormat):\n if self.fill_type is not None:\n self._write_fill_type(fill)",
"def GetFillAlpha(self):\n return self._attalpha[\"fill\"]",
"def setFill(self, fill):\n self.area_show = fill",
"def color(self):\n assert False, 'Pen does not have a color; use pencolor or fillcolor'",
"def fill(self, color):\n self.format.fill(self, color)",
"def setPointFill(self, fill):\n for point in self.points:\n point.fill = fill",
"def fill(self, color: Union[int, Tuple[int, int, int]]) -> None:\n self._fill_color = color\n if color is None:\n self._palette[0] = 0x00\n self._palette.make_transparent(0)\n else:\n self._palette[0] = color\n self._palette.make_opaque(0)",
"def position_fill(self, position_fill):\n allowed_values = [\"OPEN_ONLY\", \"REDUCE_FIRST\", \"REDUCE_ONLY\", \"DEFAULT\"] # noqa: E501\n if position_fill not in allowed_values:\n raise ValueError(\n \"Invalid value for `position_fill` ({0}), must be one of {1}\" # noqa: E501\n .format(position_fill, allowed_values)\n )\n\n self._position_fill = position_fill",
"def position_fill(self, position_fill):\n allowed_values = [\"OPEN_ONLY\", \"REDUCE_FIRST\", \"REDUCE_ONLY\", \"DEFAULT\"] # noqa: E501\n if position_fill not in allowed_values:\n raise ValueError(\n \"Invalid value for `position_fill` ({0}), must be one of {1}\" # noqa: E501\n .format(position_fill, allowed_values)\n )\n\n self._position_fill = position_fill",
"def fill(self, color):\n self.fill_rect(0, 0, self.width, self.height, color)",
"def SetFillAlpha(self, alpha):\n self._attalpha[\"fill\"] = alpha\n self.SetFillColorAlpha(self.GetFillColor(), alpha)",
"def SetLevelOfFill(self, lev_fill):\n return _hypre.HypreILU_SetLevelOfFill(self, lev_fill)",
"def fill(self):\n return self[\"fill\"]",
"def fill(self):\n return self[\"fill\"]",
"def fillColor(c, mode='RGB'):\n \n # if we are using a color object (defined above), use the object's fill method\n if hasattr(c, 'colorMode'):\n \tc.setFill()\n \n # if we are passed an integer or float, set it as a simple RGB.\n elif isinstance(c, (int, float)):\n \tfill(c, c, c)\n \t\n # if we are dealing with a CMYKA tuple, set it\n elif len(c) == 5:\n cmykFill(c[0], c[1], c[2], c[3], c[4])\n \n # if we have a CMYK tuple and mode is set to CMYK\n elif len(c) and mode.upper() == 'CMYK':\n \tcmykFill(c[0], c[1], c[2], c[3])\n \t\n # otherwise we will assume that four-item tuples are RGBA\n elif len(c) == 4:\n fill(c[0], c[1], c[2], c[3])\n \n # last but not least, RGB!\n elif len(c) == 3:\n fill(c[0], c[1], c[2])",
"def _initialize_fill_color_if_not_initialized(self) -> None:\r\n if hasattr(self, '_fill_color'):\r\n return\r\n self._fill_color = String('')"
] | [
"0.8201772",
"0.7598433",
"0.7313783",
"0.67560655",
"0.6748844",
"0.6730906",
"0.6724899",
"0.67112285",
"0.6703533",
"0.6412003",
"0.63272303",
"0.6276365",
"0.6182439",
"0.6151799",
"0.6137227",
"0.61257184",
"0.59977",
"0.59752995",
"0.59577733",
"0.5939774",
"0.58236915",
"0.5753857",
"0.5753857",
"0.57025933",
"0.5681763",
"0.568141",
"0.56135905",
"0.56135905",
"0.5579686",
"0.5577562"
] | 0.76650685 | 1 |
Indicates whether the pen's icon is visible. Drawing commands will still work while the pen icon is hidden. There will just be no indication of the pen's current location on the screen. | def visible(self):
return self._turtle.isvisible() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_visible(self):\n return self._visible",
"def is_visible(self):\n return self.proto.display_type == DISPLAY_TYPE.Visible.value",
"def is_visible(self):\n return self.container['is_visible']",
"def is_visible(self):\n return self.rect.x < self.screen_rect.width",
"def is_visible(self):",
"def isVisible( self ):\n layer = self.layer()\n if ( layer and not layer.isVisible() ):\n return False\n# \n# if ( self.isIsolateHidden() ):\n# return False\n# \n return self._visible",
"def GetGripperVisible(self):\r\n\r\n return self._gripper_visible",
"def is_visible(self):\n return self._currently_shown",
"def isVisible(self):\n\t\treturn True",
"def is_visible(self, position, size=0):\n # return True\n size /= self.scale # size is in pixel\n in_x = (self.focus.x + self.offset.x / self.scale - size <=\n position.x <=\n self.focus.x - self.offset.x / self.scale + size)\n in_y = (self.focus.y + self.offset.y / self.scale - size <=\n position.y <=\n self.focus.y - self.offset.y / self.scale + size)\n # if name == \"earth\":\n # print(\"{:+e} {:+e} {}\".format(self.focus.y + self.offset2.y\n # , position.y, in_y))\n # print(\"{:+e} {:+e}\".format(self.focus.x, self.focus.y))\n return in_x and in_y",
"def is_visible(self, path):\n return True",
"def visible(self):\n return self._visible",
"def visible(self):\n return self._visible",
"def is_element_visible(self):\n if self.web_element.is_displayed():\n return True\n else:\n return False",
"def is_ruler_visible(self):\n return self.container['is_ruler_visible']",
"def visible(self):\n return -PipePair.WIDTH < self.x < WIN_WIDTH",
"def get_visible(self):\n return self._visible",
"def is_visible(self):\n return self.real > 0",
"def is_visible(self):\n return self.window.active_panel() == self.full_name",
"def is_visible(self):\n try:\n return self.element.is_displayed()\n except (NoSuchElementException,\n ElementNotVisibleException,\n StaleElementReferenceException):\n return False",
"def _is_visible(self, key) -> bool:\n return self._get_DecoSetting(key).visible",
"def is_outline_shown(self):\n return self.container['is_outline_shown']",
"def visible(self):\n return ctypes.windll.user32.IsWindowVisible(self.hwnd)",
"def IsShown(self):\r\n\r\n return self._shown",
"def isShown(self):\n return self.shown",
"def is_visible(self, x, y) :\n\t\tres_x = (x > self.x_min) and (x < self.x_max)\n\t\t# print 'res_x : {0}, x : {1}, x_min : {2}, x_max:{3}'.format(res_x, x, self.x_min, self.x_max)\n\t\tres_y = (y > self.y_min) #and (y < self.y_max)\n\t\treturn res_x and res_y",
"def _is_visible(self, point):\n return point[0] > 0 and point[0] < 1 and point[1] > 0 and point[1] < 1",
"def isdrawn(self):\n return hasattr(self, 'drawn')",
"def is_alive(self):\r\n return self.visible",
"def inspectedNodeIsVisible(self):\n return self._inspected_node_is_visible"
] | [
"0.70938164",
"0.7088974",
"0.7023082",
"0.6997821",
"0.68921804",
"0.6873909",
"0.68134916",
"0.67763966",
"0.66723704",
"0.6664352",
"0.6607723",
"0.660474",
"0.660474",
"0.6599054",
"0.6597098",
"0.6593233",
"0.6585666",
"0.65637505",
"0.65612817",
"0.65610343",
"0.6546338",
"0.6461928",
"0.64561045",
"0.6427342",
"0.6420023",
"0.64048123",
"0.6392349",
"0.6287439",
"0.62764007",
"0.6264809"
] | 0.72434735 | 0 |
Deletes this pen object. | def __del__(self):
self._screen._removePen(self)
del self._turtle | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def delete(self):\n # exit contains our clean up code\n self.exit()\n GenericAnimatedProp.GenericAnimatedProp.delete(self)",
"def delete(self):\n self.graph._del(handle=self.handle)",
"def delete(self):\n del self.shx.atoms[self.index]",
"def __del__(self):\n\n # Delete sprite (if it has been defined)\n try:\n self.canvas.delete(self.sprite)\n except AttributeError:\n pass\n except tk.TclError:\n pass",
"def delete(self):\n\t\tself.canvas.delete('node_'+self.identifier)\n\t\tself.canvas.tag_unbind('node_'+self.identifier,\"<Any>\")",
"def delete(self) -> None:\n self.pop()",
"def delete(self):\n\n raise NotImplementedError('Must be implemented by subclasses')",
"def delete(self):\n self._vertex_list.delete()\n self._vertex_list = None",
"def delX(self):\n del self.components[0]",
"def delX(self):\n del self.components[0]",
"def delete_current_shape(self):\n print(\"deleting shape!\")\n self.shapes.remove(self.current_shape)\n self.current_shape = None\n self.changed()",
"def remove(self):\r\n\t\tself._delete()",
"def __delitem__(self, key):\n self.deleteCurve(key)",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def remove(self):\n self._delete()",
"def del_curve(self, key):\n del self[key]\n del self._labels[key]",
"def _removePen(self,pen):\n if pen in self._pencils:\n self._pencils.remove(pen)",
"def delete(self):\n self.parent.delete_node(self)"
] | [
"0.6839339",
"0.6739376",
"0.66108614",
"0.65926075",
"0.6585168",
"0.6571784",
"0.65023196",
"0.64651775",
"0.6461599",
"0.6461599",
"0.63839555",
"0.63446337",
"0.63371813",
"0.63204235",
"0.63204235",
"0.63204235",
"0.63204235",
"0.63204235",
"0.63204235",
"0.63204235",
"0.63204235",
"0.63204235",
"0.63204235",
"0.63204235",
"0.63204235",
"0.63204235",
"0.63204235",
"0.6317959",
"0.6292306",
"0.62916356"
] | 0.71101624 | 0 |
Draws a line segment (dx,dy) from the current pen position | def drawLine(self, dx, dy):
assert (type(dx) in [int, float]), "parameter x:%s is not a valid number" % `dx`
assert (type(dy) in [int, float]), "parameter y:%s is not a valid number" % `dy`
x = self._turtle.xcor()
y = self._turtle.ycor()
self._turtle.setposition(x+dx, y+dy) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __draw_line(display, color, ball_pos, dx, dy):\n pygame.draw.line(display, color, ball_pos, (ball_pos[0] + dx, ball_pos[1] + dy), 2)",
"def draw_line():\n global y1, y2\n canvas.create_line(x1, y1, x2, y2, width=2, fill=color)\n y1 -= 10\n y2 += 10",
"def draw_line(self, x):\n self.PDF.setStrokeColor(black01)\n self.PDF.setLineWidth(1)\n self.PDF.line(75, x, 550, x)\n self.PDF.setStrokeColor(\"black\")",
"def draw(self):\n # s1 = ShowPoint(self.cnv, self.p1.xpt, self.p1.ypt)\n # s2 = ShowPoint(self.cnv, self.p2.xpt, self.p2.ypt)\n # s1.draw()\n # # s2.draw()\n self.cnv.create_line(self.p1.xpt, self.p1.ypt, self.p2.xpt, self.p2.ypt)",
"def draw(x,y,x1,y1,d,color=1):\n d.add(dxf.line((x,y),(x1,y1),color=color, layer='LINES',thickness=0.01))",
"def draw_line(x1, y1, x2, y2):\r\n #global _canvas\r\n #global _current_line_thickness\r\n #global _current_color\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n path = Path(Point(x1, y1), Point(x2, y2))\r\n path.setBorderWidth(_current_line_thickness)\r\n path.setBorderColor(_current_color)\r\n _canvas.add(path)",
"def draw_line(color, start_pos, end_pos, width=1):\n pygame.draw.line(screen, color, start_pos, end_pos, width)",
"def drawLine(self,start,stop):\n startX = int(self.vert[start][0]*self.scale + self.size/2)\n startY = int(self.vert[start][1]*self.scale + self.size/2)\n endX = int(self.vert[stop][0]*self.scale + self.size/2)\n endY = int(self.vert[stop][1]*self.scale + self.size/2)\n \n self.canvas.create_line(startX,startY,endX,endY,fill='white')",
"def draw_line(self, x0, y0, x1, y1, color=Color['white']):\n pygame.draw.line(self.display, color, (x0, y0), (x1, y1))",
"def dline(x, y):\n glClear(GL_COLOR_BUFFER_BIT)\n glColor3f(0.0, 0.0, 1.0)\n glPointSize(10.0)\n glBegin(GL_POINTS)\n while (x <= y):\n glVertex2f(x, x)\n x += 0.05\n glEnd()\n glFlush()",
"def drawSlope(self):\n length = sqrt(1 + self.slope**2) # Length of the line segment over 1 x-unit\n xOffset = (segmentLength / length) / 2 # Figures out how many times the length of the 1 unit length fits into the desired length\n # then divides by 2 becuase half is on the left and half on the right of the center\n\n\n # Left end point\n xLeft = self.x - xOffset\n yLeft = (self.slope * (xLeft - self.x)) + self.y\n\n # Right end point\n xRight = self.x + xOffset\n yRight = (self.slope * (xRight - self.x)) + self.y\n\n\n # Converts the left and right end points from cartesian coordinates to screen coordinates\n left = cartesianToScreen(xLeft , yLeft)\n right = cartesianToScreen(xRight, yRight)\n\n\n pygame.draw.aaline(display, self.color, left, right, 1) # DRAWS THE LINE AHHHHHHHHHHHHHHHHHH :P",
"def _defLine(self):\n self._dline=GPath(points = [0,100,GAME_WIDTH,100], linewidth = 1.5,\n linecolor = 'cyan')",
"def startLineDrawing(self, startPos):\n self.line = LineNodePath(render2d, thickness=2, colorVec=(0.8,0.8,0.8,1))\n self.line.moveTo(startPos)\n t = taskMgr.add(self.drawLineTask, \"drawLineTask\")\n t.startPos = startPos",
"def DrawLine(*args, **kwargs):\n return _gdi_.PseudoDC_DrawLine(*args, **kwargs)",
"def draw_path(self):\r\n if len(self.path) > 1:\r\n for i in range(1, len(self.path)):\r\n pg.draw.line(self.screen, (0, 150, 0),\r\n self.path[i - 1], self.path[i], 1)\r\n elif len(self.path) == 1:\r\n pg.draw.circle(self.screen, (0, 150, 0),\r\n (int(self.path[0].x), int(self.path[0].y)), 1)",
"def DrawLinePoint(*args, **kwargs):\n return _gdi_.PseudoDC_DrawLinePoint(*args, **kwargs)",
"def draw_line(self, pt0, pt1, color):\n steep = False\n if abs(pt0[0]-pt1[0]) < abs(pt0[1]-pt1[1]):\n pt0[0], pt0[1] = pt0[1], pt0[0]\n pt1[0], pt1[1] = pt1[1], pt1[0]\n steep = True\n\n if pt0[0] > pt1[0]:\n pt0[0], pt1[0] = pt1[0], pt0[0]\n pt0[1], pt1[1] = pt1[1], pt0[1]\n\n if pt0[1] > pt1[1]:\n dy = pt0[1] - pt1[1]\n inc_y = -1\n else:\n dy = pt1[1] - pt0[1]\n inc_y = 1\n\n dx = pt1[0] - pt0[0]\n d = 2 * dy - dx\n incr_e = 2 * dy\n incr_ne = 2 * (dy - dx)\n x = pt0[0]\n y = pt0[1]\n\n if not steep:\n self.buffer.set_pixel((x, y), color)\n while x < pt1[0]:\n if d <= 0:\n d = d + incr_e\n x = x + 1\n else:\n d = d + incr_ne\n x = x + 1\n y = y + inc_y\n self.buffer.set_pixel((x, y), color)\n else:\n self.buffer.set_pixel((y, x), color)\n while x < pt1[0]:\n if d <= 0:\n d = d + incr_e\n x = x + 1\n else:\n d = d + incr_ne\n x = x + 1\n y = y + inc_y\n self.buffer.set_pixel((y, x), color)",
"def wdraw_line(self, wx0, wy0, wx1, wy1, color, arrow):\r\n dx0, dy0 = self.w_to_d(wx0, wy0)\r\n dx1, dy1 = self.w_to_d(wx1, wy1)\r\n self.canvas.create_line(dx0, dy0, dx1, dy1, fill=color, arrow=arrow)",
"def draw_line(self, gray=0, nextline=0):\n\n self.fontsize = 4\n if nextline:\n self.nextline()\n else:\n self.linespace(8)\n self.resetx()\n c = self.canvas\n c.setStrokeGray(gray)\n c.setLineWidth(1)\n #self.y = self.y + self.linespacing + (self.fontsize/2)\n c.line(self.x, self.y, self.width - self.x, self.y)\n self.y = self.y + (self.linespacing)",
"def __draw_path(\n self, x_path, y_path, opt_line, opt_marker,\n opt_colour, thickness=0.05):\n # Get colour\n colour = self.__get_colour_from_string(opt_colour)\n\n # For every point in the list, draw a line to the next one\n # (excluding last point)\n for point in range(0, len(x_path)):\n # Get point 1\n x1 = x_path[point]\n y1 = y_path[point]\n p1 = vector(x1, y1, 0)\n\n # If at end / only coordinate - draw a marker\n if point == len(x_path) - 1:\n create_marker(self.scene, x1, y1, opt_marker, colour)\n return\n\n # Get point 2\n x2 = x_path[point + 1]\n y2 = y_path[point + 1]\n p2 = vector(x2, y2, 0)\n\n if opt_line == '':\n # Only one marker to avoid double-ups\n create_marker(self.scene, x1, y1, opt_marker, colour)\n elif opt_line == '-':\n create_line(\n p1, p2, self.scene, colour=colour, thickness=thickness)\n # Only one marker to avoid double-ups\n create_marker(self.scene, x1, y1, opt_marker, colour)\n elif opt_line == '--':\n create_segmented_line(\n p1, p2, self.scene, 0.3, colour=colour,\n thickness=thickness)\n # Only one marker to avoid double-ups\n create_marker(self.scene, x1, y1, opt_marker, colour)\n elif opt_line == ':':\n create_segmented_line(\n p1, p2, self.scene, 0.05, colour=colour,\n thickness=thickness)\n # Only one marker to avoid double-ups\n create_marker(self.scene, x1, y1, opt_marker, colour)\n elif opt_line == '-.':\n raise NotImplementedError(\"Other line types not implemented\")\n else:\n raise ValueError(\"Invalid line type given\")",
"def draw_lines(self):\n # draw x lines\n y = self.step_y\n while y <= self.height:\n x = 0\n while x <= self.width:\n self.canvas.create_line(x, y, x+3.5, y)\n self.canvas.update()\n x += 3.5\n y += self.step_y\n \n # draw y lines\n x = self.step_x\n while x <= self.width:\n y = 0\n while y <= self.height:\n self.canvas.create_line(x, y, x, y+3.5)\n self.canvas.update()\n y += 3.5\n x += self.step_x\n \n self.is_operating = False",
"def draw_line(self, DISP, side:str, indizes:tuple, pink = False):\r\n offset = 1 #< Just to draw the line nicely\r\n pos = (indizes[0] - 1) * self.grid_size, indizes[1] * self.grid_size\r\n # Check if it's a pink line\r\n if pink:\r\n start_pos = pos[0], pos[1] + self.grid_size // 2\r\n end_pos = pos[0] + self.grid_size, pos[1] + self.grid_size // 2\r\n # Check if the line should be vertically. u for up\r\n elif side == 'u':\r\n start_pos = pos[0] + self.width - offset + self.grid_size // 2, pos[1] + self.grid_size // 2\r\n end_pos = pos[0] + self.grid_size + offset + self.grid_size // 2 - self.width, pos[1] + self.grid_size // 2\r\n # Check if the line should be horizontally. l for left\r\n elif side == 'l':\r\n start_pos = pos[0] + self.grid_size // 2, pos[1] + self.width - offset + self.grid_size // 2\r\n end_pos = pos[0] + self.grid_size // 2, pos[1] - self.width + self.grid_size + offset + self.grid_size // 2\r\n if not pink:\r\n pg.draw.line(DISP, Colors.colors['BLACK'], start_pos,end_pos, self.width + 2 * offset) \r\n else:\r\n pg.draw.line(DISP, Colors.colors['PINK'], start_pos,end_pos, self.width + 2 * offset)",
"def draw_line(self, start_p, end_p, color, thickness: float):\n line_seg = LineSegs(\"interface\")\n line_seg.setColor(*color)\n line_seg.moveTo(start_p[0] * self.w_scale, 0, start_p[1] * self.h_scale)\n line_seg.drawTo(end_p[0] * self.w_scale, 0, end_p[1] * self.h_scale)\n line_seg.setThickness(thickness)\n line_np = self.aspect2d.attachNewNode(line_seg.create(False))\n return line_np",
"def drawPath(self):\r\n bgl.glColor4f(0.8,0.8,0.9,0.01)\r\n bgl.glLineWidth(0.01)\r\n\r\n bgl.glBegin(bgl.GL_LINES)\r\n bgl.glVertex3f(self.p1[0],self.p1[1],self.p1[2])\r\n bgl.glVertex3f(self.p2[0],self.p2[1],self.p2[2])\r\n bgl.glEnd()\r\n\r\n bgl.glNormal3f(0.0,0.0,1.0)\r\n bgl.glShadeModel(bgl.GL_SMOOTH);",
"def draw_line_segment(\n x1: float, y1: float, x2: float, y2: float, color: C3F\n ) -> None:\n pyglet.graphics.draw(\n 2,\n pyglet.gl.GL_LINE_STRIP,\n (GeoDrawer._VERTEX_MODE, [x1, y1, x2, y2]),\n (GeoDrawer._COLOR_MODE, color * 2),\n )",
"def line(self, x, y):\n self.call('line', x, y)",
"def DrawLinePoint(*args, **kwargs):\n return _gdi_.DC_DrawLinePoint(*args, **kwargs)",
"def DrawDottedLine(self, dc, point, length, vertical):\r\n\r\n for i in xrange(0, length, 2):\r\n dc.DrawPoint(point.x, point.y)\r\n if vertical:\r\n point.y += 2\r\n else:\r\n point.x += 2",
"def DrawLine(*args, **kwargs):\n return _gdi_.DC_DrawLine(*args, **kwargs)",
"def draw_point(self, p):\n length = 3\n self.set_line_width(0.1)\n self.set_source_rgba(0, 0, 1, 1)\n self.move_to(p.x + length, p.y)\n self.line_to(p.x - length, p.y)\n self.stroke()\n self.move_to(p.x, p.y + length)\n self.line_to(p.x, p.y - length)\n self.stroke()"
] | [
"0.74554735",
"0.68954694",
"0.6881306",
"0.68413955",
"0.68105817",
"0.67588437",
"0.6756782",
"0.67204547",
"0.6719156",
"0.67131805",
"0.66987574",
"0.6690832",
"0.6682115",
"0.6653802",
"0.66391045",
"0.6631516",
"0.6622917",
"0.6610148",
"0.65972066",
"0.65874225",
"0.65817493",
"0.6572892",
"0.65692616",
"0.64773583",
"0.6474336",
"0.64631367",
"0.6453293",
"0.64431775",
"0.64224786",
"0.6415529"
] | 0.7574059 | 0 |
Draw a circle of radius r centered on the pen. | def drawCircle(self, r):
assert (type(r) in [int, float]), "parameter r:%s is not a valid number" % `r`
x = self._turtle.xcor()
y = self._turtle.ycor()
# Move the pen into position
fstate = self._turtle.pendown()
if fstate:
self._turtle.penup()
self._turtle.setposition(x, y-r)
if fstate:
self._turtle.pendown()
# Draw the circle and fill if necessary
self._turtle.circle(r)
self.flush()
self._turtle.forward(0)
# Return the pen to the position
if fstate:
self._turtle.penup()
self._turtle.setposition(x, y)
if fstate:
self._turtle.pendown() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def drawCircle(x, y, r):\n pen1.up()\n pen1.goto(x,y)\n pen1.down()\n pen1.circle(r)",
"def circle(draw, centrex, centrey, radius, color=\"#AAAAAAFF\") -> None:\n # convert cartesian centre to pixel centre\n cx, cy = pixelcoord(centrex, centrey)\n # top left and bottom right coordinates\n rect = [(cx-radius, cy-radius), (cx+radius, cy+radius)]\n # draw\n draw.arc(rect, 0, 360, color)",
"def draw_circle(c):\n turtle.circle(c.radius)",
"def draw_circle(c):\n turtle.circle(c.radius)",
"def draw_circle(self, x0, y0, r, color=None):\n f = 1 - r\n ddF_x = 1\n ddF_y = -2 * r\n x = 0\n y = r\n\n self.set(x0, y0 + r, color)\n self.set(x0, y0 - r, color)\n self.set(x0 + r, y0, color)\n self.set(x0 - r, y0, color)\n\n while x < y:\n if f >= 0:\n y -= 1\n ddF_y += 2\n f += ddF_y\n x += 1\n ddF_x += 2\n f += ddF_x\n\n self.set(x0 + x, y0 + y, color)\n self.set(x0 - x, y0 + y, color)\n self.set(x0 + x, y0 - y, color)\n self.set(x0 - x, y0 - y, color)\n self.set(x0 + y, y0 + x, color)\n self.set(x0 - y, y0 + x, color)\n self.set(x0 + y, y0 - x, color)\n self.set(x0 - y, y0 - x, color)",
"def __drawCircle(self, center, radius, color, drawwidth=1):\n radius *= self.viewZoom\n if radius < 1: radius = 1\n else: radius = int(radius)\n\n pygame.draw.circle(self.screen, color, center, radius, drawwidth)",
"def draw_circle(self, x, y, radius, color=Color['white']):\n pygame.draw.circle(self.display, color, (x, y), radius)",
"def circle(self, center, radius, color=(255, 255, 255), width=0):\n center = self._transform(center)\n pygame.draw.circle(self.screen, color, center, radius, width)",
"def draw_circle(self, color, center, radius, width):\n _c = self.T.itrans(center)\n pg.draw.circle(self.screen, color, _c(), radius, width)",
"def DrawCircle(self, center, radius, color, drawwidth=1):\r\n radius *= self.zoom\r\n if radius < 1:\r\n radius = 1\r\n else: radius = int(radius)\r\n\r\n pygame.draw.circle(self.surface, color.bytes, center, radius, drawwidth)",
"def circle(self, x, y, r, solid = False):\n px = 0\n py = r\n d = 1 - 2 * r\n err = 0\n while py >= 0:\n if solid:\n for i in range(x - px, x + px + 1):\n self.pixel(i, y + py, 1)\n self.pixel(i, y - py, 1)\n else:\n self.pixel(x + px, y + py, 1)\n self.pixel(x + px, y - py, 1)\n self.pixel(x - px, y + py, 1)\n self.pixel(x - px, y - py, 1)\n err = 2 * (d + py) - 1\n if d < 0 and err <= 0:\n px += 1\n d += 2 *px + 1\n else:\n err = 2 * (d - px) - 1\n if d > 0 and err > 0:\n py -= 1\n d += 1 - 2 * py\n else:\n px += 1\n d += 2 * (px - py)\n py -= 1",
"def drawCircle(r):\r\n # create a turtle-painter instance using turtle library\r\n painter = turtle.Turtle()\r\n\r\n # turtle properties (we want the turtle to look nicer)\r\n painter.shape(\"turtle\") # setting painter shape to turtle\r\n painter.shapesize(3,3,1) # making turtle-painter 3 times bigger\r\n painter.color(\"limegreen\") # setting painting color to limegreen\r\n\r\n # move the turtle-painter to ready position\r\n painter.pu() # we just move without drawing anything\r\n x0 = coordX(r, 0) # compute initial coordinate x0\r\n y0 = coordY(r, 0) # compute initial coordinate y0\r\n\r\n painter.goto(x0,y0) # move the turtle to the ready position\r\n \r\n # tell the turtle to put pencil down on the paper\r\n painter.pd()\r\n\r\n # draw a circle\r\n for theta in range(0, 361, 1):\r\n x = coordX(r, theta, useradians = False)\r\n y = coordY(r, theta, useradians = False)\r\n\r\n painter.goto(x,y)\r\n\r\n # tell the turtle to put pencil up from the paper\r\n painter.pu()\r\n # hide the painter after he finished to draw\r\n painter.ht()\r\n print(\"Draw a circle of r = \", r )",
"def DrawSolidCircle(self, center, radius, axis, color):\r\n radius *= self.zoom\r\n if radius < 1:\r\n radius = 1\r\n else: radius = int(radius)\r\n\r\n pygame.draw.circle(self.surface, (color/2).bytes+[127],\r\n center, radius, 0)\r\n pygame.draw.circle(self.surface, color.bytes, center, radius, 1)\r\n pygame.draw.aaline(self.surface, (255, 0, 0), center,\r\n (center[0] - radius*axis[0], center[1] +\r\n radius*axis[1]))",
"def _circle(i, r=.05):\n\treturn Circle((i, 0), r, fill=True, color='black')",
"def plot_circle(r,**kw):\n try:\n fmt = kw.pop('fmt')\n except:\n fmt='k'\n try:\n label = kw.pop('label')\n except:\n label = None\n x = num.arange(-r,r+0.01,0.01)\n y = num.sqrt(num.fabs(r**2. - x**2.))\n pyplot.plot(x,y,fmt,**kw)\n pyplot.plot(x,-y,fmt,label=label,**kw)",
"def draw_circle(color, position, radius, width=0):\n #print('(color={}, position={}, radius={}, width={})')\n pygame.draw.circle(screen, color, position, radius, width)",
"def draw_circle(self, color, position, radius, width = 0, anchor= 'topleft'):\n color = spyral.color._determine(color)\n offset = self._calculate_offset(anchor)\n pygame.draw.circle(self._surf, color, position + offset, radius, width)",
"def draw_circle(centerx, centery, radius):\r\n global _canvas\r\n global _current_color\r\n if _canvas == None:\r\n raise RuntimeError(\"Canvas is not open yet.\")\r\n else:\r\n circle = Circle()\r\n circle.move(centerx, centery)\r\n circle.setRadius(radius)\r\n _set_not_filled(circle)\r\n _canvas.add(circle)",
"def drawCircle(t, x, y, radius):\r\n t.up()\r\n t.goto(x + radius, y)\r\n t.setheading(90)\r\n t.down()\r\n for count in range(120):\r\n t.left(3)\r\n t.forward(2.0 * math.pi * radius / 120.0)",
"def create_circle(self, x, y, r, **kwargs):\n return self.create_oval(*self.circ_to_oval(x, y, r), **kwargs)",
"def draw_circle_filled(center_x, center_y, radius, color):\n width = radius\n height = radius\n draw_ellipse_filled(center_x, center_y, width, height, color)",
"def circle(radius, center, dim):\n kern = np.zeros(shape=(radius*2,radius*2))\n kern[draw.circle(r=radius, c=radius, radius=radius)] = 1\n return kern",
"def draw_circle(self, center, radius, line_width, line_color, fill_color=\"\"):\n line_color, fill_color = check_color(line_color), check_color(fill_color)\n SToval.oval(self.canvas, center, radius, line_width, line_color, fill_color)",
"def draw_circle(t, circle):\n t.pu()\n t.goto(circle.center.x, circle.center.y)\n t.pd()\n polygon.circle(t, circle.radius)",
"def circle(self, center, rad):\n self.gc.show_circles(center[0], center[1], rad, facecolor='none', edgecolor=self.color, linewidth=0.5)",
"def circle(self, pos, radius, draw=None, fill=\"black\", lw=0, options=None, kwoptions=None):\n\n fill = norm_colour(fill)\n self.use_colour(fill)\n\n draw = norm_colour(draw)\n if draw is None:\n draw = fill\n self.use_colour(draw)\n\n self._commands.append(rf\"\\filldraw[line width={lw},\"\n rf\"{fmt_options(options, kwoptions, draw=draw, fill=fill)}] \"\n rf\" {fmt_point(pos)} circle ({radius});\")",
"def circle(self, center_x, center_y, radius, color):\n x = radius - 1\n y = 0\n d_x = 1\n d_y = 1\n err = d_x - (radius << 1)\n while x >= y:\n self.pixel(center_x + x, center_y + y, color)\n self.pixel(center_x + y, center_y + x, color)\n self.pixel(center_x - y, center_y + x, color)\n self.pixel(center_x - x, center_y + y, color)\n self.pixel(center_x - x, center_y - y, color)\n self.pixel(center_x - y, center_y - x, color)\n self.pixel(center_x + y, center_y - x, color)\n self.pixel(center_x + x, center_y - y, color)\n if err <= 0:\n y += 1\n err += d_y\n d_y += 2\n if err > 0:\n x -= 1\n d_x += 2\n err += d_x - (radius << 1)",
"def circle(self, x, y, r, cls=None, style=None):\n x, y, r = self._meta.units(x, y, r)\n cls_str = 'class=\"%s\" ' % cls if cls else ''\n style_str = 'style=\"%s\" ' % self._meta.make_style(style) if style else ''\n self.elements.append(\"\"\"\n <circle cx=\"%s\" cy=\"%s\" r=\"%s\" %s%s/>\n \"\"\".strip() % (\n x, y, r, cls_str, style_str\n ))\n return self",
"def circle(self, x, y, r, cls=None, style=None):\n x, y, r = self._meta.units(x, y, r)\n cls_str = 'class=\"%s\" ' % cls if cls else ''\n style_str = 'style=\"%s\" ' % self._meta.make_style(style) if style else ''\n self.elements.append(\"\"\"\n <circle cx=\"%s\" cy=\"%s\" r=\"%s\" %s%s/>\n \"\"\".strip() % (\n x, y, r, cls_str, style_str\n ))\n return self",
"def draw_circle_outline(center_x, center_y, radius, color, border_width=1):\n width = radius\n height = radius\n draw_ellipse_outline(center_x, center_y, width, height,\n color, border_width)"
] | [
"0.8421582",
"0.78745735",
"0.78179467",
"0.78179467",
"0.7747963",
"0.7729969",
"0.77170885",
"0.76944184",
"0.7692904",
"0.7636455",
"0.7619516",
"0.7604409",
"0.74545527",
"0.7423694",
"0.7403186",
"0.7349896",
"0.73346525",
"0.732114",
"0.7312092",
"0.7307844",
"0.729076",
"0.7290352",
"0.72844625",
"0.7284075",
"0.72728354",
"0.7221165",
"0.7216204",
"0.7195231",
"0.7195231",
"0.7181659"
] | 0.8723447 | 0 |
Fills in the current drawing, but retains state. Normally, an object is not filled until you set the state to False. Calling this method executes this fill, without setting the state to False. If fill is False, this method does nothing. | def flush(self):
if self.fill:
self._turtle.fill(False)
self._turtle.fill(True) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setFilled(self, fill):\n isFilled = fill\n repaint()",
"def fill(self):\n return self._turtle.fill()",
"def update_fill(self, event):\r\n\r\n if event.type == 'FILL':\r\n self.update_positions_from_fill(event)\r\n self.update_holdings_from_fill(event)",
"def update_fill(self, event):\n if event.type == 'FILL':\n self.update_positions_from_fill(event)\n self.update_holdings_from_fill(event)",
"def update_fill(self, event):\n if event.type == 'FILL':\n self.update_positions_from_fill(event)\n self.update_holdings_from_fill(event)",
"def setFill(self, fill):\n self.area_show = fill",
"def fill():\n # Switch in edit mode\n bpy.ops.object.mode_set(mode = 'EDIT')\n \n # Fill hole\n bpy.ops.mesh.fill()",
"def _switch(self):\n self.fill= not self.fill",
"def clear(self, fill = 0x00):\n self._buffer = [ fill ] * ( self.width * self.height )",
"def update_fill(self, event):\n if event.type == 'FILL':\n self.update_positions_from_fill(event)\n self.update_prices_from_fill(event)\n self.update_holdings_from_fill(event)",
"def end_fill():\n turtleTmp.end_fill()",
"def setPointFill(self, fill):\n for point in self.points:\n point.fill = fill",
"def on_draw(self):\n self.clear()\n self.gamestatemanager.peek().on_draw(self.get_size())",
"def draw(self):\n self._group.set_state_recursive()\n self._vertex_list.draw(self._draw_mode)\n self._group.unset_state_recursive()",
"def write_fill(self, fill: FillFormat):\n if self.fill_type is not None:\n self._write_fill_type(fill)",
"def on_draw(self, da, ctx):\n self.referee.get_current_state().draw(ctx)",
"def clear(self):\r\n\t\tself.grid.fill(False)",
"def filled(self, fill_value):\n sdata = self.data\n new_data = numpy.ma.filled(sdata, fill_value=fill_value)\n if new_data == sdata:\n return self\n else:\n return type(self)(new_data, self.bset)",
"def draw(self, surface):\n checked_color = (0, 196, 0) if self.checked else pg.Color(\"white\")\n surface.fill(pg.Color(\"black\"), self.rect)\n surface.fill(self.color, self.rect.inflate(-2,-2))\n surface.fill(pg.Color(\"white\"), self.rect.inflate(-6,-6))\n surface.fill((205,205,205), self.rect.inflate(-8,-8))\n surface.fill(checked_color, self.select_rect)",
"def fill(layer, event):\n # on press\n layer.fill(layer.coordinates, layer._value, layer.selected_label)",
"def filled(\n self,\n filled: FillReturn,\n fill_type: FillType,\n ax: figure | int = 0,\n color: str = \"C0\",\n alpha: float = 0.7,\n ) -> None:\n fig = self._get_figure(ax)\n color = self._convert_color(color)\n xs, ys = filled_to_bokeh(filled, fill_type)\n if len(xs) > 0:\n fig.multi_polygons(xs=[xs], ys=[ys], color=color, fill_alpha=alpha, line_width=0)",
"def on_draw(self):\n self.clear()\n self.manager.draw()",
"def fill(self, *args, **kwargs):\n closed = kwargs.pop('closed', True)\n return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)",
"def fill(self, *args, **kwargs):\n closed = kwargs.pop('closed', True)\n return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)",
"def fill(self, *args, **kwargs):\r\n closed = kwargs.pop('closed', True)\r\n return super(RadarAxes, self).fill(closed=closed, *args, **kwargs)",
"def draw(self):\n arcade.draw_xywh_rectangle_filled(\n self.x, self.y, self.width, self.height, self.fill.color\n )\n arcade.draw_xywh_rectangle_outline(\n self.x, self.y, self.width, self.height, self.pen.color, 3\n )",
"def _redraw(self, render_as_done: \"bool\" = False) -> \"None\":\n if not self.drawn:\n cast(\"Application\", super())._redraw(render_as_done=True)\n self.drawn = True",
"def fill(self, color):",
"def fill_px(self, fill_px):\n\n self._fill_px = fill_px",
"def fill(self, color):\n self.fill_rect(0, 0, self.width, self.height, color)"
] | [
"0.76267654",
"0.7002292",
"0.6535505",
"0.6509827",
"0.6509827",
"0.6482051",
"0.64610213",
"0.63605654",
"0.6331894",
"0.6112397",
"0.6048987",
"0.59302145",
"0.58996844",
"0.5897555",
"0.5853862",
"0.57472944",
"0.5747061",
"0.5738132",
"0.5734731",
"0.5734146",
"0.57322496",
"0.5723832",
"0.5720785",
"0.5720785",
"0.57183105",
"0.571228",
"0.5708237",
"0.56902003",
"0.568447",
"0.567119"
] | 0.70235515 | 1 |
hgtStartData is the source data from the NASA JPL topological data | def __init__(self, hgtStartData):
self.data = []
for row in hgtStartData:
toAdd = []
for height in row:
toAdd.append([height, 0])
self.data.append(toAdd)
self.maxX = len(hgtStartData[0]) - 1
self.maxY = len(hgtStartData) - 1
self.minFloodHeight = 0 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _compute_single_source_data(self, start, end):\n single_source_data = {}\n dijkstra = Dijkstra(self.graph_provider)\n single_source_data['start'] = dijkstra.single_source(start)\n single_source_data['end'] = dijkstra.single_source(end)\n\n self._single_source_data = single_source_data",
"def test_data_source_soaps_id_head(self):\n pass",
"def extract_head(data):\n tl = data['tls'][data['i']];\n br = data['brs'][data['i']];\n head = extract_area(data,(tl,br));\n return head;",
"def getOLAPSource():",
"def get_start_delta(target_horizon, gt_id=\"contest_tmp2m\"):\n return get_measurement_lag(gt_id) + get_forecast_delta(target_horizon)",
"def mdsData(shotno=None,\n\t\t\tdataAddress=['\\HBTEP2::TOP.DEVICES.SOUTH_RACK:CPCI_10:INPUT_94',\n\t\t\t\t\t\t '\\HBTEP2::TOP.DEVICES.SOUTH_RACK:CPCI_10:INPUT_95'],\n\t\t\ttStart=[],tStop=[]):\t\t\t\n\t\t\n\t# convert dataAddress to a list if it not one originally \n\tif type(dataAddress) is not list:\n\t\tdataAddress=[dataAddress];\n\t\t\n#\t# if shotno == -1, use the latest shot number\n#\tif shotno==-1:\n#\t\tshotno=latestShotNumber()\n\t\t\n\t# init arrays\n\ttime = []\n\tdata = []\n\t\t\n\t# check if computer is located locally or remotely. The way it connects to spitzer remotely can only use one method, but locally, either method can be used. \n\tif _ON_HBTEP_SERVER==True: # if operating local to the tree\n\t\t# converted from Ian's code\n\t\t\n\t\ttree = _mds.Tree('hbtep2', shotno) \n\t\tfor i in range(0,len(dataAddress)):\n\n\t\t\tnode = tree.getNode(dataAddress[i])\t\t\t#Get the proper node\t\n\t\t\tdata.append(node.data())\t\t\t \t \t#Get the data from this node \n\t\tif type(data[0]) is _np.ndarray: # if node is an array, return data and time\n\t\t\ttime = node.dim_of().data()\t\t\n\n\t\n\telse: # operaeting remotely\n\t\n\t\t# if shotno is specified, this function gets its own mdsConn\n\t\tif type(shotno) is float or type(shotno) is int or type(shotno) is _np.int64:\n\t\t\tmdsConn=_initRemoteMDSConnection(shotno);\n\n\t\tfor i in range(0,len(dataAddress)):\n\t\t\tdata.append(mdsConn.get(dataAddress[i]).data())\n\t\t\n\t\t# if data is an array, also get time\n\t\tif type(data[0]) is _np.ndarray:\n\t\n\t\t\ttime = mdsConn.get('dim_of('+dataAddress[0]+')').data(); # time assocated with data\n \n\tif time != [] and type(tStop)!=list:\n\t\t# trim time and data\n\t\ttime,data= _trimTime(time,data,tStart,tStop)\n\t\t\n\tif time != []:\n\t\treturn data, time\n\telse: \n\t\treturn data",
"def get_start_state_data(start_state: int, states: [State]) -> tuple:\n first_node = 0\n for state in states:\n if state.trigs:\n for trig in state.trigs:\n if trig.source == start_state:\n first_node = trig.target\n return (get_state_by_id(states, first_node, \"new\").new_id, get_state_by_id(states, first_node, \"old\").y,\n (get_state_by_id(states, first_node, \"new\").x - 2))",
"def horde_start(self, observation):",
"def soho_load(dataset, startdate, enddate, path=None, resample=None, pos_timestamp=None, max_conn=5):\n if not (pos_timestamp=='center' or pos_timestamp=='start' or pos_timestamp is None):\n raise ValueError(f'\"pos_timestamp\" must be either None, \"center\", or \"start\"!')\n\n if dataset == 'SOHO_COSTEP-EPHIN_L2-1MIN':\n df, metadata = soho_ephin_loader(startdate, enddate, resample=resample, path=path, all_columns=False, pos_timestamp=pos_timestamp)\n else:\n trange = a.Time(startdate, enddate)\n cda_dataset = a.cdaweb.Dataset(dataset)\n try:\n result = Fido.search(trange, cda_dataset)\n filelist = [i[0].split('/')[-1] for i in result.show('URL')[0]]\n filelist.sort()\n if path is None:\n filelist = [sunpy.config.get('downloads', 'download_dir') + os.sep + file for file in filelist]\n elif type(path) is str:\n filelist = [path + os.sep + f for f in filelist]\n downloaded_files = filelist\n\n for i, f in enumerate(filelist):\n if os.path.exists(f) and os.path.getsize(f) == 0:\n os.remove(f)\n if not os.path.exists(f):\n downloaded_file = Fido.fetch(result[0][i], path=path, max_conn=max_conn)\n\n # downloaded_files = Fido.fetch(result, path=path, max_conn=max_conn) # use Fido.fetch(result, path='/ThisIs/MyPath/to/Data/{file}') to use a specific local folder for saving data files\n # downloaded_files.sort()\n data = TimeSeries(downloaded_files, concatenate=True)\n df = data.to_dataframe()\n\n metadata = _get_metadata(dataset, downloaded_files[0])\n\n # remove this (i.e. following lines) when sunpy's read_cdf is updated,\n # and FILLVAL will be replaced directly, see\n # https://github.com/sunpy/sunpy/issues/5908\n # df = df.replace(-1e+31, np.nan) # for all fluxes\n # df = df.replace(-2147483648, np.nan) # for ERNE count rates\n # 4 Apr 2023: previous 2 lines removed because they are taken care of with sunpy\n # 4.1.0:\n # https://docs.sunpy.org/en/stable/whatsnew/changelog.html#id7\n # https://github.com/sunpy/sunpy/pull/5956\n\n # careful!\n # adjusting the position of the timestamp manually.\n # requires knowledge of the original time resolution and timestamp position!\n if pos_timestamp == 'center':\n if (dataset.upper() == 'SOHO_ERNE-HED_L2-1MIN' or\n dataset.upper() == 'SOHO_ERNE-LED_L2-1MIN' or\n dataset.upper() == 'SOHO_COSTEP-EPHIN_L3I-1MIN'):\n df.index = df.index+pd.Timedelta('30s')\n if dataset.upper() == 'SOHO_CELIAS-PM_30S':\n df.index = df.index+pd.Timedelta('15s')\n if pos_timestamp == 'start':\n if dataset.upper() == 'SOHO_CELIAS-SEM_15S':\n df.index = df.index-pd.Timedelta('7.5s')\n\n if isinstance(resample, str):\n df = resample_df(df, resample, pos_timestamp=pos_timestamp)\n except (RuntimeError, IndexError):\n print(f'Unable to obtain \"{dataset}\" data!')\n downloaded_files = []\n df = []\n metadata = []\n return df, metadata",
"def __init__(self, data_source, min_sup=MIN_SUPPORT, eq=False):\n self.thd_supp = min_sup\n \"\"\":type thd_supp: float\"\"\"\n self.equal = eq\n \"\"\":type eq: bool\"\"\"\n self.titles, self.data = DataGP.read(data_source)\n \"\"\":type titles: ndarray\"\"\"\n \"\"\":type data: ndarray\"\"\"\n self.row_count, self.col_count = self.data.shape\n self.time_cols = self.get_time_cols()\n self.attr_cols = self.get_attr_cols()\n self.valid_bins = np.array([])\n self.no_bins = False\n self.step_name = '' # For T-GRAANK\n self.attr_size = 0 # For T-GRAANK",
"def hcgps(data_src, min_supp=MIN_SUPPORT, max_iteration=MAX_ITERATIONS, step_size=STEP_SIZE, return_gps=False):\n # Prepare data set\n d_set = DataGP(data_src, min_supp)\n d_set.init_attributes()\n attr_keys = [GI(x[0], x[1].decode()).as_string() for x in d_set.valid_bins[:, 0]]\n\n if d_set.no_bins:\n return []\n\n # Parameters\n it_count = 0\n var_min = 0\n counter = 0\n var_max = int(''.join(['1'] * len(attr_keys)), 2)\n eval_count = 0\n\n # Empty Individual Template\n best_sol = structure()\n candidate = structure()\n\n # Best Cost of Iteration\n best_costs = np.empty(max_iteration)\n best_patterns = []\n str_best_gps = list()\n str_iter = ''\n str_eval = ''\n repeated = 0\n\n # generate an initial point\n best_sol.position = None\n # candidate.position = None\n if best_sol.position is None:\n best_sol.position = np.random.uniform(var_min, var_max, N_VAR)\n # evaluate the initial point\n apply_bound(best_sol, var_min, var_max)\n best_sol.cost = costfxn(best_sol.position, attr_keys, d_set)\n\n # run the hill climb\n while counter < max_iteration:\n # while eval_count < max_evaluations:\n # take a step\n candidate.position = None\n if candidate.position is None:\n candidate.position = best_sol.position + (random.randrange(var_min, var_max) * step_size)\n apply_bound(candidate, var_min, var_max)\n candidate.cost = costfxn(candidate.position, attr_keys, d_set)\n\n if candidate.cost < best_sol.cost:\n best_sol = candidate.deepcopy()\n eval_count += 1\n str_eval += \"{}: {} \\n\".format(eval_count, best_sol.cost)\n\n best_gp = validategp(d_set, decodegp(attr_keys, best_sol.position))\n \"\"\":type best_gp: GP\"\"\"\n is_present = isduplicate(best_gp, best_patterns)\n is_sub = amcheck(best_patterns, best_gp, subset=True)\n if is_present or is_sub:\n repeated += 1\n else:\n if best_gp.support >= min_supp:\n best_patterns.append(best_gp)\n str_best_gps.append(best_gp.print(d_set.titles))\n\n try:\n # Show Iteration Information\n # Store Best Cost\n best_costs[it_count] = best_sol.cost\n str_iter += \"{}: {} \\n\".format(it_count, best_sol.cost)\n except IndexError:\n pass\n it_count += 1\n\n if max_iteration == 1:\n counter = repeated\n else:\n counter = it_count\n # Output\n out = json.dumps({\"Algorithm\": \"LS-GRAD\", \"Best Patterns\": str_best_gps, \"Iterations\": it_count})\n \"\"\":type out: object\"\"\"\n if return_gps:\n return out, best_patterns\n else:\n return out",
"def get_tmin(self):\n tmin = min(sorted(self.srcData.keys()))\n return tmin",
"def get_start(network, road_id):\n return network[0][road_id][0]",
"def start(self):\n self.log.setLevel(logging.INFO)\n super().start()\n \n self._dts = rift.tasklets.DTS(self.tasklet_info,\n UtCompositeYang.get_schema(),\n self._loop,\n self.on_dts_state_change) \n\n # Set the instance id\n self.instance_name = self.tasklet_info.instance_name\n self.instance_id = int(self.instance_name.rsplit('-', 1)[1])\n self.log.debug(\"Starting TestDriverTasklet Name: {}, Id: {}\".format(\n self.instance_name,\n self.instance_id))\n\n self.state = TaskletState.STARTING",
"def get_data(station,starttime,endtime,activity=False,\n rep='/GNOMEDrive/gnome/serverdata/',resample=None):\n setname = \"MagneticFields\"\n dstr = ['%Y','%m','%d','%H','%M']\n dsplit = '-'.join(dstr[:starttime.count('-')+1])\n start = datetime.strptime(starttime,dsplit)\n starttime = construct_utc_from_metadata(start.strftime(\"%Y/%m/%d\"),\n start.strftime(\"%H:%M:%S.%d\"))\n dsplit = '-'.join(dstr[:endtime.count('-')+1])\n end = datetime.strptime(endtime,dsplit)\n endtime = construct_utc_from_metadata(end.strftime(\"%Y/%m/%d\"),\n end.strftime(\"%H:%M:%S.%d\"))\n dataset = []\n for date in numpy.arange(start,end,timedelta(minutes=1)):\n date = date.astype(datetime)\n path1 = rep+station+'/'+date.strftime(\"%Y/%m/%d/\")\n path2 = station+'_'+date.strftime(\"%Y%m%d_%H%M*.hdf5\")\n fullpath = os.path.join(path1,path2)\n dataset += glob.glob(fullpath)\n if len(dataset)==0:\n print \"ERROR: No data files were found...\"\n quit()\n file_order,data_order = {},{}\n for fname in dataset:\n hfile = h5py.File(fname, \"r\")\n segfile = file_to_segment(hfile,setname)\n file_order[segfile] = fname\n data_order[segfile] = hfile\n # Extract sample rate from metadata of last read data file\n sample_rate = hfile[setname].attrs[\"SamplingRate(Hz)\"]\n # Estimate full segment activity list\n activity = create_activity_list(station,data_order)\n # Generate an ASCII representation of the GPS timestamped\n # segments of time covered by the input data\n seglist = segmentlist(data_order.keys())\n # Sort the segment list\n seglist.sort()\n # Create list of time series from every segment\n ts_list = generate_timeseries(file_order,setname)\n # Retrieve channel data for all the segments\n full_data = numpy.hstack([retrieve_channel_data(data_order[seg],setname)\n for seg in seglist])\n new_sample_rate = sample_rate if resample==None else resample\n new_data_length = len(full_data)*new_sample_rate/float(sample_rate)\n full_data = scipy.signal.resample(full_data,int(new_data_length))\n # Models a time series consisting of uniformly sampled scalar values\n ts_data = types.TimeSeries(full_data,delta_t=1./new_sample_rate,\n epoch=seglist[0][0])\n for v in data_order.values():\n v.close()\n return ts_data,ts_list,activity,int(starttime),int(endtime)",
"def start_time(self) -> datetime:\n return self.root_hartree.start_time",
"def get_source(source, data):\n\n # source = 'NCv1.143'\n z = data[source]['z']\n line_width = data[source]['line_width']\n delta_v = 1 * kms # do not care actually, fully degenerate with\n # the column density\n\n # selecting only CO lines\n keys = [key for key in data[source].keys()\n if 'CO' in key and 'eCO' not in key]\n CO_data = Table(np.asarray([(Jlow + 1, data[source][key], data[source]['e' + key])\n for Jlow, key in enumerate(keys)\n if np.isfinite(data[source][key])]),\n names=['Jup', 'flux', 'eflux'],\n dtype=[int, float, float])\n\n Jup = CO_data['Jup'].data\n flux = CO_data['flux'].data * Jykms\n eflux = CO_data['eflux'].data * Jykms\n\n return z, line_width, Jup, flux, eflux",
"def get_start_node(self):\n return self._start",
"def make_source_dataset(self, current_host_index, num_hosts):\n pass",
"def prepare_data():\n #data, label = load_ta_data(), load_ta_target()\n data, label = load_own_data(), load_own_target()\n tra_x, tst_x = split_samples(data)\n tra_y, tst_y = split_samples(label)\n return (tra_x, tst_x, tra_y, tst_y)",
"def insert_start(self, data):\n\n if self.head is None:\n self.head = ListNode(data)\n else:\n temp = self.head\n self.head = ListNode(data)\n self.head.next = temp",
"def getObservationStart(vis, obsid=-1, verbose=False):\n if (os.path.exists(vis) == False):\n print \"vis does not exist = %s\" % (vis)\n return\n if (os.path.exists(vis+'/table.dat') == False):\n print \"No table.dat. This does not appear to be an ms.\"\n print \"Use au.getObservationStartDateFromASDM().\"\n return\n mytb = createCasaTool(tbtool)\n try:\n mytb.open(vis+'/OBSERVATION')\n except:\n print \"ERROR: failed to open OBSERVATION table on file \"+vis\n return(3)\n time_range = mytb.getcol('TIME_RANGE')\n mytb.close()\n if verbose: print \"time_range: \", str(time_range)\n # the first index is whether it is starttime(0) or stoptime(1) \n time_range = time_range[0]\n if verbose: print \"time_range[0]: \", str(time_range)\n if (obsid >= len(time_range)):\n print \"Invalid obsid\"\n return\n if obsid >= 0:\n time_range = time_range[obsid]\n elif (type(time_range) == np.ndarray):\n time_range = np.min(time_range)\n return(time_range)",
"def initialize_data(self , station = '', datasets = {} ):\n \n self.datasets = datasets\n self.datasets_keys = datasets.keys()\n self.station = station\n \n data = {} # container for the data of each dataset\n source_configuration = {} # container for the source_configuration of each dataset\n \n\n \n \"\"\" Looping over the datasets \"\"\"\n logging.info('*** Reading and Initializing the data from the netCDF files ')\n \n \n for k,v in datasets.items() :\n logging.info(' Initialising the dataset: *** %s ' , k )\n data[k] = {} \n data['cdm_tables'] = {} \n \n ### alternative with xarray \n #ds = xr.load_dataset(v) \n #observations_table = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n ### alternative with netCDF4\n #ds = nc.Dataset(v) \n #data[k]['dateindex'] = ds.variables['dateindex'][0,:] # storing the dateindex \n \n ###for h5py but cant extract date time units !!!\n ds = h5py.File(v , driver=\"core\" ) \n data[k]['df'] = ds # storing the entire file \n try: \n data[k]['source_file'] = ds['source_configuration']['source_file'][0]\n except:\n data[k]['source_file'] = str(v) # temp fix \n \n #data[k]['product_code'] = ds['source_configuration']['product_code'][0] \n #data[k]['recordtimestamp'] = ds['recordtimestamp'].value\n #data[k]['recordindex'] = ds['recordindex'].value \n #ds.close() \n logging.debug('Reading the file with h5py ')\n \n \n # add here appending datasets for the case of ncar_w and ncar_t \n \n \n self.data = data\n self.make_dataframe()\n ds.close()\n \n \"\"\" Reading the header_table, station_configuration, source_configuration \"\"\"\n for k,v in datasets.items() : \n \n #d = xr.open_dataset(v , engine = 'h5netcdf' ) \n #data[k]['recordtimestamp'] = d['recordtimestamp'].values\n #data[k]['recordindex'] = d['recordindex'].values \n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'station_configuration') \n data[k]['station_configuration'] = d.to_dataframe() \n #data[k]['station_configuration'] = d ### USELESS ? \n logging.debug('Done with %s station_configuration' , str(k) )\n \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'header_table') \n logging.debug('Loading the header_table') \n if 'header_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['header_table'] = {}\n for var in d.variables:\n self.attributes['header_table'][var] = {}\n self.attributes['header_table'][var]['description'] = d[var].description\n self.attributes['header_table'][var]['external_table'] = d[var].external_table \n data[k]['header_table'] = d.to_dataframe() \n logging.debug('Done with %s ' , k )\n \n logging.info(\"*** Loading the observations_table (might take time) %s\" , k ) \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'observations_table') \n \n if 'observations_table' not in list( self.attributes.keys() ): # saving the attributes to be re-applied at the end\n self.attributes['observations_table'] = {}\n for var in d.variables:\n self.attributes['observations_table'][var] = {}\n self.attributes['observations_table'][var]['description'] = d[var].description\n self.attributes['observations_table'][var]['external_table'] = d[var].external_table\n \n \n logging.info(\"*** Loading the source configuration %s\" , k ) \n try: \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'source_configuration')\n d = d.isel(hdrlen=[0])\n data[k]['source_configuration'] = d.to_dataframe() ### USELESS ? 
\n logging.debug('Done with %s source_configuration' , k )\n except: \n data[k]['source_configuration']= pd.DataFrame(np.array( [ [ self.data[k]['source_file'] ] ] ) , columns=['source_file'] ) \n \n if k == 'era5_1': # reading the whole era5_1 feedback (including reanalysis)\n d = xr.open_dataset(v , engine = 'h5netcdf' , group = 'era5fb') \n data[k]['era5fb'] = d.to_dataframe() \n logging.debug('Done with %s era5 feedback ', k )\n \n \"\"\" Reading the CDM tables that do not depend on specific stations or observations (fixed values), for the first file only \"\"\" \n if list(datasets.keys()).index(k) == 0 :\n for t in [ 'crs' , 'observed_variable', 'units' , 'z_coordinate_type' , 'station_type']: \n \n d = xr.open_dataset(v , engine = 'h5netcdf' , group = t) \n #data['cdm_tables'][t] = d.to_dataframe() ### USELESS ?\n data['cdm_tables'][t] = d \n \n d.close() \n ds.close()\n\n \"\"\" Reading the name of the original source file \"\"\"\n source_configuration[k] = {} \n source_configuration[k]['source_file'] = [ c for c in v.split('/') if '.nc' in c][0]\n\n \n \"\"\" Storing the station configurations \"\"\" \n self.source_configuration = source_configuration \n \n \"\"\" Making all date_times \"\"\" \n self.make_all_datetime()\n \n \n \"\"\" feedback columns \"\"\"\n if 'era5_1' in list (self.data.keys() ):\n self.fb_columns = list(self.data['era5_1']['era5fb'].columns ) \n else:\n self.fb_columns = ['empty']",
"def _get_start_params(self, start_params=None):\n if start_params is None:\n if hasattr(self, 'start_params'):\n start_params = self.start_params\n elif self.exog is not None:\n # fails for shape (K,)?\n start_params = [0] * self.exog.shape[1]\n else: # pragma: no cover\n raise ValueError(\"If exog is None, then start_params should \"\n \"be specified\")\n return start_params",
"def _pasrse_data_start_end(self, data):\n first = data['obs_time'].iloc[0]\n last = data['obs_time'].iloc[-1]\n\n return (first, last)",
"def format_start(self):\n logging.info(\" itr h => cost set troom droom tout dout = t rwd\")\n logging.info(\" %7.1f %4.1f %7.1f %7.1f %4.1f %4.1f\" % (\n self.state['heat_cost'],\n self.state['set_temp'],\n self.state['room_temp'],\n self.state['room_temp_change'],\n self.state['outside_temp'],\n self.state['outside_temp_change'],\n ))",
"def get_start_time(self):\n start = datetime.strptime(\n self.get_handler().SOURCE_START_DATE.split('.')[0],\n '%Y%m%d%H%M%S'\n )\n return start",
"def hgvs_start(self):\n try:\n return self.hp.parse(self.term).posedit.pos.start\n except hgvs.exceptions.HGVSParseError:\n # Log me\n # print(self.term)\n return None",
"def __init__(self, start_node):\n self.start_node = start_node",
"def test_parse_hgts_riatahgt(self):\n with open(self.riatahgt_output_hgt_fp, 'r') as f:\n output = parse_hgts(f, 'riata-hgt')\n self.assertEqual(int(output), 1)"
] | [
"0.55448586",
"0.54904854",
"0.5319753",
"0.5157752",
"0.515219",
"0.5077915",
"0.5074858",
"0.50618047",
"0.49980226",
"0.4995144",
"0.4934863",
"0.4929119",
"0.49235275",
"0.49122941",
"0.49056458",
"0.489825",
"0.48901126",
"0.4874309",
"0.4871687",
"0.48503634",
"0.4832765",
"0.4831706",
"0.48259488",
"0.4811187",
"0.48023063",
"0.4792749",
"0.4784197",
"0.4780106",
"0.47731048",
"0.47674128"
] | 0.580808 | 0 |