query | document | metadata | negatives | negative_scores | document_score | document_rank |
---|---|---|---|---|---|---|
Shows a table of user tests. | def Index(request):
output = request.GET.get('o')
if output == 'gviz_table_data':
return http.HttpResponse(FormatUserTestsAsGviz(request))
else:
params = {
'height': '400px',
'width': 'auto',
'page_size': 20
}
return util.Render(request, 'user_tests_index.html', params) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Table(request, key):\n test = models.user_test.Test.get_mem(key)\n if not test:\n msg = 'No test was found with test_key %s.' % key\n return http.HttpResponseServerError(msg)\n\n params = {\n 'hide_nav': True,\n 'hide_footer': True,\n 'test': test,\n }\n\n return util.GetResults(request, 'user_test_table.html', params,\n test.get_test_set())",
"def test_view_displays_all(self):\n set_up_one_user(self, 1, 0)\n login = self.client.login(username='test', password='2HJ1vRV0Z&3iD')\n response = self.client.get(reverse('index'))\n self.assertEqual(response.status_code, 200)\n self.assertEqual(str(response.context['user']), 'test')\n self.assertEqual(len(response.context['data']), 1)",
"def tests():\n\n\treturn render_template(\"testing.html\")",
"def test_display_all_users(self):\n self.assertEqual(User.display_users(), User.UserDetails)",
"def view_test(request):\n\n\tcontext_dict = {\n\t\t'title': 'All Students'\n\t}\n\treturn render(\n\t\trequest,\n\t\t'viewTest.html',\n\t\tcontext_dict\n\t)",
"def show_users():\n\n user = User(connection=connection, cursor=cursor)\n\n all_users = user.get_all_users()\n\n context = {\n 'all_users': all_users\n }\n\n return render_template('pages/tables/users.html', **context)",
"def test_user_list(self):\n response = self.client.get('/tests/dashboard/')\n self.assertEqual(response.status_code, 200)\n self.assertContains(response, 'testuser', html=True)\n self.assertContains(response, '[email protected]', html=True)",
"def tests():\n dates, times = report_date_time()\n return render_template('tests.html',\n unit_date=dates[0], unit_time=times[0],\n integ_date=dates[1], integ_time=times[1])",
"def test_list_user(self):\n pass",
"def test_user_get_topteams():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n r = client.get('/top/10')\n assert r.status_code == 200\n destroy_ctfd(app)",
"def test(request, table):\n\n username = request.session.get('username', False)\n error_json = {\"Error\": \"No user authenticated\"}\n\n if (username):\n context = {'username': username}\n if table == \"contains\":\n return render(request, 'MedTAG_sket_dock_App/test/test-contains.html', context)\n elif table == \"associate\":\n return render(request, 'MedTAG_sket_dock_App/test/test-annotation.html', context)\n elif table == \"annotate\":\n return render(request, 'MedTAG_sket_dock_App/test/test-mentions.html', context)\n elif table == 'linked':\n return render(request, 'MedTAG_sket_dock_App/test/test-linked.html', context)\n\n return JsonResponse(error_json)",
"def table():\n if \"username\" in session:\n return render_template(\"table.html\")\n return abort(401)",
"def users(accountable, query):\n users = accountable.users(query)\n headers = ['display_name', 'key']\n if users:\n rows = [[v for k, v in sorted(u.items()) if k in headers]\n for u in users]\n rows.insert(0, headers)\n print_table(SingleTable(rows))\n else:\n click.secho('No users found for query {}'.format(\n query\n ), fg='red')",
"def show_table():\n\n title_list = ('ID', 'Platform', 'Producer', 'Year', 'Elements')\n \n return table, title_list",
"def show_users():\r\n users = User.query.order_by(User.last_name,User.first_name).all()\r\n return render_template('list.html', users=users)",
"def print_mistakes_table():\n conn = sq.connect(host='localhost', user='root',\n password='student', database='quiz')\n cursor = conn.cursor()\n\n cursor.execute(\"select * from mistakes\")\n data = cursor.fetchall()\n\n table = PrettyTable()\n table.field_names = ['Question', 'Given Answer','User Given Answer']\n for row in data:\n table.add_row(row)\n conn.close()\n\n return table",
"def listusers():\n\n try:\n users = User.query.order_by(User.email).all()\n click.echo(\n tabulate(\n [\n [u.username, u.email, \"admin\" if u.is_admin else None]\n for u in users\n ]\n )\n )\n except OperationalError:\n click.echo(\"Tabela de usuários inexistente...\")",
"def show_table(table):\n # id: string\n # Unique and random generated (at least 2 special char()expect: ';'),\n # 2 number, 2 lower and 2 upper case letter)\n # title: string\n # manufacturer: string\n # price: number (dollars)\n # in_stock: number\n title_list = [\"ID\", \"Title\", \"Manufacturer\",\n \"Price\", \"Number in stock\"]\n ui.print_table(table, title_list)",
"def table():\n user = Staff.is_login()\n if user is None:\n return redirect(url_for('auth.login'))\n\n pengusulans = Pengusulan.get_by_staff(user.id)\n ranks = Pengusulan.calculate_averages(pengusulans)\n return render_template(\"pengusulan/table.html\", pengusulans=pengusulans, ranks=ranks, pengusulan_code=pengusulan_code, user=user)",
"def test_user_list(self):\r\n self._add_demo_import()\r\n params = {\r\n 'api_key': self.api_key\r\n }\r\n res = self.testapp.get('/api/v1/a/users/list',\r\n params=params,\r\n status=200)\r\n\r\n # we should get back dict of count, users.\r\n data = json.loads(res.body)\r\n\r\n self.assertEqual(\r\n 1, data.get('count'), \"There are none by default. \" + res.body)\r\n self.assertEqual(\r\n 'admin',\r\n data.get('users')[0]['username'],\r\n \"The first user is from admin \" + res.body)\r\n self.assertEqual(\r\n '[email protected]',\r\n data.get('users')[0]['email'],\r\n \"The first user is from [email protected] \" + res.body)",
"def test_show(self):\n\n with self.client as c:\n response = c.get(f\"/users/{self.testuser.id}\")\n\n self.assertEqual(response.status_code, 200)\n self.assertIn(\"@alice\", str(response.data))",
"def Dashboard(user=None):\n\n\tif user == None:\n\t\tuser= defaultUser\n\n\ttable = user.htmlTable(head=5)\n\t\n\n\tphysics_score = user.subjectAccuracy(\"Physics\")\n\tbiology_score = user.subjectAccuracy(\"Biology\")\n\n\tbiology_numerator = biology_score[1]\n\tbiology_denominator = biology_score[0]\n\tbiology_accuracy = int(np.round(biology_score[2], 2) * 100)\n\n\tphysics_numerator = physics_score[1]\n\tphysics_denominator = physics_score[0]\n\tphysics_accuracy = int(np.round(physics_score[2], 2) * 100)\n\n\ttotal_questions = biology_denominator + physics_denominator\n\n\n\twikifier_results = {}\n\twikifier_results[\"Oski\"] = \"https://en.wikipedia.org/wiki/Oski_the_Bear\"\n\twikifier_results[\"Mitosis\"] = \"https://en.wikipedia.org/wiki/Mitosis\"\n\twikifier_results[\"Gravity\"] = \"https://en.wikipedia.org/wiki/Gravity\"\n\n\treturn render_template('indexStudent.html', user=user.name, table=table, wikifier_results=wikifier_results, \n\t\tphysics_numerator = physics_numerator, physics_denominator = physics_denominator, physics_accuracy = physics_accuracy, \n\t\tbiology_accuracy = biology_accuracy, biology_numerator = biology_numerator, biology_denominator = biology_denominator, total_questions=total_questions)",
"def test_detail(self, client, users):\n user = users[0]\n url = reverse('users:detail', args=(user.pk,))\n response = client.get(url)\n assert response.status_code == 200\n assert user.username in str(response.content)",
"def test_db_page():\n create_test_object(db)\n test_objects = get_test_objects(db)\n return render_template(\"hello_db.html\", test_objects=test_objects)",
"def zio_test_help():\n print(\"zio-ut [TESTS]\")\n print(\"\")\n print(\"[TESTS]: list of tests to perform. It can be the name of a specific test, or the name of a module of tests\")\n print(\" In alternative, you che use the test code:\")\n print(\"Code test case\")\n print(\"- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \")\n i = 0\n for t in test_list:\n print(str(i) + \" \" + t)\n i = i + 1",
"def test_users_listed(self):\n url = reverse('admin:core_user_changelist')\n res = self.client.get(url)\n\n self.assertContains(res, self.user.name)\n self.assertContains(res, self.user.plan)",
"def test_show_index_signed_in(self):\n with self.client as c:\n with c.session_transaction() as sess:\n sess[CURR_USER_KEY] = self.user1.id\n\n res = c.get(\"/\")\n html = res.get_data(as_text=True)\n\n self.assertEqual(res.status_code, 200)\n self.assertIn('<nav class=\"navbar navbar-custom border-bottom border-light navbar-expand-md navbar-dark sticky-top\">', html)\n self.assertIn('<a href=\"/users/tester1\">tester1</a>', html)\n self.assertIn('All Lists', html)\n self.assertIn(DEFAULT_IMAGE_URL, html)",
"def test_list(self, client, users):\n url = reverse('users:list')\n response = client.get(url)\n assert response.status_code == 200\n for user in users:\n assert user.username in str(response.content)",
"def show_users():\n return 'hehe'",
"def display_tournament_list():\r\n for tournament in tournaments_table:\r\n print(tournament['Nom'])"
] | [
"0.80550736",
"0.68134075",
"0.65610844",
"0.653583",
"0.65127206",
"0.64837927",
"0.6434516",
"0.6418729",
"0.63465595",
"0.6317583",
"0.6233405",
"0.62315404",
"0.6187078",
"0.61481196",
"0.6106531",
"0.60995424",
"0.6074437",
"0.6015064",
"0.6000204",
"0.59634566",
"0.5962333",
"0.595508",
"0.5899354",
"0.589534",
"0.58729374",
"0.5872143",
"0.58696735",
"0.58026505",
"0.58013934",
"0.5779767"
] | 0.7154061 | 1 |
Sends an API request to run one's test page on WebPagetest.org. | def WebPagetest(request, key):
test = models.user_test.Test.get_mem(key)
if not test:
msg = 'No test was found with test_key %s.' % key
return http.HttpResponseServerError(msg)
current_user = users.get_current_user()
if (test.user.key().name() != current_user.user_id() and not
users.is_current_user_admin()):
return http.HttpResponse('You can\'t play with tests you don\'t own')
# Help users autorun their tests by adding autorun=1 to the test url.
test_url_parts = list(urlparse.urlparse(test.url))
test_url_query = dict(cgi.parse_qsl(test_url_parts[4]))
test_url_query.update({'autorun': '1'})
test_url_parts[4] = urllib.urlencode(test_url_query)
test_url = urlparse.urlunparse(test_url_parts)
# TODO(elsigh): callback url.
webpagetest_url = ('%s&url=%s¬ify=%s' %
(WEBPAGETEST_URL, test_url,
urllib.quote('[email protected]')))
webpagetests = {}
# See http://goo.gl/EfK1r for WebPagetest instructions.
for location in WEBPAGETEST_LOCATIONS:
url = '%s&location=%s' % (webpagetest_url, location)
response = urlfetch.fetch(url)
json = simplejson.loads(response.content)
webpagetests[location] = json
params = {
'test': test,
'webpagetests': webpagetests
}
return util.Render(request, 'user_test_webpagetest.html', params) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_doGet(self) -> None:\n\n status_code = apicall.doGet(URL, self._browserheader)\n print(\"in do get:\", status_code)\n assert status_code == API_SUCCESS",
"def test(base_url='http://localhost:8000/'):\n with env.cd(settings.PROJECT_PATH):\n # env.run('python rnacentral/apiv1/tests.py --base_url=%s' % base_url)\n env.run('python rnacentral/portal/tests/selenium_tests.py --base_url %s --driver=phantomjs' % base_url) # pylint: disable=C0301\n env.run('python rnacentral/apiv1/search/sequence/tests.py --base_url %s' % base_url) # pylint: disable=C0301",
"def test_api_use_websurfer_topup_post(self):\n body = Internet()\n response = self.client.open(\n '/api/use/websurfer-topup/',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def GET_test(self, request):\n request.setHeader('Access-Control-Allow-Origin', settings.PDFCD_HEADER_VALUE)\n ip = apiutils.getIP(request)\n out.info('Test called (%s)\\n' % (ip))\n request.setResponseCode(*pdapi.getResponse(pdapi.OK))\n return \"SUCCESS\\n\"",
"def test_get_page(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)",
"def test_get_page(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)",
"def do_get(self, url):\n self.driver.get(url)",
"def test_http_request(self):\n\n response = requests.get(self.live_server_url)\n assert response.status_code == 200",
"def i_am_on_the_zoo_website():\n driver.get(\"http://www.thetestroom.com/webapp/\")",
"async def test_api_url_ending_with_index_html(self):\n self.set_source_parameter(\"url\", \"https://gatling/index.html\")\n response = await self.collect(get_request_json_return_value={})\n self.assert_measurement(response, api_url=\"https://gatling/js/stats.json\")",
"def test_simpleapp():\n class Handler(RequestHandler):\n def get(self):\n self.write('Hello')\n\n app = Application([url('/hello', Handler)])\n\n with Tester(app) as tester:\n response = yield tester.http_client.fetch(tester.url_for('/hello'))\n assert 'Hello' == text_body(response)",
"def call_api(page_num=0):\n base_url = \"http://data.sfgov.org/resource/jjew-r69b.json\"\n query_string = SoQL_query(page_num=page_num).generate_query()\n url = base_url+query_string\n response = requests.get(url)\n return response",
"def test_get_main_route():\n response = client.get(url)\n assert response.status_code == 200",
"def test_get_one(self):\n response = self.client.get('/api/v1/parcels/100')\n self.assertEqual(response.status_code, 200)",
"def test_homepage(self):\n rv = self.app.get('/')\n assert 'Enter your url here' in rv.data",
"def testSimple(self):\n\t\t\n\t\tself.getPage(\"player/status/\", method = \"GET\")\n\t\tself.assertStatus(status = 200)\n\t\t\n\t\tprint 'http headers:'\n\t\tfor header in self.headers.items():\n\t\t\tprint header\n\t\tprint 'body:'\n\t\tprint simplejson.dumps(self.body,indent=1)",
"def test_doPost(self) -> None:\n status_code = apicall.doPost(URL, self._browserheader)\n assert status_code != API_SUCCESS",
"def test_get(self):\n return self.doRequest(self.url, method=\"GET\", body=self.input)",
"def run_single_test(self, config):\n path_name = config['path_name']\n for request in config['request']:\n with self.subTest(request=request, test_name=config['test_name']):\n if 'args' in request:\n url = reverse(path_name, kwargs=request['args'])\n else:\n url = reverse(path_name)\n\n query_params = None\n if 'query_params' in request:\n query_params = urlencode(request['query_params'])\n url = '{}?{}'.format(url, query_params)\n\n data = None\n data_format = 'json'\n if 'data' in request:\n data = request['data']\n\n if 'data_format' in request:\n data_format = request['data_format']\n\n response_check = None\n if 'response_check' in request:\n response_check = request['response_check']\n\n self.call_api(\n url,\n data,\n self.tokens[request['user']],\n request['status'],\n config['type'],\n data_format=data_format,\n response_check=response_check)",
"def test_api_use_pokhara_internet_post(self):\n body = Body3()\n response = self.client.open(\n '/api/use/pokhara-internet/',\n method='POST',\n data=json.dumps(body),\n content_type='application/json')\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))",
"def open(self):\n self.driver.get('{}/submit'.format(self.config.get('Test', 'url')))\n return self",
"def test_GET_fetcher():\n params = {\n 'key1':'value1',\n 'arg2':'value2'\n }\n\n ## test that request goes ok\n resp = wf_utils.fetch_GET_request(\n GET_ECHO_ENDPOINT,\n params=params\n )\n\n ## test that response json can be parsed\n payload = resp.json()\n\n ## test that response contains expected echo\n assert payload['args'] == params\n assert payload['headers']['user-agent'] == wf_utils.USER_AGENT",
"def test_random_programming_quotes_api(self):\n response = api_client.get(\"/?query=programming\")\n self.assertEqual(response.status_code, 200)",
"def test_get(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)",
"def test_get(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)",
"def test_sample(self):\n response = self.tester.get('/sample-household/',\n content_type='html/text')\n self.assertEqual(response.status_code, 200)",
"def test_1():\n\tassert api_call().status_code == 200",
"def _run_test(self, host, path_info='/', cookie_dict=None, action=None,\n set_email=None, set_admin=None, continue_url=None,\n method='GET'):\n environ = {}\n wsgiref.util.setup_testing_defaults(environ)\n # The SERVER_NAME should never be used by the login module -- always defer\n # to the HTTP Host (so the user is not redirected to a different domain).\n environ['SERVER_NAME'] = 'do_not_use'\n environ['SERVER_PORT'] = '666'\n environ['SERVER_PROTOCOL'] = 'HTTP/1.1'\n environ['HTTP_HOST'] = host\n environ['PATH_INFO'] = path_info\n environ['REQUEST_METHOD'] = method\n if cookie_dict:\n cookie = Cookie.SimpleCookie(cookie_dict)\n cookie_value = ';'.join(m.OutputString() for m in cookie.values())\n environ['HTTP_COOKIE'] = cookie_value\n query_dict = {}\n if action:\n query_dict['action'] = action\n if set_email:\n query_dict['email'] = set_email\n if set_admin:\n query_dict['admin'] = set_admin\n if continue_url:\n query_dict['continue'] = continue_url\n if query_dict:\n environ['QUERY_STRING'] = urllib.urlencode(query_dict)\n\n response_dict = {}\n\n def start_response(status, headers):\n response_dict['status'] = int(status.split(' ', 1)[0])\n response_dict['headers'] = dict((k.lower(), v)\n for (k, v) in headers)\n\n login.application(environ, start_response)\n\n return (response_dict['status'],\n response_dict['headers'].get('location'),\n response_dict['headers'].get('set-cookie'),\n response_dict['headers'].get('content-type'))",
"def test_basic(self):\n api = V1Client()\n result = api.send_request({\n 'transactions_after': '2006-12-30',\n 'account_id': API_DETAILS['account_id'],\n 'site_tag': 'MAIN',\n 'authorization': API_DETAILS['site_tag_auth_code'],\n })",
"def test_main(self):\n path = reverse(\"main\")\n request = RequestFactory().get(path)\n response = index(request)\n assert response.status_code == 200"
] | [
"0.6589668",
"0.6506418",
"0.63798094",
"0.62275237",
"0.61125684",
"0.61125684",
"0.60818094",
"0.60572374",
"0.5959362",
"0.5956013",
"0.5955665",
"0.59321207",
"0.59309137",
"0.5920353",
"0.5911625",
"0.59028727",
"0.5895274",
"0.589447",
"0.5891544",
"0.58683366",
"0.58643067",
"0.58592033",
"0.5857823",
"0.58492845",
"0.58492845",
"0.58429366",
"0.5823361",
"0.5811466",
"0.5807058",
"0.57945144"
] | 0.6819468 | 0 |
Sync all foreign models in instance to data using their class object and manager name. More info | def _sync_foreign_model(self, instance, data, cls, manager_name):
# Remove all foreign instances that are not featured in data
data_ids = [item["id"] for item in data if "id" in item]
for existing_foreigns in getattr(instance, manager_name).all():
if existing_foreigns.id not in data_ids:
existing_foreigns.delete()
# Update all foreign instances using data
for item in data:
new_foreign = cls(**item, profile=instance)
new_foreign.save() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _save_reverse_relations(self, related_objects, instance):\n for field, related_field, data, kwargs in related_objects:\n # inject the PK from the instance\n if isinstance(field, serializers.ListSerializer):\n for obj in data:\n obj[related_field.name] = instance\n elif isinstance(field, serializers.ModelSerializer):\n data[related_field.name] = instance\n else:\n raise Exception(\"unexpected serializer type\")\n\n # reinject validated_data\n field._validated_data = data\n field.save(**kwargs)",
"def save(self):\n\n for vm in self.vms:\n vm.save()\n\n for obj in self.objects:\n obj.save()\n\n for vol in self.volumes:\n vol.save()",
"def save_object(self, obj, **kwargs):\n obj._complex_m2m_data={};\n if getattr(obj, '_m2m_data', None):\n for relatedObject in obj._meta.get_all_related_many_to_many_objects():\n if (relatedObject.field.rel.through._meta.auto_created):\n # These are non-trough ManyToMany relations and\n # can be updated just fine\n continue\n fieldName = relatedObject.get_accessor_name()\n if fieldName in obj._m2m_data.keys():\n obj._complex_m2m_data[fieldName] = (relatedObject, obj._m2m_data[fieldName])\n del obj._m2m_data[fieldName]\n\n serializers.ModelSerializer.save_object(self, obj, **kwargs);\n\n for (accessor, stuff) in obj._complex_m2m_data.items():\n (relatedObject, data) = stuff\n through = relatedObject.field.rel.through\n local_fieldName = relatedObject.field.m2m_reverse_field_name()\n remote_fieldName = relatedObject.field.m2m_field_name()\n\n # get the current set of existing relations\n existing = through.objects.filter(**{local_fieldName: obj});\n\n data_ids = [item.id for item in data]\n existing_ids = [getattr(item,remote_fieldName).id for item in existing]\n\n #print \"data_ids\", data_ids\n #print \"existing_ids\", existing_ids\n\n # remove relations that are in 'existing' but not in 'data'\n for item in list(existing):\n if (getattr(item,remote_fieldName).id not in data_ids):\n print \"delete\", getattr(item,remote_fieldName)\n item.delete() #(purge=True)\n\n # add relations that are in 'data' but not in 'existing'\n for item in data:\n if (item.id not in existing_ids):\n #print \"add\", item\n newModel = through(**{local_fieldName: obj, remote_fieldName: item})\n newModel.save()",
"def _load_related_objects(context, cls, db_data):\n obj_data = db_data.copy()\n for name, (obj_cls, _) in cls.object_fields.items():\n if obj_data.get(name):\n obj_data[name] = obj_cls(context, **obj_data.get(name).as_dict())\n else:\n del obj_data[name]\n\n return obj_data",
"def run(self):\n self.create_all_sync_instances()",
"def save(self, update_cache=True, *args, **kwargs):\n super(Changeset, self).save(*args, **kwargs)\n if self.closed and update_cache:\n from .tasks import update_cache_for_instance\n for relation in self._meta.get_all_related_objects():\n related = getattr(self, relation.get_accessor_name())\n type_name = related.model.instance_type.__name__\n ids = related.values_list('id', flat=True)\n for i in ids:\n update_cache_for_instance.delay(type_name, i)",
"def sync_to_ontology(self):\n self.ontology.sync_entity_to_graph(self)",
"def _on_post_syncdb(app, verbosity=2, db=DEFAULT_DB_ALIAS, **kwargs):\n app_models = [m for m in get_models(app) if issubclass(m, ContentItem)]\n for model in app_models:\n update_model_prefix(model, verbosity=verbosity, db=db)",
"def _prepare(cls):\n # the dbmodel is either the proxy base or ourselves\n dbmodel = cls._meta.concrete_model if cls._meta.proxy else cls\n cls.__dbclass__ = dbmodel\n if not hasattr(dbmodel, \"__instance_cache__\"):\n # we store __instance_cache__ only on the dbmodel base\n dbmodel.__instance_cache__ = {}\n super()._prepare()",
"def doSync (self) :\r\n \r\n self.factory.getSyncFor(self)",
"def sync_attributes(self, sync_imported=False):\n for cls in itertools.chain(self.classes(), self.object_properties(),\n self.individuals()):\n if not cls.label:\n try:\n cls.label.append(cls.__name__)\n except:\n cls.label.append(cls._name)\n if not cls.comment and cls.__doc__:\n cls.comment.append(inspect.cleandoc(cls.__doc__))\n if sync_imported:\n for onto in self.imported_ontologies:\n onto.sync_attributes()",
"def init_model(connection):\n db = connection\n\n for obj in common.__dict__.itervalues():\n if type(obj) == type and issubclass(obj, common.Model) and hasattr(obj, '__tablename__'):\n tablename = getattr(obj, '__tablename__')\n obj._object_store = Domain(db, tablename)\n collection_to_class[obj._object_store] = obj",
"def save_relations(self, obj, data, commit):\n\n relation_updater = self.updater(obj, data, self.relation_type)\n\n if commit:\n relation_updater.update()\n else:\n # append the updater instance to the object. Note that it's a list\n # since there can be more than one relation field per instance\n if not hasattr(obj, '_relation_updater'):\n obj._relation_updater = []\n obj._relation_updater.append(relation_updater)",
"def syncSave(self):\n for pyfile in self.files.values():\n pyfile.sync()\n\n for pypack in self.packages.values():\n pypack.sync()\n\n self.db.syncSave()",
"def hydrate_from_staff_api(sender, instance, **kwargs):\n if instance.pk:\n return\n\n instance.sync_with_staff_api()",
"def copy_relations(self, oldinstance):\n for image in oldinstance.images.all():\n image.pk = None\n image.gallery = self\n image.save()",
"def sync_tables():\n sync_table(ShoppingList)\n sync_table(User)\n sync_table(Category)\n sync_table(Feed)\n sync_table(News)\n sync_table(Photo)\n sync_table(Profile)\n sync_table(Video)\n sync_type(FeedPhoto)\n sync_type(NewsPhoto)",
"def class_to_db(self):",
"def update_model(self):\n pass",
"def update_associations(self):\n for dt_format, old_value, new_value in self.own_list:\n DescriptorFormatTypeManager.own(dt_format, self.entity, old_value, new_value)",
"def _post_sync(self):",
"def _object_update(self, obj, items):\n # many to many fields are saved after the main object\n m2ms = {}\n for key, value in items.iteritems():\n try:\n field = obj._meta.get_field(key)\n if isinstance(field, ManyToManyField):\n m2ms[key] = value\n else:\n setattr(obj, key, value)\n\n except FieldDoesNotExist:\n raise InvalidParameter(key)\n\n try:\n obj.full_clean()\n obj.save()\n except ValidationError as e:\n raise InvalidParameter(e.message_dict, override=True)\n\n for key, values in m2ms.iteritems():\n manager = getattr(obj, key)\n manager.clear()\n manager.add(*values)",
"def sync_with_database(self):\n # learn from db\n lports = self.nb_api.get_all(l2.LogicalPort)\n for lport in lports:\n port_id = \"{}:{}\".format(lport.lswitch.id, lport.id)\n self.cache_logical_port_by_port_id[port_id] = lport\n lrouters = self.nb_api.get_all(l3.LogicalRouter)\n for lrouter in lrouters:\n self.cache_logical_router_by_dpid[lrouter.id] = lrouter",
"def sync_tree_db(self) -> None:\n self.sync_tree_with_data(self.tree_db, self.data_db)",
"def save(self, force_insert=False, force_update=False, using=None,\n update_fields=None, **kwargs):\n self.cache_expire()\n super().save(force_insert, force_update, using, update_fields)\n\n cls = self.__class__.__name__\n if cls == \"Movement\":\n for a in self.attachments.all():\n a.auto_rename(**kwargs)\n\n if cls == \"Piece\":\n for a in self.attachments.all():\n a.auto_rename(**kwargs)\n for m in self.movements.all():\n if self.hidden:\n m.hidden = True\n else:\n m.hidden= False\n m.save(**kwargs)\n\n if kwargs.get(\"ignore_solr\"):\n pass\n elif kwargs.get(\"commit_solr\", True):\n self.solr_index(commit=True)\n else:\n self.solr_index(commit=False)",
"def _internal_store(self, modelobj):\n riak_object = modelobj._riak_object\n modelcls = type(modelobj)\n model_name = \"%s.%s\" % (modelcls.__module__, modelcls.__name__)\n store_version = self.store_versions.get(model_name, modelcls.VERSION)\n # Run reverse migrators until we have the correct version of the data.\n data_version = riak_object.get_data().get('$VERSION', None)\n while data_version != store_version:\n migrator = modelcls.MIGRATOR(\n modelcls, self, data_version, reverse=True)\n riak_object = migrator(riak_object).get_riak_object()\n data_version = riak_object.get_data().get('$VERSION', None)\n yield riak_object.store()\n returnValue(modelobj)",
"def flush():\n with transaction.atomic():\n if voter_records:\n NCVoter.objects.bulk_create(voter_records)\n with transaction.atomic():\n # This looks weird. Let me explain.\n # All the unsaved ChangeTracker instances have references\n # to the NCVoter instances from *before* the NCVoter instances\n # were saved. So they do not know the voter instances now have\n # IDs from being inserted. This re-sets the voter on the change\n # object, ensuring it knows the ID of its voter and can be saved\n # properly.\n for c in change_records:\n c.voter = c.voter\n c.voter_id = c.voter.id\n ChangeTracker.objects.bulk_create(change_records)\n change_records.clear()\n voter_records.clear()",
"def Automaticupdatesobjects():\n pass",
"def reinit_data(self):\n self.if_name_map, \\\n self.if_alias_map, \\\n self.if_id_map, \\\n self.oid_name_map = Namespace.get_sync_d_from_all_namespace(mibs.init_sync_d_interface_tables, self.db_conn)\n\n self.update_data()",
"def relate(self, qs):\n model_map = {}\n item_map = {}\n for item in qs:\n object_id = getattr(item, self._object_id_field)\n content_type = getattr(item, self._content_type_field)\n model_map.setdefault(content_type, {}) \\\n [object_id] = item.id\n item_map[item.id] = item\n for ct, items_ in model_map.items():\n for o in ct.model_class().objects.select_related() \\\n .filter(id__in=items_.keys()).all():\n setattr(item_map[items_[o.id]],self._content_object_field, o)\n return qs"
] | [
"0.59193707",
"0.5779825",
"0.5765561",
"0.57606983",
"0.5748647",
"0.5728996",
"0.57228345",
"0.5692319",
"0.5648922",
"0.56440777",
"0.5624399",
"0.5589713",
"0.55817914",
"0.5570557",
"0.5563866",
"0.55505055",
"0.5546178",
"0.55017614",
"0.54824173",
"0.5429872",
"0.54124266",
"0.53637755",
"0.5330571",
"0.532785",
"0.5320109",
"0.5315197",
"0.52983",
"0.5294128",
"0.52797806",
"0.5259059"
] | 0.7405185 | 0 |
Method has to check if any of `modules` contains `callable` object with name `method_name` and return list of such objects | def methods_importer(
method_name: str, modules: List[Union[str, ModuleType]]
) -> List[Callable]:
result = []
for module in modules:
try:
if isinstance(module, ModuleType):
mod = module
elif isinstance(module, str):
mod = importlib.import_module(module)
else:
raise TypeError('Must be list of strings or ModuleType')
met = getattr(mod, method_name, None)
if met:
result.append(mod)
# return met
except ImportError:
continue
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_function_list_from_modlist(self):\n function_list = []\n function_name_list = []\n for module in self.module_list:\n for name, obj in inspect.getmembers(module, inspect.isfunction):\n if inspect.getmodule(obj) == module:\n function_list.append(obj)\n function_name_list.append(name)\n return function_list",
"def _getModFunctions(modName, modSearch):\n # First find all callable functions they want\n try:\n mod = sys.modules[modName]\n modNames = dir(mod)\n callables = []\n for m in modNames:\n a = getattr(mod, m)\n if(hasattr(a, '__call__') and hasattr(a, '__class__')):\n if(a.__module__ == modSearch and a.__name__[0] != \"_\"):\n callables.append(a)\n return callables\n except Exception as e:\n print('!! Unable to functionalize the module: %s' % str(e))\n return None",
"def list_callables(self):\n self.logger.debug(\"List of callable API objects requested\")\n # Dict of subsystem object names to their callable methods.\n callables = {}\n for name, obj in self.systems.items():\n methods = []\n # Filter out methods which are not explicitly flagged for export\n for member in getmembers(obj):\n if is_api_method(obj, member[0]):\n methods.append(member[0])\n callables[name] = methods\n return msgs.list_reply(callables)",
"def lookup(obj):\n objList = [method_name for method_name in dir(obj)\n if callable(getattr(obj, method_name))]\n return objList",
"def _get_methods(self):\n\n methods = inspect.getmembers(self, predicate=callable)\n method_list = set()\n\n for name, _ in methods:\n if (name in ('proxy', 'start', 'stop', 'part', 'join',)\n or name[0] == '_'):\n continue\n\n method_list.add(name)\n\n return method_list",
"def __contains__(self, name):\n return name in self._modules",
"def process_module_list(self, modules):",
"def find_functions(module):\n for attrname in dir(module):\n attr = getattr(module, attrname)\n # iteratively get __module__ or __class__ (where __module__ fails for clas\n if callable(attr) and getattr(attr, '__module__', getattr(attr, '__class__', '')) == module.__name__:\n yield attr",
"def is_callable(self, name, method):\r\n return name in self._registry and self._registry[name].method == method",
"def search_import(\n method: str, modules: List[Union[str, ModuleType]]\n) -> Optional[object]:\n for module in modules:\n try:\n\n if isinstance(module, ModuleType):\n mod = module\n elif isinstance(module, str):\n # get module by string name\n mod = importlib.import_module(module)\n else:\n raise TypeError('Must be list of strings or ModuleType')\n\n # get method from module by string name\n met = getattr(mod, method, None)\n\n if met:\n return met\n\n except ImportError: # import_module can fail\n continue\n\n return None",
"def _get_rpc_method_names(self):\n return [name for name in dir(self) if _is_rpc_call_method(getattr(self, name))]",
"def get_method_list_from_classlist(self):\n method_list = []\n method_name_list = []\n for class_object in self.class_list:\n for name, obj in inspect.getmembers(class_object, inspect.ismethod):\n method_list.append(obj)\n method_name_list.append(name)\n return method_list",
"def assert_contains_method_reference_expression_in_m(\n self, clazz, method_name='main'):\n matches = list(filter_type_in_method(\n clazz, tree.MethodReference, method_name))\n if not matches:\n self.fail('No matching method reference found.')\n return matches",
"def _each_trait_method ( self, object ):\n dic = {}\n for klass in object.__class__.__mro__:\n for name, method in klass.__dict__.items():\n if (type( method ) is FunctionType) and (name not in dic):\n dic[ name ] = True\n yield name",
"def methods_of(obj):\r\n result = []\r\n for i in dir(obj):\r\n if callable(getattr(obj, i)) and not i.startswith('_'):\r\n result.append((i, getattr(obj, i)))\r\n return result",
"def get_complete_schedule(self, module_name = None):\n\n # from scratch\n if self.modules is None:\n self.populate_packages_and_modules()\n\n if module_name is None:\n module_name = self.top_module\n\n instance_dict = {}\n worklist = [ (module_name, self.modules[module_name]) ]\n while len(worklist) != 0:\n instance_name, module = worklist.pop()\n instance_dict[instance_name] = module\n for submodule_instance, submodule_type in module.submodules:\n if submodule_type in self.modules:\n worklist.append((instance_name + '.' + submodule_instance, self.modules[submodule_type]))\n\n partial_order = {}\n called_methods = {} # list of rules (and methods) that call a given method\n for instance_name, module in instance_dict.items():\n # add execution to partial order\n for i in range(len(module.execution)):\n partial_order[instance_name + '.' + module.execution[i]] = [instance_name + '.' + x for x in module.execution[i+1:]]\n # add method calls to partial order\n # get list of rules that call each method\n for rule, methods in module.method_calls_by_rule.items():\n full_rule_name = instance_name + '.' + rule\n for method in methods:\n full_method_name = instance_name + '.' + method\n if full_method_name not in called_methods:\n called_methods[full_method_name] = [full_rule_name]\n else:\n called_methods[full_method_name].append(full_rule_name)\n # make sure all lower-level methods appear in called_methods, even if they are not called by a rule\n for rule in module.execution:\n if rule.count('.') > 1 and not rule.split('.')[-1].startswith('RL_'):\n # this is a lower-level method\n if rule not in called_methods:\n called_methods[rule] = []\n # the items in called_methods are a list of rules and methods, this function helps to get just rules\n # similar to taking the transitive closure of called_methods\n def get_rules_from_rule_or_method(x):\n if x not in called_methods:\n # x is a rule or top-level method\n return [x]\n rules = [get_rules_from_rule_or_method(y) for y in called_methods[x]]\n rules = sum(rules, []) # flatten rules\n return list(set(rules))\n # create a new partial order that doesn't contain called methods\n new_partial_order = {}\n for first_rule, second_rules in partial_order.items():\n actual_first_rules = get_rules_from_rule_or_method(first_rule)\n\n actual_second_rules = []\n for second_rule in second_rules:\n actual_second_rules += get_rules_from_rule_or_method(second_rule)\n\n for r1 in actual_first_rules:\n if r1 not in new_partial_order:\n new_partial_order[r1] = actual_second_rules\n else:\n new_partial_order[r1] += actual_second_rules\n # cleanup new_partial_order\n for first_rule in new_partial_order:\n new_partial_order[first_rule] = list(set(new_partial_order[first_rule]))\n while new_partial_order[first_rule].count(first_rule) > 0:\n new_partial_order[first_rule].remove(first_rule)\n partial_order = new_partial_order.copy()\n\n full_schedule = []\n to_schedule = set(partial_order.keys())\n # schedule rules from end to beginning\n while len(to_schedule) > 0:\n removed_candidate = False\n for candidate in to_schedule:\n if len(partial_order[candidate]) == 0:\n to_schedule.remove(candidate)\n full_schedule = [candidate] + full_schedule\n # remove candidate from all the partial orders\n for x in partial_order:\n while partial_order[x].count(candidate) > 0:\n partial_order[x].remove(candidate)\n removed_candidate = True\n break\n if not removed_candidate:\n raise Exception(\"getting the full schedule failed\")\n\n return full_schedule",
"def getModules(runName=\"run\", ofClass=None):\n # Container dict for all modules found with a runName function\n modules = {}\n \n # Cycle through all python files, excluding any starting with '_' in this\n # package dir\n for f in os.listdir(os.path.dirname(__file__)):\n # Split into module name and extension\n mod_name, ext = os.path.splitext(f)\n # Must be a .py file and not start with '_'\n if ext != '.py' or mod_name.startswith('_'):\n continue\n # Import the module relative to the current package\n mod = importlib.import_module(\".\"+mod_name, __package__)\n\n # Cycle through all members in the module, looking for the entry point\n # function and subclasses if needed\n members = {'runName': None, 'subClass': []}\n for obj_name, obj in inspect.getmembers(mod):\n # The .getmembers() method returns a tuple with the first element\n # the full member name , and the second the member definition.\n \n # Check for our entry function if we have not found it yet\n if members['runName'] is None and \\\n inspect.isfunction(obj) and \\\n obj.__name__ == runName:\n members['runName'] = obj\n continue\n\n # Check for any subclasses\n if ofClass is not None and \\\n inspect.isclass(obj) and \\\n issubclass(obj, ofClass) and \\\n obj != ofClass:\n members['subClass'].append(obj)\n continue\n\n # Only add this module if we found a runName\n if members['runName'] is not None:\n modules[mod_name] = members\n\n return modules",
"def get_all():\n temp = inspect.getmembers(sys.modules[__name__], inspect.isclass)\n temp = [x for x in temp if x[0] not in [\"Method\", \"Radpro\"]]\n return temp",
"def get_all():\n return {\n _method : getattr(_ROOTObjectFunctions, _method)\n for _method in dir(_ROOTObjectFunctions)\n if not _method.startswith('_') and callable(getattr(_ROOTObjectFunctions, _method))\n }",
"def _methods(self, methods):\n\n # If we were given none, assume all\n if not methods:\n return self.methods.values()\n else:\n return [self.methods[meth] for meth in methods\n if meth in self.methods]",
"def _get_filter_classes_from_module(module_name):\n classes = []\n module = utils.import_object(module_name)\n for obj_name in dir(module):\n itm = getattr(module, obj_name)\n if _is_filter_class(itm):\n classes.append(itm)\n return classes",
"def _performOnEngines(self, methodName, targets, *args, **kwargs):\n log.msg(\"Performing %s on %r\" % (methodName, targets))\n # This will and should raise if targets is not valid!\n engines = self.engineList(targets)\n dList = []\n for e in engines:\n meth = getattr(e, methodName, None)\n if meth is not None:\n dList.append(meth(*args, **kwargs))\n else:\n raise AttributeError(\"Engine %i does not have method %s\" % (e.id, methodName))\n return dList",
"def _method_calls(fn):\n return [x[1] for x in re.findall(METHOD, getsource(fn))]",
"def module_functionalities(module: types.ModuleType, MARA_XXX: str, type) -> []:\n if MARA_XXX in dir(module):\n functionalities = getattr(module, MARA_XXX)\n if isinstance(functionalities, typing.Callable):\n functionalities = functionalities()\n if isinstance(functionalities, typing.Dict):\n functionalities = functionalities.values()\n if not isinstance(functionalities, typing.Iterable):\n raise TypeError(\n f'{module.__name__}.{MARA_XXX} should be or return a list or dict of {type.__name__}. Got \"{functionalities}\".')\n for functionality in functionalities:\n if not isinstance(functionality, type):\n raise TypeError(f'In {module.__name__}.{MARA_XXX}: Expected a {type.__name__}, got \"{functionality}\"')\n return functionalities\n else:\n return []",
"def callables(self):\n \n if hasattr(self, \"_callables\"):\n return self._callables\n \n # build a list of all the Callable objects\n # The old backend processed all operations first\n # (FIXME: duplicate for the sake of easy checking)\n self._callables = []\n\n for c in self._node.callables():\n if isinstance(c, idlast.Operation):\n self._callables.append(call.operation(self, c))\n \n for c in self._node.callables():\n if isinstance(c, idlast.Attribute):\n self._callables = self._callables + call.read_attributes(self, c)\n if c.readonly(): continue\n self._callables = self._callables + call.write_attributes(self, c)\n \n return self._callables",
"def modules_enabled(self, c):\n\n modules = []\n for name, module in self.modules.iteritems():\n modules.append( (name, module.__class__.__name__) )\n\n return modules",
"def assert_contains_lambda_expression_in_m(\n self, clazz, method_name='main'):\n matches = list(filter_type_in_method(\n clazz, tree.LambdaExpression, method_name))\n if not matches:\n self.fail('No matching lambda expression found.')\n return matches",
"def modules(cls):\n members = inspect.getmembers(cls, lambda a: not (inspect.isroutine(a) and a.__name__ == 'modules'))\n modules = [module for name, module in members if not name.startswith('_')]\n return modules",
"def filter_on_inclusion(expressions: list, methods: list) -> list:\n \n filtered_methods = set()\n \n for expression in expressions:\n valid_methods = [method for method in methods if matches_expression(expression, method)]\n \n filtered_methods += set(valid_methods)\n \n return filtered_methods",
"def get_rewards():\n this = modules[__name__]\n names, funcs = [], []\n for name, func in inspect.getmembers(this):\n\n # Is a definition a function\n if inspect.isfunction(func):\n # Is defined in this module\n if inspect.getmodule(func) == this:\n names.append(name)\n funcs.append(func)\n\n return tuple(names), tuple(funcs)"
] | [
"0.6745168",
"0.6412298",
"0.6275962",
"0.61267555",
"0.6027407",
"0.5975847",
"0.5927226",
"0.58631086",
"0.5823784",
"0.58145356",
"0.58093834",
"0.5742289",
"0.57135206",
"0.56821996",
"0.565382",
"0.5643577",
"0.56351846",
"0.5629629",
"0.56272805",
"0.5621702",
"0.55442744",
"0.55342805",
"0.5530577",
"0.5517566",
"0.5478025",
"0.54717684",
"0.5452656",
"0.5431275",
"0.54181355",
"0.5417446"
] | 0.72288847 | 0 |
on change event of survey_id field, if note is available in selected survey then display this note in note fields. | def on_change_survey(self, cr, uid, ids, survey_id, context=None):
if not survey_id:
return {}
notes = self.pool.get('survey').read(cr, uid, survey_id, ['note'])['note']
return {'value': {'note': notes}} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _on_clip_notes_changed(self):\n if liveobj_valid(self._sequencer_clip) and self._can_edit():\n time_start, time_length = self._get_clip_notes_time_range()\n self._clip_notes = self._get_notes_handler(self._sequencer_clip, time_start, self._pitches, time_length)\n else:\n self._clip_notes = []\n self._update_editor_matrix()\n self.notify_notes_changed()",
"def process_clarification(self, sid, clarification):\n if clarification['val'] != None:\n self.data.set_data(sid, clarification['field'].lower(), clarification['val'].lower())",
"def select_note(self, idx):\n if idx >= 0:\n key = self.notes_list_model.list[idx].key\n note = self.notes_db.get_note(key)\n # valid note, so note editing should be enabled\n self.view.set_note_editing(True)\n\n else:\n key = None\n note = None\n # no note selected, so we clear the UI (and display a clear\n # message that no note is selected) and we disable note\n # editing controls.\n self.view.clear_note_ui()\n self.view.set_note_editing(False)\n\n self.selected_note_key = key\n\n # when we do this, we don't want the change:{text,tags,pinned} events\n # because those should only fire when they are changed through the UI\n self.view.mute_note_data_changes()\n self.view.set_note_data(note)\n if key:\n self.view.set_note_status(self.notes_db.get_note_status(key))\n\n self.view.unmute_note_data_changes()",
"def survey_id(self, survey_id):\n\n self._survey_id = survey_id",
"def _onchange_field(self):\n if not self.secretary_contact_id:\n return\n if self.partner_type in ['dr', 'patient', 'secretary']:\n self.update({\n 'secretary_contact_id': False\n })",
"def _set_notes(self):\n if self._report_key == ReportTypes.SEARCH_DETAIL_REPORT and self._report_data['totalResultsSize'] > 0:\n self._set_search_notes()\n elif self._report_key != ReportTypes.SEARCH_DETAIL_REPORT:\n self._set_note()",
"def survey_id(self, survey_id):\n\n self.logger.debug(\"In 'survey_id' setter.\")\n\n self._survey_id = survey_id",
"def __add_fields_to_note(self, note):\n note.status_text = get_note_status_text(note)\n note.linked_person_url = \\\n self.get_url('/view', id=note.linked_person_record_id)\n note.flag_spam_url = \\\n self.get_url('/flag_note', id=note.note_record_id,\n hide=(not note.hidden) and 'yes' or 'no',\n signature=self.params.signature)\n note.source_datetime_local_string = self.to_formatted_local_datetime(\n note.source_date)\n note.should_show_inline_photo = self.should_show_inline_photo(\n note.photo_url)",
"def display_note(self, note):\n\t\tself.canvas.itemconfig(self.note, text = note)",
"def select_show_event(obj):\n new_text=generate_show_details_label(obj.parent.parent.shows,obj.show_id)\n print new_text\n obj.parent.parent.show_details_label.text=new_text",
"def _selection_changed(self, event):\n if self.typeCombo.get() in Constants.TASKS[0:3]:\n if self.subjectAdded == False:\n self._placeWidgets(self.subjectLabel, self.subjectCombo)\n self.subjectAdded = True\n else:\n if self.subjectAdded:\n # Hide subject label and combobox\n self.subjectLabel.place_forget()\n self.subjectCombo.place_forget()\n self.subjectAdded = False\n self.row -= 1",
"def display_notes(self, notes):\n\n # TODO: this can probably be a cached property like isneovim\n hassyntastic = bool(int(self._vim.eval('exists(\":SyntasticCheck\")')))\n\n if hassyntastic:\n self.__display_notes_with_syntastic(notes)\n else:\n self.__display_notes(notes)\n\n self._vim.command('redraw!')",
"def pSsnChanged(self):\n\t\tssn_widget = self.ui.findChild(QWidget, \"p_ssn\")\n\t\tssn = ssn_widget.toPlainText()\n\t\t\n\t\tif(len(ssn) == 11):\n\t\t\tp_name = self.ui.findChild(QWidget, \"p_name\")\n\t\t\tp_age = self.ui.findChild(QWidget, \"p_age\")\n\t\t\tp_length = self.ui.findChild(QWidget, \"p_length\")\n\t\t\t\n\t\t\t# Make database query with SSN and see if there's a match\n\t\t\t# --> update p_name, p_ssn, p_age, p_length\n\t\t\tQueryMatch = True\n\t\t\t\n\t\t\tif QueryMatch:\n\t\t\t\t# Test data\t\t\t\n\t\t\t\tif ssn == \"080290-123X\":\n\t\t\t\t\tp_name.setText(\"Tauno Testi\")\n\t\t\t\t\tp_age.setText(\"27\")\n\t\t\t\t\tp_length.setText(\"175 cm\")\n\t\t\t\telif ssn == \"120487-831C\":\n\t\t\t\t\tp_name.setText(\"Marjo Testelias\")\n\t\t\t\t\tp_age.setText(\"31\")\n\t\t\t\t\tp_length.setText(\"165 cm\")\n\t\t\t\t\n\t\t\t\tself.patient_ssn = ssn\n\t\t\t\tself.patient_chosen = True\n\t\t\telse:\n\t\t\t\t# no match, clear data and set flag to False\n\t\t\t\tp_name.setText(\"\")\n\t\t\t\tp_age.setText(\"\")\n\t\t\t\tp_length.setText(\"\")\n\t\t\t\tself.patient_chosen = False",
"def survey(request, survey_id):\n u = request.user\n survey_id = int(survey_id)\n if request.method =='POST':\n try:\n survey_meta = Survey.objects.get(id=survey_id)\n except Survey.DoesNotExist:\n return render_to_response('survey/m/notexist.html')\n survey = eval(\"%s.objects.get(user=request.user, uuid_token=request.POST['uuid_token'])\"%survey_meta.model_name)\n form = eval(\"%sForm( request.POST, instance=survey)\"%survey_meta.model_name)\n \n if form.is_valid():\n survey.completed = True\n survey.complete_date = datetime.datetime.now() \n form.save()\n return render_to_response('survey/m/completed.html')\n else:\n return render_to_response('survey/m/basic.html', \n {'form':form,\n 'survey_id': survey_id,\n 'uuid': survey.uuid_token,\n 'errors':form.errors})\n else:\n uuid = \"\"\n form = None \n try:\n s = Survey.objects.get(id=survey_id)\n status = eval(\"%s.objects.get(user=u,survey=s)\"%s.model_name)\n form = eval(\"%sForm()\"%s.model_name)\n except Survey.DoesNotExist:\n return render_to_response('survey/m/notexist.html')\n\n return render_to_response('survey/m/basic.html', {'form':form,\n 'survey_id': survey_id,\n 'uuid_token': status.uuid_token},\n context_instance=RequestContext(request))",
"def edit_note(self):\r\n names = [note.__str__() for note in self.source.notes]\r\n \r\n selected = self.notes_list.get(tk.ACTIVE)\r\n dex = names.index(selected) \r\n reading = self.source.notes[dex]\r\n \r\n self.session = tk.Toplevel(self.master, **jt.bframe_style)\r\n self.source.noteUI(self.session, base = reading)\r\n \r\n attach_frame = tk.Frame(self.session, **jt.bframe_style)\r\n a_button = tk.Button(attach_frame, \r\n text = 'Rewrite {}'.format(self.source.note_var), \r\n command = lambda x = reading: self.save_rewrite(x),\r\n **jt.button_style)\r\n \r\n a_button.grid()\r\n attach_frame.grid(row = 4, padx = 10, pady = 10)",
"def set_NotesInfo(self, value):\n super(RetrieveUserDashboardInputSet, self)._set_input('NotesInfo', value)",
"async def update(self, event_args: SurveyEventArgs):\n print(event_args.user.user_id_tel)\n print(event_args.survey.name)\n await asyncio.sleep(1.0)\n # TODO Log info in file",
"def visualize_survey(self):\n # Test if current nwb file contains Survey table\n if 'behavior' in self.model.nwb.processing:\n list_surveys = [v for v in self.model.nwb.processing['behavior'].data_interfaces.values()\n if v.neurodata_type == 'SurveyTable']\n if len(list_surveys) > 0:\n ShowSurveyDialog(nwbfile=self.model.nwb)",
"def _set_note(self):\n if self._report_data and self._report_data['note']:\n note = self._report_data['note']\n if note.get('createDateTime'):\n note['createDateTime'] = Report._to_report_datetime(note.get('createDateTime'))\n if note.get('expiryDateTime') and str(note['expiryDateTime']).startswith('0001-01-01'):\n note['expiryDateTime'] = ''\n elif note.get('expiryDateTime'):\n note['expiryDateTime'] = Report._to_report_datetime(note.get('expiryDateTime'), False)\n if note.get('effectiveDateTime'):\n note['effectiveDateTime'] = Report._to_report_datetime(note.get('effectiveDateTime'))\n if note.get('givingNoticeParty') and note['givingNoticeParty'].get('phoneNumber'):\n phone = note['givingNoticeParty'].get('phoneNumber')\n note['givingNoticeParty']['phoneNumber'] = phone[0:3] + '-' + phone[3:6] + '-' + phone[6:]",
"def note_print(self):\r\n self.viewbox.destroy()\r\n self.viewbox = tk.Frame(self.note_tab, **jt.bframe_style)\r\n\r\n names = [note.__str__() for note in self.source.notes]\r\n \r\n if len(names) > 0:\r\n selected = self.notes_list.get(tk.ACTIVE)\r\n dex = names.index(selected) \r\n reading = self.source.notes[dex]\r\n else:\r\n reading = kit.Note(0, 'place-holder', self.source.tbl)\r\n reading.smart_fill(['', '', '', ''])\r\n \r\n self.source.readUI(self.viewbox, reading)\r\n self.viewbox.grid(row = 1, padx = 10, pady = 5)",
"def notesChanged(self):\n if self.controller:\n self.versionProp.updateVersion(self.controller.current_version)",
"def renderNote(self):\n\t\tif self.activeNote:\n\t\t\tself.activeNote.render()",
"def hide_from_survey(self, hide_from_survey):\n\n self._hide_from_survey = hide_from_survey",
"def on_pre_enter(self):\r\n store = get_store()\r\n self.ids.Capillary.text = str(store.get('Capillary')[\"value\"])\r\n self.ids.CapillaryUnit.text = store.get('Capillary')[\"unit\"]\r\n self.ids.Towindow.text = str(store.get('Towindow')[\"value\"])\r\n self.ids.TowindowUnit.text = store.get('Towindow')[\"unit\"]\r\n self.ids.Idiameter.text = str(store.get('Idiameter')[\"value\"])\r\n self.ids.IdiameterUnit.text = store.get('Idiameter')[\"unit\"]\r\n self.ids.Voltage.text = str(store.get('Voltage')[\"value\"])\r\n self.ids.VoltageUnit.text = store.get('Voltage')[\"unit\"]\r\n self.ids.Electriccurrent.text = str(store.get('Electriccurrent')[\"value\"])\r\n self.ids.ElectriccurrentUnit.text = store.get('Electriccurrent')[\"unit\"]",
"def on_pre_enter(self):\r\n store = get_store()\r\n self.ids.Capillary.text = str(store.get('Capillary')[\"value\"])\r\n self.ids.CapillaryUnit.text = store.get('Capillary')[\"unit\"]\r\n self.ids.Towindow.text = str(store.get('Towindow')[\"value\"])\r\n self.ids.TowindowUnit.text = store.get('Towindow')[\"unit\"]\r\n self.ids.Idiameter.text = str(store.get('Idiameter')[\"value\"])\r\n self.ids.IdiameterUnit.text = store.get('Idiameter')[\"unit\"]\r\n self.ids.Voltage.text = str(store.get('Voltage')[\"value\"])\r\n self.ids.VoltageUnit.text = store.get('Voltage')[\"unit\"]\r\n self.ids.Electroosmosis.text = str(store.get('Electroosmosis')[\"value\"])\r\n self.ids.ElectroosmosisUnit.text = store.get('Electroosmosis')[\"unit\"]",
"def on_question_change(s, dt):\n\n def on_question_select(self, dt):\n \"\"\"\n This method switches screen for answering selected question.\n :param self: It is for handling class structure.\n :param dt: It is for handling callback input.\n :return: It is for changing screen to selected question's page.\n \"\"\"\n\n self.popup.dismiss()\n\n questions = open(\"data/questions.fay\", \"w+\")\n questions_all = \"\"\n\n question_id = self.list_quests.adapter.selection[0].text.split(\" \")[1]\n for key in self.data_all_ids.iterkeys():\n if question_id == key.split(\"*[SEAS-LIST-VIEW]*\")[0]:\n questions_all += self.data_all_ids[key]\n break\n\n for key, value in self.data_all_ids.iteritems():\n if not question_id == key.split(\"*[SEAS-LIST-VIEW]*\")[0]:\n questions_all += value\n\n questions.write(self.cipher.encrypt(bytes(questions_all)))\n questions.close()\n\n return self.on_question_skip()\n\n def color_hex(x):\n \"\"\"\n This method determines hex color code for given question according to its type.\n :param x: It is type of question.\n :return: It is hex code of color.\n \"\"\"\n\n quest_hex = {\"choice\": \"FF4530\",\n \"short\": \"FCAA03\",\n \"code\": \"5CB130\"\n }\n\n if x == \"programming\":\n hex_code = quest_hex[\"code\"]\n elif x == \"short_answer\":\n hex_code = quest_hex[\"short\"]\n else:\n hex_code = quest_hex[\"choice\"]\n\n return hex_code\n\n s.data_all_questions = database_api.getExam(Cache.get(\"info\", \"token\"),\n Cache.get(\"lect\", \"code\"),\n Cache.get(\"lect\", \"exam\")\n )[\"Questions\"]\n s.data_all_ids = {}\n for q in s.data_all_questions.itervalues():\n data_question = str(q[\"ID\"]) + \"*[SEAS-NEW-LINE]*\" + \\\n q[\"type\"] + \"*[SEAS-NEW-LINE]*\" + \\\n str(q[\"value\"]) + \"*[SEAS-NEW-LINE]*\" + \\\n q[\"text\"] + \"*[SEAS-NEW-LINE]*\"\n s.data_all_ids[str(q[\"ID\"]) + \"*[SEAS-LIST-VIEW]*\" + q[\"type\"]] = data_question\n\n popup_content = FloatLayout()\n s.popup = Popup(title=\"Questions\",\n content=popup_content,\n separator_color=[140 / 255., 55 / 255., 95 / 255., 1.],\n size_hint=(None, None),\n size=(s.width / 2, s.height / 2)\n )\n\n s.list_quests = ListView(size_hint=(.9, .8),\n pos_hint={\"center_x\": .5, \"center_y\": .55}\n )\n\n args_converter = lambda row_index, x: {\"text\": \"ID: {id} - Type: [color=#{hex}]{qtype}[/color]\".format(id=x[0],\n hex=color_hex(x[1]),\n qtype=x[1].replace(\"_\",\n \" \"\n ).title()\n ),\n \"markup\": True,\n \"selected_color\": (.843, .82, .82, 1),\n \"deselected_color\": (.57, .67, .68, 1),\n \"background_down\": \"data/img/widget_gray_75.png\",\n \"font_name\": \"data/font/CaviarDreams_Bold.ttf\",\n \"font_size\": s.height / 50,\n \"size_hint_y\": None,\n \"height\": s.height / 20,\n \"on_release\": partial(on_question_select,\n s\n )\n }\n s.list_quests.adapter = ListAdapter(data=[i.split(\"*[SEAS-LIST-VIEW]*\") for i in s.data_all_ids.iterkeys()],\n cls=ListItemButton,\n args_converter=args_converter,\n allow_empty_selection=False\n )\n popup_content.add_widget(s.list_quests)\n\n popup_content.add_widget(Button(text=\"Close\",\n font_name=\"data/font/LibelSuit.ttf\",\n font_size=s.height / 40,\n background_normal=\"data/img/widget_red.png\",\n background_down=\"data/img/widget_red_select.png\",\n size_hint_x=1,\n size_hint_y=None,\n height=s.height / 20,\n pos_hint={\"center_x\": .5, \"y\": .0},\n on_release=s.popup.dismiss)\n )\n\n s.popup.open()",
"def notify_wizard(self):\n if (self._wfield != None):\n self._wfield.update(self._conds or None)",
"def check_note_for_history(self):\r\n testrun_notes = [\r\n \"multiple loci suspected\",\r\n \"suspected multicopy, poor performance\",\r\n \"fixed allele 1\",\r\n \"very poor amplification\",\r\n \"very poor amplification, high off target percent\",\r\n \"poor amplification, maybe redesign\",\r\n \"mono-allele 1?\",\r\n \"redesign primer\",\r\n \"most of target\",\r\n \"poor performance\",\r\n \"poor performance, primers off target\",\r\n \"off target amp\",\r\n \"mono-allele 1\",\r\n \"mono-allele 2 and off target\",\r\n \"Nate said it is a mess\",\r\n \"off target amp\",\r\n \"mono-allele 1 and off target\"\r\n ]\r\n if self.note == \"No primers made by primer3\":\r\n self.add_history(\"2018-2-12\",\"Nate\",\"primers were not made for this sequence variation\")\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n elif self.note == \"Removed by nate, close to other SNP\":\r\n self.add_history(\"2018-2-19\",\"Nate\",\"Primers designed for this SNP were taken out, were to close to other SNP\")\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n elif self.note == \"Predicted to form hetrodymer\":\r\n self.add_history(\"2018-2-19\",\"Nate\",\"Predicted to form hetrodymer\")\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n elif self.note == \"no valid primer pair could be made for this position\":\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n elif self.note in testrun_notes:\r\n self.add_history(\"2018-2-23\",\"Thomas\",self.note)\r\n self.note = \"sequence variant selected by GBS-SNP-selection\"\r\n #check if any were missed.\r\n if self.active and self.note != \"sequence variant selected by GBS-SNP-selection\":\r\n pass #print(self.note)\r",
"def on_pre_enter(self):\r\n store = get_store()\r\n self.ids.Capillary.text = str(store.get('Capillary')[\"value\"])\r\n self.ids.CapillaryUnit.text = store.get('Capillary')[\"unit\"]\r\n self.ids.Towindow.text = str(store.get('Towindow')[\"value\"])\r\n self.ids.TowindowUnit.text = store.get('Towindow')[\"unit\"]\r\n self.ids.Idiameter.text = str(store.get('Idiameter')[\"value\"])\r\n self.ids.IdiameterUnit.text = unicode(store.get('Idiameter')[\"unit\"])\r\n self.ids.Pressure.text = str(store.get('Pressure')[\"value\"])\r\n self.ids.PressureUnit.text = store.get('Pressure')[\"unit\"]\r\n self.ids.Detectiontime.text = str(store.get('Detectiontime')[\"value\"])\r\n self.ids.DetectiontimeUnit.text = store.get('Detectiontime')[\"unit\"]",
"def on_get(self, req, resp, **kwargs):\n note_id = kwargs['note_id']\n self.validate_note_id(note_id)\n note = self.get_note_with_access_check(req.context, note_id)\n resp.text = self.get_note_details(note)\n resp.status = falcon.HTTP_200"
] | [
"0.54103535",
"0.5384617",
"0.5342676",
"0.531327",
"0.52878267",
"0.52672136",
"0.5042082",
"0.49997288",
"0.49391526",
"0.49161386",
"0.49144882",
"0.49064714",
"0.48971125",
"0.4877697",
"0.48331854",
"0.47828805",
"0.4776097",
"0.47532988",
"0.47387284",
"0.46859652",
"0.46377048",
"0.46272117",
"0.46220168",
"0.46034786",
"0.45915082",
"0.45707864",
"0.4562763",
"0.45528558",
"0.4546072",
"0.45436296"
] | 0.7571935 | 0 |
Parses the given string in infix notation. | def parse_infix(input: str) -> Node:
parsed = ParsedString(input).tokenize()
ans = parse_e(parsed)
return ans | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def evaluate_infix(string):\n return postfix(infix_to_postfix(string))",
"def calculate_infix_expression(cls, expression):\n\t\tlogger.info(f\"in the calculate infix expression {expression}\")\n\t\telements = expression.split()\n\t\tstack = []\n\t\ttry:\n\t\t\tfor e in elements:\n\t\t\t\tif not e.isdigit() and e != \")\":\n\t\t\t\t\tstack.append(e)\n\t\t\t\tif e.isdigit() and not cls.is_operator(stack[-1]):\n\t\t\t\t\tstack.append(e)\n\t\t\t\tif e.isdigit() and cls.is_operator(stack[-1]):\n\t\t\t\t\toperator = stack.pop()\n\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\tresult = cls.apply_math_operations(float(operand1), float(e), operator)\n\t\t\t\t\tif stack[-1] == \"(\":\n\t\t\t\t\t\tstack.append(str(result))\n\t\t\t\t\telse:\n\t\t\t\t\t\traise Exception(\"invalid input\")\n\t\t\t\t\t\tbreak\n\t\t\t\tif e == \")\":\n\t\t\t\t\tvalue = stack.pop()\n\t\t\t\t\tob = stack.pop()\n\t\t\t\t\tif (ob == \"(\"):\n\t\t\t\t\t\tstack.append(str(value))\n\t\t\t\t\telif (cls.is_operator(ob)):\n\t\t\t\t\t\toperand1 = stack.pop()\n\t\t\t\t\t\tstack.pop()\n\t\t\t\t\t\tresult = cls.apply_math_operations(float(operand1), float(value), ob)\n\t\t\t\t\t\tstack.append(str(result))\n\n\t\t\tanswer = float(stack[0])\n\t\t\tlogger.info(f\"the answe is {answer}\")\n\t\t\treturn answer\n\t\texcept Exception as e:\n\t\t\traise Exception(\"Exception from the infix function\")",
"def infix_to_postfix(string):\n \n # Validate and tokenize the string\n tokens = validate(string)\n \n # Initialize the stack\n s = Stack()\n\n # Ready the final postfix expression\n postfix = ''\n \n # List of operators that have to be handled\n operators = ['+', '-', '*', '/', '^', 'sqrt', 'u-', '(', ')']\n \n # Iterate through tokens\n for token in tokens:\n if token in operators:\n if token in ['sqrt', 'u-']:\n # Square root and unary minus have the highest precendence. So\n # they get pushed on to the stack immediately\n s.push(token)\n elif token == '^':\n top = s.peek()\n while top in ['sqrt', 'u-']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token in ['*', '/']:\n # Multiplication and division have the same precedence. Order\n # is determined by order of appearance\n top = s.peek()\n while top in ['sqrt', 'u-', '^']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token in ['+', '-']:\n # Addition and subtraction have the same precedence. Order is\n # determined by order of appearance\n top = s.peek()\n while top in ['sqrt', 'u-', '^', '*', '/']:\n postfix += s.pop() + ' '\n top = s.peek()\n s.push(token)\n elif token == '(':\n s.push(token)\n elif token == ')':\n top = s.peek()\n while top != '(':\n postfix += s.pop() + ' '\n top = s.peek()\n s.pop()\n else: # Token is a number or variable\n postfix += token + ' '\n\n # Pop out any more operators that might be sitting on the stack\n while(len(s)):\n postfix += s.pop() + ' '\n\n # Get rid of trailing whitespace and print\n postfix = postfix.strip()\n return postfix",
"def _parse_ins_string(string):\n istart_markers = set([\"[\", \"(\", \"!\"])\n marker_dict = {\"[\": \"]\", \"(\": \")\", \"!\": \"!\"}\n # iend_markers = set([\"]\",\")\",\"!\"])\n setdum = {\"dum\", \"DUM\"}\n obs_names = []\n slen = len(string)\n idx = 0\n while True:\n if idx >= slen - 1:\n break\n char = string[idx]\n if char in istart_markers:\n # em = iend_markers[istart_markers.index(char)]\n em = marker_dict[char]\n # print(\"\\n\",idx)\n # print(string)\n # print(string[idx+1:])\n # print(string[idx+1:].index(em))\n # print(string[idx+1:].index(em)+idx+1)\n eidx = min(slen, string.find(em, idx + 1))\n obs_name = string[idx + 1 : eidx]\n if obs_name not in setdum:\n obs_names.append(obs_name)\n idx = eidx + 1\n else:\n idx += 1\n return obs_names",
"def infix_to_postfix(infix_expr):\n # Append adds new item to list\n # Concat creates a new list every time instead\n\n opstack = StackArray()\n res = []\n lstr = infix_expr.split()\n # l_para = r_para = 0\n # operator precedence dict\n prec = { # higher val = higher prec\n \"(\" : 4,\n \"^\" : 3, # r-to-l (i.e. 2^3^2 = 2^(3^2) )\n \"~\" : 3, # right-to-left (i.e. -3^2 = -9)\n # '*/+-' are associated left to right\n \"*\" : 2,\n \"/\" : 2,\n \"+\" : 1,\n \"-\" : 1\n }\n for token in lstr:\n if token[0] in '0123456789':\n res.append(token)\n # not opstack.is_empty() guards against IndexError on empty peek\n if not opstack.is_empty() and opstack.peek() == '^':\n res.append(opstack.pop())\n if not opstack.is_empty() and opstack.peek() == '~':\n res.append(opstack.pop())\n elif token == '(':\n # l_para += 1\n opstack.push(token)\n elif token == ')':\n # r_para += 1\n # opstack can't be empty for proper formatted input\n while opstack.peek() != '(':\n res.append(opstack.pop())\n opstack.pop() # remove left paran '('\n else: # token is ^ ~ * / + -: <-- operators\n while not opstack.is_empty() and prec[token] <= prec[opstack.peek()]:\n if opstack.peek() == '(':\n break\n elif token == '^' and opstack.peek() == '~':\n break\n else:\n res.append(opstack.pop())\n opstack.push(token)\n # if l_para != r_para:\n # raise SyntaxError\n while not opstack.is_empty():\n res.append(opstack.pop())\n res = \" \".join(res)\n res.strip()\n return res",
"def infixToPostfix(inFixStr):\n postFixList = []\n s = Stack()\n chList = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\"\n prec = {\"(\": 0, \"+\": 1, \"-\": 1, \"*\": 2, \"/\": 2} # operator precedence\n\n tok = inFixStr.split(\" \")\n for ch in tok: # ch can be (,), operand, operator\n if ch in chList: # the easy case when token is an operand\n postFixList.append(ch)\n elif ch == \"(\": # easy case of (\n s.push(ch)\n elif ch == \")\": # keep popping and appending until (\n top = s.pop()\n while top != \"(\":\n postFixList.append(top)\n top = s.pop() # pop next\n else: # now we are at opeartors\n # pop higher order operators first\n while not s.isEmpty() and prec[s.peek()] > prec[ch]:\n postFixList.append(s.pop())\n s.push(ch) # push current opeartor\n\n while not s.isEmpty(): # pop everything else in the stack\n postFixList.append(s.pop())\n return \" \".join(postFixList)",
"def infix_to_postfix(string):\n tokenlist = string.split()\n output = []\n stack = create_stack()\n for token in tokenlist:\n if token == '(':\n stack.push(token)\n elif token == ')':\n toptoken = stack.pop()\n while toptoken != '(':\n output.append(toptoken)\n toptoken = stack.pop()\n elif token == '*' or token == '/':\n toptoken = stack.top()\n while toptoken in ['*','/']:\n output.append(stack.pop())\n toptoken = stack.top()\n stack.push(token)\n elif token == '+' or token == '-':\n toptoken = stack.top()\n while toptoken in ['*','/','+','-']:\n output.append(stack.pop())\n toptoken = stack.top()\n stack.push(token)\n else:\n output.append(token)\n while stack.length() > 0:\n output.append(stack.pop())\n space= ' '\n newstr = space.join(output)\n return newstr",
"def toPostfix (self,infix):\n postfix = []\n stack = []\n # Loop over characters in the input string\n for char in infix:\n # If char is a number add it to postfix\n if isFloat(char):\n postfix.append(char)\n # If its a special number add it to postfix\n elif char in Calculator.specialNumbers:\n postfix.append(char)\n # If char is a function push it onto the stack\n elif char in Calculator.functions:\n stack.append(char)\n # If the char is a function argument separator (,) pop operators off the stack onto\n # postfix until ( is reached\n elif char == ',':\n while stack[-1] != '(':\n postfix.append(stack.pop())\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # If char is an operator O\n elif char in Calculator.operators:\n # While there is an operator, P, on the top of stack\n while len(stack)>0 and stack[-1] in Calculator.operators:\n stackTop = stack[-1]\n precChar = Calculator.operators[char][1]\n precStackTop = Calculator.operators[stackTop][1]\n # If O in -?+* and its precedence is <= P, pop P off stack\n if char in Calculator.operators and precChar <= precStackTop:\n postfix.append(stack.pop())\n else:\n break\n # Push O onto stack\n stack.append(char)\n # If char is (, push it onto the stack\n elif char == '(':\n stack.append(char)\n # If char is )\n elif char == ')':\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # While top of stack isn't ( pop operators off the top of the stack\n while stack[-1] != '(':\n postfix.append(stack.pop())\n # If the size of the stack reaches 0 without finding a ( there are unmatched brackets.\n if len(stack) == 0:\n return \"Unmatched Error\"\n # Pop ( off the stack, but not onto output queue\n stack.pop()\n # If the token at the top of the stack is a function pop it off the stack and add to postfix\n if len(stack) > 0 and stack[-1] in Calculator.functions:\n postfix.append(stack.pop())\n # Finally pop all the operators off the stack onto postfix\n while len(stack)>0:\n # If the operator on the top of the stack is () then there are unmatched brackets\n if stack[-1] in '()':\n return \"Unmatched Error\"\n postfix.append(stack.pop())\n return postfix",
"def infixToPostfix(infix):\n postfix = []\n stackArr = []\n scanOperand = False\n hasIntegral = False\n hasDecimal = False\n currentOperand = 0\n decimal = 1\n for ch in infix:\n currentPrio = charPrio(ch)\n if currentPrio < 0: # current ele is operand\n if not (ch.isdigit() or ch == '.'):\n inputError()\n return\n if not scanOperand:\n scanOperand = True\n if ch == '.':\n if not hasIntegral:\n formatError()\n return\n hasDecimal = True\n continue\n if hasDecimal:\n if ch == '.':\n formatError()\n return\n currentOperand = currentOperand + 0.1 ** decimal * int(ch)\n decimal += 1\n else:\n if not hasIntegral:\n hasIntegral = True\n currentOperand = currentOperand * 10 + int(ch)\n elif currentPrio == 0:\n # none operation\n pass\n else:\n # and operand into postfix expression\n if scanOperand:\n scanOperand = False\n hasDecimal = False\n hasIntegral = False\n decimal = 1\n postfix.append(currentOperand)\n currentOperand = 0\n # handle operator\n if isEmpty(stackArr):\n push(stackArr, ch) # push into stack\n elif currentPrio > prio[peek(stackArr)]:\n push(stackArr, ch) # push into stack\n elif currentPrio == 1: # ')'\n while (not isEmpty(stackArr)) and currentPrio <= prio[peek(stackArr)]:\n ele = pop(stackArr)\n if ele != '(':\n postfix.append(ele) #pop out of stack, then add into postfix expression\n else:\n break\n else:\n while (not isEmpty(stackArr)) and currentPrio <= prio[peek(stackArr)] and prio[peek(stackArr)] < 5 :\n ele = pop(stackArr)\n if ele != '(' or ele != ')':\n postfix.append(ele) #pop out of stack, then add into postfix expression\n push(stackArr, ch) # push into stack\n if scanOperand:\n postfix.append(currentOperand)\n while not isEmpty(stackArr):\n ele = pop(stackArr)\n if ele != '(' or ele != ')':\n postfix.append(ele) #pop out of stack, then add into postfix expression\n return postfix",
"def infix_to_postfix(input_str): # postfix requires that all operators proceed after the two operands that they work on\n\n \"\"\"Input argument: a string containing an infix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression \"\"\"\n if input_str is None: raise ValueError\n # Split input string\n term_list = input_str.split()\n #print(\"TERM LIST \",term_list) \n # Create output list, will be fed to postfix_eval() at end\n output_list = []\n # initialize stack large enough to contain all operators\n operator_stack = Stack(len(term_list)//3+1)\n for term in term_list:\n # check for operand, if present append to output list\n if operand_present(term) is True:\n output_list.append(term)\n # check for operator\n elif operator_present(term) or term == '(' or term == ')':\n #if operand_stack.size()<2: \n # raise PostfixFormatException(\"Insufficient operands\")\n # Check for open parentheses\n if term == '(': operator_stack.push(term)\n # Check for closing parentheses, pop stack until open parentheses found\n elif term == ')':\n while 1:\n token = operator_stack.pop()\n if token != '(': \n output_list.append(token)\n else: break\n # Otherwise push to stack but pop any higher/equal order operators\n else:\n sort_operators(term, operator_stack, output_list)\n #print(operator_stack.peek())\n #else: raise PostfixFormatException(\"Invalid token\")\n #if len(term_list) % 3 != 0: raise PostfixFormatException(\"Too many operands\")\n while operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n new_str = (\" \".join(output_list))\n #print(\"NEW STR \", new_str)\n return new_str",
"def infix_to_postfix(input_str: str) -> Any:\n \"\"\"Input argument: a string containing an infix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression \"\"\"\n stack = Stack(30)\n if input_str == '':\n return ''\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n order = {}\n order[\"+\"] = 1\n order[\"-\"] = 1\n order[\"*\"] = 2\n order[\"/\"] = 2\n order[\"**\"] = 3\n order[\"<<\"] = 4\n order[\">>\"] = 4\n pfix_str = ''\n split_list = input_str.split()\n for i in split_list:\n new_val = i.lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if new_val.isdigit() and pfix_str == \"\":\n pfix_str = pfix_str + i\n elif i in op_list:\n if not stack.is_empty():\n p = stack.peek()\n while 0 < stack.size():\n p = stack.peek()\n if p == \"(\":\n break\n if i == \"**\":\n if order[p] <= order[i]:\n break\n else:\n p1 = stack.pop()\n pfix_str = pfix_str + \" \" + p1\n elif order[p] < order[i]:\n break\n else:\n p2 = stack.pop()\n pfix_str = pfix_str + \" \" + p2\n stack.push(i)\n elif i == \"(\":\n stack.push(i)\n elif new_val.isdigit():\n pfix_str = pfix_str + \" \" + i\n elif i == \")\":\n p = stack.peek()\n while p != \"(\":\n pfix_str = pfix_str + \" \" + stack.pop()\n if not stack.is_empty():\n p = stack.peek()\n stack.pop()\n while not stack.is_empty():\n pop3 = stack.pop()\n pfix_str = pfix_str + \" \" + pop3\n return pfix_str",
"def infix_to_postfix(string_input):\n stack_ops = []\n output = []\n value = \"\"\n\n for item in string_input:\n # item = operator\n if item in ops_prec.keys():\n value = value_to_output(value, output)\n\n # pop elements while they have lower precedence\n while (stack_ops\n and stack_ops[-1] in ops_prec.keys()\n and ops_prec[item] <= ops_prec[stack_ops[-1]]):\n output.append(stack_ops.pop())\n # else put item on stack\n stack_ops.append(item)\n\n # subexpression, delay precedence\n elif item == '(':\n value = value_to_output(value, output)\n\n stack_ops.append(item)\n elif item == ')':\n value = value_to_output(value, output)\n\n # flush output until ( is reached on stack\n while (stack_ops and stack_ops[-1] != '('):\n output.append(stack_ops.pop())\n # remove '('\n stack_ops.pop()\n\n # value = operand\n else:\n # concatenation of value for multidigit ones\n value += item\n # output.append(item) # this would be for one digit\n\n # flush stack to output\n value = value_to_output(value, output)\n\n while stack_ops:\n output.append(stack_ops.pop())\n\n return output",
"def prefix_to_postfix(input_str: str) -> Any:\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n stack = Stack(30)\n if input_str == \"\":\n return (\"\")\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n split_list = input_str.split()\n track = len(split_list) - 1\n while track >= 0:\n new_val = split_list[track].lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if new_val.isdigit():\n stack.push(split_list[track])\n track = track - 1\n elif split_list[track] in op_list:\n first = stack.pop()\n second = stack.pop()\n stack.push(first + \" \" + second + \" \" + split_list[track])\n track = track - 1\n else:\n break\n postfix = stack.pop()\n return postfix",
"def Calc_infix(self,infix):\r\n\r\n stak=[]\r\n for i in range(0, len(infix)):\r\n if (infix[i] == '+') or (infix[i] == '-') or (infix[i] == '*') or (infix[i] == '/'):\r\n if len(stak) > 1:\r\n tmp = self.Check_is_valid_data(stak[len(stak) - 1])\r\n tmp1 = self.Check_is_valid_data(stak[len(stak) - 2])\r\n if (tmp == -1) or (tmp1 == -1):\r\n return False\r\n if tmp == -2:\r\n tmp = stak[len(stak) - 1]\r\n elif tmp == -3:\r\n tmp = extra_functions.convert_string(stak[len(stak) - 1])\r\n\r\n else:\r\n tmp = tmp[0]\r\n\r\n if tmp1 == -2:\r\n tmp1 = stak[len(stak) - 2]\r\n elif tmp1 == -3:\r\n\r\n tmp1 = extra_functions.convert_string(stak[len(stak) - 2])\r\n\r\n else:\r\n tmp1 = tmp1[0]\r\n\r\n stak = stak[:-1]\r\n if infix[i] == '-':\r\n stak[len(stak) - 1] = tmp - tmp1\r\n elif infix[i] == '+':\r\n stak[len(stak) - 1] = tmp + tmp1\r\n elif infix[i] == '*':\r\n stak[len(stak) - 1] = tmp * tmp1\r\n elif infix[i]== '/':\r\n if tmp1 != 0:\r\n stak[len(stak) - 1] = int(tmp / tmp1)\r\n else:\r\n return False\r\n else:\r\n if (infix[i] == '+') or (infix[i] == '-'):\r\n\r\n tmp = self.Check_is_valid_data(stak[len(stak) - 1])\r\n if tmp == -1:\r\n return False\r\n elif tmp == -2:\r\n tmp = stak[len(stak) - 1]\r\n elif tmp == -3:\r\n\r\n tmp = extra_functions.convert_string(stak[len(stak) - 1])\r\n\r\n else:\r\n tmp = tmp[0]\r\n if infix[i] == '-':\r\n stak[0] = tmp * -1\r\n else:\r\n stak[0] = tmp\r\n else:\r\n return False\r\n elif (infix[i] == 'lengthof') or (infix[i]== 'sizeof') or (infix[i] == 'type'):\r\n if len(stak) > 0:\r\n tmp = self.Check_is_valid_data(stak[len(stak) - 1])\r\n if (((tmp == 0) or (tmp == -1) or (tmp == -2) or (tmp == -3)) and ((infix[i]== 'lengthof') or (infix[i] == 'sizeof'))):\r\n return False\r\n elif ((tmp == 0) or (tmp == -1) or (tmp == -2) or (tmp == -3)) and (infix[i] == 'type'):\r\n stak[len(stak) - 1] = 0\r\n else:\r\n stak = stak[:-1]\r\n tmp1 = self.Type(tmp[1])\r\n\r\n if infix[i] == 'lengthof':\r\n stak.append(int(tmp[2] / tmp1))\r\n elif infix[i] == 'sizeof':\r\n stak.append(tmp[2])\r\n else:\r\n stak.append(tmp[0])\r\n else:\r\n return False\r\n else:\r\n if infix[i] == '?':\r\n stak.append(0)\r\n else:\r\n tmp = self.Check_is_valid_data(infix[i])\r\n if self.Data_types.__contains__(infix[i]):\r\n stak.append(self.Type(infix[i]))\r\n continue\r\n if tmp == -1:\r\n return False\r\n else:\r\n stak.append(infix[i])\r\n\r\n if stak.__len__() == 1:\r\n return stak\r\n return False",
"def infix_to_postfix(infix:str) -> str:\n stack = deque()\n precedence = {'+':1, '-':1,\n '*':2, '/':2,\n '^':3, '(':-9\n }\n output = \"\"\n for ch in infix:\n if ch not in {'+', '-', '*', '/', '^', '(', ')'}:\n output += ch\n elif ch == '(':\n stack.append(ch)\n elif ch == ')':\n while len(stack) > 0 and\\\n stack[-1] != '(':\n output += stack.pop()\n stack.pop()\n else:\n while len(stack) > 0 and\\\n precedence[stack[-1]] >= precedence[ch]:\n output += stack.pop()\n stack.append(ch)\n while len(stack) > 0:\n output += stack.pop()\n return output",
"def toPostfix(infix):\n output = \"\" # Output stack - the numbers in our expression\n operators = \"\" # Operator stack (using string for ease but could be a list)\n precedence = {\"*\": 100, \"/\": 90, \"+\": 80, \"-\": 70, \"(\": 60, \")\": 50} # Operator precedence dictionary - operator characters mapped to an arbitrary numeric value representing their precedence (BOMDAS)\n \n #Loop through characters\n for c in infix:\n #If c is a number\n if (c.isdigit()):\n output += c\n #Else if c is a function - ignoring these for now\n #Else if c is an operator - + - * / might account for x and division ASCII symbol later\n elif c in {\"+\", \"-\", \"*\", \"/\"}:\n # While there is still an operator left at the top of the stack\n # AND the operator at the top of the stack has greater precedence\n # OR the operator at the top of the stack has equal precedence and the token is left associative (don't know what this means, ignoring for now)\n # AND that operator is not a left parenthesis '('\n # Note: \\ tells python that a statement will continue on to the next line\n while len(operators) > 0 and operators[-1] != '(' and precedence[operators[-1]] > precedence[c]:\n # Pop the operator from the operator stack onto the output queue.\n output += operators[-1]\n operators = operators[:-1]\n # Push it onto the operator stack\n operators += c\n # Else if token is a left parenthesis (\n elif c == \"(\":\n # Push c to operator stack\n operators += c\n elif c == \")\":\n while operators[-1] != \"(\":\n # Pop the operator from the operator stack onto the output queue.\n output += operators[-1]\n operators = operators[:-1]\n # If there is a left bracket at the top of the stack, remove it\n if operators[-1] == '(':\n # Pop the operator from the operator stack and discard it\n operators = operators[:-1]\n # if there is a function token at the top of the operator stack... (Ignoring this for now)\n \n # If there are any operators left in the stack, append to output\n while len(operators) > 0:\n # Push operator from top of stack to output\n output += operators[-1]\n # Remove top operator from stack\n operators = operators[:-1]\n return output",
"def eval_postfix(s):\n stack = Stack()\n for x in s.split(): # rozděl 's' dle mezer\n if x == '+':\n stack.push(stack.pop() + stack.pop())\n elif x == '-':\n stack.push(-stack.pop() + stack.pop())\n elif x == '*':\n stack.push(stack.pop() * stack.pop())\n elif x == '/':\n second = stack.pop()\n stack.push(stack.pop() / second)\n else:\n stack.push(float(x))\n return stack.pop()",
"def infix_to_postfix(s):\n result = \"\" # output string\n op = Stack() # operator stack\n i = 0 # index to 's'\n while i < len(s):\n if s[i] in \"0123456789\":\n while i < len(s) and s[i] in \"0123456789\":\n result += s[i]\n i += 1\n result += \" \"\n continue\n if s[i] == '(':\n op.push(s[i])\n elif s[i] == ')':\n top = op.pop()\n while top != '(':\n result += top + \" \"\n top = op.pop()\n else: # s[i] is +,-,*,/\n while not op.is_empty() and not higher_prec(s[i], op.peek()):\n result += op.pop() + \" \"\n op.push(s[i])\n i += 1\n while not op.is_empty():\n result += op.pop() + \" \"\n return result",
"def postfix_eval(input_str):\n\n \"\"\"Input argument: a string containing a postfix expression where tokens \n are space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns the result of the expression evaluation. \n Raises an PostfixFormatException if the input is not well-formed\"\"\"\n if input_str is None: raise PostfixFormatException\n # create list of operands and operators\n term_list = input_str.split()\n # initialize stack large enough to contain all operands\n operand_stack = Stack(2*len(term_list)//3+1)\n # iterate over term_list\n for term in term_list:\n # check for operatorm, evaluate operators on A & B if True\n if operator_present(term) is True:\n if operand_stack.size()<2: \n raise PostfixFormatException(\"Insufficient operands\")\n B = operand_stack.pop()\n A = operand_stack.pop()\n operand_stack.push(\n calculate(\n A, # A\n B, # B\n term) # operator\n )\n # check for operand, push to stack if True\n elif operand_present(term) is True:\n operand_stack.push(term)\n else: raise PostfixFormatException(\"Invalid token\")\n if len(term_list) % 3 != 0: raise PostfixFormatException(\"Too many operands\")\n return operand_stack.pop()",
"def input_parser(input_string: str) -> str: \n if is_int(input_string):\n return input_string\n #he is int, give back plz.\n else:\n try:\n modified_input: str = input_string.strip()\n\n evaluatable_pairs: str = regex_splitter(modified_input)\n\n while not (is_int(evaluatable_pairs)):\n evaluatable_pairs = regex_splitter(evaluatable_pairs)\n\n return (evaluatable_pairs)\n\n except:\n raise Exception(\"Invalid Input\")",
"def parse_prefix(s):\n\n Term.str = replace_string(s) # replace operators with more than one letter to be one letter\n second = None\n\n # if there is a left parentheses it means that we are having an operator that is enclosed by parenthesis\n if is_left_parenthese(Term.str[0]):\n Term.eat() # eat left parentheses\n first, Term.str = Formula.parse_prefix(Term.str) # take first formula of the operator\n root = switch_root_to_str(Term.str[0]) # take the root\n Term.eat() # eat the root\n second, Term.str = Formula.parse_prefix(Term.str) # take second formula of the operator\n Term.eat() # eat right parentheses\n\n # if first letter is a quantifier ('A' or 'E')\n elif is_quantifier(Term.str[0]):\n root = Term.str[0] # take the quantifier as root\n Term.eat() # eat the root ( quantifier)\n first = Term.get_whole_name() # take the name of the variable\n Term.eat() # eat the left bracket\n second, Term.str = Formula.parse_prefix(Term.str) # take the formula\n Term.eat() # eat the right bracket\n\n # if first letter is a relation (starts with capital letter)\n elif is_relation(Term.str[0]):\n root = Term.get_whole_name() # take the name of the relation\n first = []\n Term.eat() # eat left parentheses\n\n # if we didn't find closing parenthesis - than there must be at least one Term inside the parenthesis.\n # take it.\n if not is_right_parenthese(Term.str[0]):\n term_obj, Term.str = Term.parse_prefix(Term.str)\n first.append(term_obj)\n\n # while there is a comma, take the next term\n while is_comma(Term.str[0]):\n Term.eat() # eat left parentheses\n term_obj, Term.str = Term.parse_prefix(Term.str)\n first.append(term_obj)\n Term.eat() # eat right parentheses\n\n # else , it is an operator\n else:\n\n # if it's an unary operator\n if is_unary(Term.str[0]):\n root = Term.str[0]\n Term.eat()\n first, Term.str = Formula.parse_prefix(Term.str)\n\n # else , the operator is binary or equaluty\n else:\n first, Term.str = Term.parse_prefix(Term.str)\n # if it's a binary operator\n if is_binary(Term.str[0]):\n root = Term.str[0:2]\n Term.eat()\n\n # if it's an equal operator\n else:\n root = Term.str[0]\n Term.eat()\n second, Term.str = Term.parse_prefix(Term.str)\n returned_formula = Formula(root, first, second)\n return returned_formula, Term.str",
"def stringToSymModWithExpr(string):\n parser = Parser()\n string = string.strip() #delete all surrounding whitespaces\n i = 0\n symbol = \"\"\n # read the symbol\n while i < len(string) and string[i] != \"(\":\n symbol = symbol + string[i]\n i = i + 1\n # if parameters are present, get them\n if i < len(string) and string[i] == \"(\": # If true then parameters will follow, else we are done\n i = i + 1 # skip the opening bracket\n params = string[i:(len(string) - 1)].split(\",\")\n for i in range(0,len(params)):\n params[i] = parser.parse(params[i].strip())\n return(Module(symbol,params))\n else:\n return(Module(symbol,[]))",
"def calculator(infix_expr):\n\n # Assign precedence values to operators\n prec = {}\n prec['^'] = 4\n prec['*'] = 3\n prec['/'] = 3\n prec['+'] = 2\n prec['-'] = 2\n prec['('] = 1\n\n # Instantiate stacks\n operand_stack = Stack()\n operator_stack = Stack()\n\n try:\n token_list = infix_expr.split()\n logging.debug(\"token_list = {}\".format(token_list))\n except:\n sys.exit(1)\n\n for token in token_list:\n logging.debug(\"token = {}\".format(token))\n if token in '0123456789':\n operand_stack.push(int(token))\n logging.debug(\"operand_stack.push = {}\".format(token))\n elif token == '(':\n operator_stack.push(token)\n logging.debug(\"operator_stack.push = {}\".format(token))\n elif token == ')':\n logging.debug(\"token = {}\".format(token))\n operator_token = operator_stack.pop()\n logging.debug(\"operator_stack.pop = {}\".format(operator_token))\n while operator_token != '(':\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n operand_stack.push(result)\n logging.debug(\"while operator_token != '(':\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operator_token = operator_stack.pop()\n logging.debug(\"new operator_token = {}\".format(operator_token))\n elif token in '^*/+-':\n while (not operator_stack.isEmpty()) and \\\n (prec[operator_stack.peek()] >= prec[token]):\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n operator_token = operator_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n operand_stack.push(result)\n logging.debug(\"Operator - While:\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operator_stack.push(token)\n logging.debug(\"operator_stack.push(): {}\".format(token))\n else:\n logging.debug(\"else.... exiting....\")\n sys.exit(1)\n\n # Use all remaining operators\n if not operator_stack.isEmpty():\n operand2 = operand_stack.pop()\n operand1 = operand_stack.pop()\n operator_token = operator_stack.pop()\n result = do_math(operator_token, operand1, operand2)\n logging.debug(\"Remaining Operators:\\noperand1 = {} | operand2 = {} | token = {} | result = {}\".format(\n operand1, operand2, operator_token, result))\n operand_stack.push(result)\n\n return operand_stack.pop()",
"def prefix_to_postfix(input_str): # prefix requires that all operators precede the two operands that they work on\n\n \"\"\"Input argument: a string containing a prefix expression where tokens are \n space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns a String containing a postfix expression(tokens are space separated)\"\"\"\n if input_str is None: raise ValueError\n # split input string into list\n term_list = input_str.split()\n #print(\"TERM LIST \",term_list) \n # initialize output list\n output_list = []\n #print(\"OUT SIZE \", len(output_list))\n # initialize operator stack\n operator_stack = Stack(len(term_list)//3+1)\n for i in range(len(term_list)):\n term = term_list[i]\n # prefix should begin with an operator otherwise raise Exception\n if i == 0:\n if operator_present(term) is True: operator_stack.push(term)\n else: raise PostfixFormatException()\n # Check for operator\n elif operator_present(term): \n operator_stack.push(term)\n # check for operand\n elif operand_present(term):\n output_list.append(term)\n # if previous two terms in output list were operands, pop operator stack to output list once\n if operand_present(term_list[i-1]):\n output_list.append(operator_stack.pop())\n # for every three operands there should be an additional operator\n if operand_present(term_list[i-3]) and operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n while operator_stack.size() != 0:\n output_list.append(operator_stack.pop())\n new_str = (\" \".join(output_list))\n #print(\"NEW STR \", new_str)\n return new_str",
"def parseFbcInfixAssociation(*args):\n return _libsbml.FbcAssociation_parseFbcInfixAssociation(*args)",
"def parse_input(self, instructions):\r\n\r\n input_ = instructions\r\n input_list = input_.strip().split()\r\n\r\n if input_list[0] == 'push':\r\n self.push(input_list[1])\r\n\r\n elif input_list[0] == 'pop':\r\n self.pop()\r\n\r\n elif input_list[0] == 'top':\r\n self.top()\r\n\r\n elif input_list[0] == 'replace':\r\n self.replace(input_list[1], input_list[2])\r\n\r\n else:\r\n pass",
"def infix_to_postfix(self, exp):\n\n try:\n for i in exp:\n #if the character is an operand output it\n if self.is_operand(i):\n self.postfix.append(i)\n\n #if the character is '(' push it\n elif i is '(':\n self.push('(')\n\n elif i is ')':\n #if the character is ')\" pop until we encounter '(' in the stack\n while not self.isEmpty() and self.peek() is not '(':\n self.postfix.append(self.pop())\n if not self.isEmpty() and self.peek() is not '(':\n return -1\n else:\n self.pop()\n\n #if an operator is encountered\n else:\n while not self.isEmpty() and self.peek() is not '(' and self.not_greater(i):\n self.postfix.append(self.pop())\n self.push(i)\n while not self.isEmpty():\n self.postfix.append(self.pop())\n\n return ''.join(self.postfix)\n\n except Exception as e:\n print(\"Error occurred while performing infix to postfix conversion :\", e)\n traceback.print_exc()\n return -1",
"def parseInfixAssociation(*args):\n return _libsbml.Association_parseInfixAssociation(*args)",
"def visitPackageInfixSyntax(self, *args):\n return _libsbml.L3ParserSettings_visitPackageInfixSyntax(self, *args)",
"def postfix_eval(input_str: str) -> Any:\n \"\"\"Input argument: a string containing a postfix expression where tokens \n are space separated. Tokens are either operators + - * / ** << >> or numbers (integers or floats)\n Returns the result of the expression evaluation. \n Raises an PostfixFormatException if the input is not well-formed\"\"\"\n stack = Stack(30)\n if input_str == \"\":\n raise PostfixFormatException('Insufficient operands')\n op_list = [\"+\", \"-\", \"*\", \"/\", \"<<\", \">>\", \"**\"]\n split_list = input_str.split()\n for i in split_list:\n new_val = i.lstrip(\"-\")\n new_val = new_val.replace(\".\", \"\", 1)\n if i in op_list:\n try:\n num_val = stack.pop()\n num_val_initial = stack.pop()\n except IndexError:\n raise PostfixFormatException(\"Insufficient operands\")\n if i == \"+\":\n stack.push(num_val_initial + num_val)\n if i == \"-\":\n stack.push(num_val_initial - num_val)\n if i == \"*\":\n stack.push(num_val_initial * num_val)\n if i == \"/\":\n if num_val == 0:\n raise ValueError(\"0 not divisible\")\n stack.push(num_val_initial / num_val)\n if i == \"**\":\n stack.push(num_val_initial ** num_val)\n if i == \"<<\":\n t1 = type(num_val)\n t2 = type(num_val_initial)\n if t1 == float or t2 == float:\n raise PostfixFormatException(\"Illegal bit shift operand\")\n stack.push(num_val_initial << num_val)\n if i == \">>\":\n t1 = type(num_val)\n t2 = type(num_val_initial)\n if t1 == float or t2 == float:\n raise PostfixFormatException(\"Illegal bit shift operand\")\n stack.push(num_val_initial >> num_val)\n elif new_val.isdigit():\n if \".\" in i:\n stack.push(float(i))\n else:\n stack.push(int(i))\n else:\n raise PostfixFormatException(\"Invalid token\")\n val = stack.pop()\n if not stack.is_empty():\n raise PostfixFormatException(\"Too many operands\")\n return val"
] | [
"0.7397211",
"0.6478074",
"0.6168341",
"0.61475885",
"0.6107287",
"0.60420674",
"0.60419923",
"0.6025316",
"0.60142255",
"0.5998835",
"0.5970454",
"0.5955664",
"0.5921972",
"0.5825912",
"0.5818483",
"0.576102",
"0.5759942",
"0.57156307",
"0.56969327",
"0.56241494",
"0.5590105",
"0.5579819",
"0.5577712",
"0.55776685",
"0.5503583",
"0.54652965",
"0.546494",
"0.5432385",
"0.5418647",
"0.54095167"
] | 0.81860536 | 0 |
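A note on the record above: its positive document depends on ParsedString, parse_e, and Node helpers that are not part of this dump, so it is not runnable on its own. Below is a minimal, self-contained sketch of the shunting-yard technique that several of the listed negatives implement; it is an illustrative stand-in under that assumption, not the dataset's own parser.

import operator

PRECEDENCE = {"+": 1, "-": 1, "*": 2, "/": 2}
OPS = {"+": operator.add, "-": operator.sub, "*": operator.mul, "/": operator.truediv}

def infix_to_postfix(expr: str) -> list:
    """Convert a space-separated infix expression to a postfix token list."""
    output, stack = [], []
    for tok in expr.split():
        if tok in PRECEDENCE:
            # Pop operators of greater or equal precedence before pushing tok.
            while stack and stack[-1] != "(" and PRECEDENCE[stack[-1]] >= PRECEDENCE[tok]:
                output.append(stack.pop())
            stack.append(tok)
        elif tok == "(":
            stack.append(tok)
        elif tok == ")":
            while stack and stack[-1] != "(":
                output.append(stack.pop())
            stack.pop()  # discard the matching "("
        else:
            output.append(tok)  # operand
    while stack:
        output.append(stack.pop())
    return output

def eval_postfix(tokens: list) -> float:
    """Evaluate a postfix token list with an operand stack."""
    stack = []
    for tok in tokens:
        if tok in OPS:
            b, a = stack.pop(), stack.pop()
            stack.append(OPS[tok](a, b))
        else:
            stack.append(float(tok))
    return stack.pop()

if __name__ == "__main__":
    expr = "3 + 4 * ( 2 - 1 )"
    print(infix_to_postfix(expr))                # ['3', '4', '2', '1', '-', '*', '+']
    print(eval_postfix(infix_to_postfix(expr)))  # 7.0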
Normalize time in arbitrary timezone to UTC naive object. | def normalize_time(timestamp):
offset = timestamp.utcoffset()
if offset is None:
return timestamp
return timestamp.replace(tzinfo=None) - offset | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def normalize_to_utc(date, timezone):\n local_tz = pytz.timezone(timezone)\n new_date = date.replace(tzinfo = local_tz)\n utc_tz = pytz.timezone('UTC')\n new_date = new_date.astimezone(utc_tz)\n return new_date",
"def normalize_time(timestamp):\r\n offset = timestamp.utcoffset()\r\n if offset is None:\r\n return timestamp\r\n return timestamp.replace(tzinfo=None) - offset",
"def resolved_at_to_utc(user_time, user_tz):\n if isinstance(user_tz, str):\n user_tz = dateutil.tz.gettz(user_tz)\n\n localized_time = user_time.replace(tzinfo=user_tz)\n return localized_time.to(\"UTC\").datetime",
"def toutc(dateobj, timezone):\n fmtdate = parser.parse(dateobj) # string to datetime object\n user_tz = pytz.timezone(timezone) # getting user's timezone\n localize_date_with_tz = user_tz.localize(fmtdate) #adding user's timezone to datetime object\n utcdate = pytz.utc.normalize(localize_date_with_tz) #converting user's datetime to utc datetime\n return utcdate",
"def localize_time_utc(non_utc_time):\n return pytz.utc.localize(non_utc_time)",
"def localize_time_utc(non_utc_time):\n return pytz.utc.localize(non_utc_time)",
"def fromutc(self, dt):\n if dt.tzinfo is None:\n return dt.replace(tzinfo=self)\n return super(UTC, self).fromutc(dt)",
"def localize_utc(value):\n if isinstance(value, datetime):\n return value.replace(tzinfo=tzutc()).astimezone(tzlocal())\n else:\n return value",
"def datetime_naive_to_utc(dt):\n\n if type(dt) != datetime.datetime:\n raise TypeError(f\"dt must be type datetime.datetime, not {type(dt)}\")\n\n if dt.tzinfo is not None and dt.tzinfo.utcoffset(dt) is not None:\n # has timezone info\n raise ValueError(\n \"dt must be naive/timezone unaware: \"\n f\"{dt} has tzinfo {dt.tzinfo} and offset {dt.tzinfo.utcoffset(dt)}\"\n )\n\n return dt.replace(tzinfo=datetime.timezone.utc)",
"def make_naive_utc(date_time: datetime.datetime) -> datetime.datetime:\n utc_timezone = datetime.timezone(datetime.timedelta(seconds=0))\n return date_time.astimezone(utc_timezone).replace(tzinfo=None)",
"def to_utc(dt):\n if dt.tzinfo is None:\n return dt.replace(tzinfo=pytz.utc)\n else:\n return dt.astimezone(pytz.utc)",
"def ensure_utc_time(ts: datetime) -> datetime:\n if ts.tzinfo is None:\n return datetime(*ts.timetuple()[:6], tzinfo=UTC_TZ)\n elif str(ts.tzinfo) != str(UTC_TZ):\n return ts.astimezone(UTC_TZ)\n return ts",
"async def test_process_timestamp_to_utc_isoformat() -> None:\n datetime_with_tzinfo = datetime(2016, 7, 9, 11, 0, 0, tzinfo=dt_util.UTC)\n datetime_without_tzinfo = datetime(2016, 7, 9, 11, 0, 0)\n est = dt_util.get_time_zone(\"US/Eastern\")\n datetime_est_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=est)\n est = dt_util.get_time_zone(\"US/Eastern\")\n datetime_est_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=est)\n nst = dt_util.get_time_zone(\"Canada/Newfoundland\")\n datetime_nst_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=nst)\n hst = dt_util.get_time_zone(\"US/Hawaii\")\n datetime_hst_timezone = datetime(2016, 7, 9, 11, 0, 0, tzinfo=hst)\n\n assert (\n process_timestamp_to_utc_isoformat(datetime_with_tzinfo)\n == \"2016-07-09T11:00:00+00:00\"\n )\n assert (\n process_timestamp_to_utc_isoformat(datetime_without_tzinfo)\n == \"2016-07-09T11:00:00+00:00\"\n )\n assert (\n process_timestamp_to_utc_isoformat(datetime_est_timezone)\n == \"2016-07-09T15:00:00+00:00\"\n )\n assert (\n process_timestamp_to_utc_isoformat(datetime_nst_timezone)\n == \"2016-07-09T13:30:00+00:00\"\n )\n assert (\n process_timestamp_to_utc_isoformat(datetime_hst_timezone)\n == \"2016-07-09T21:00:00+00:00\"\n )\n assert process_timestamp_to_utc_isoformat(None) is None",
"def aware_utc_from_timestamp(timestamp):\n return datetime.utcfromtimestamp(timestamp).replace(tzinfo=pytz.utc)",
"def make_naive(value, timezone=None):\n\n if timezone is None:\n timezone = get_current_timezone()\n\n # Emulate the behavior of astimezone() on Python < 3.6.\n if is_naive(value):\n raise ValueError(\"make_naive() cannot be applied to a naive datetime\")\n\n return value.astimezone(timezone).replace(tzinfo=None)",
"def localize(dt):\n if dt.tzinfo is UTC:\n return (dt + LOCAL_UTC_OFFSET).replace(tzinfo=None)\n # No TZ info so not going to assume anything, return as-is.\n return dt",
"def make_tz_aware(time_string):\n naive_dt = datetime.datetime.strptime(time_string.strip(), '%m/%d/%Y')\n aware_dt = pytz.timezone('Asia/Manila').localize(naive_dt)\n return aware_dt.astimezone(pytz.UTC)",
"def test_time_to_commute_retrieved_from_google_api_in_posix_is_converted_to_utc(self):\n result = calculate_time_of_commute(\n origin_name='Gatwick Airport',\n destination_name='Kings Cross St Pancras',\n )\n assert type(result) == datetime\n assert result.tzinfo is None # Assert it is a naive datetime",
"def normalise_dt(dt: Union[str, datetime]) -> datetime:\n if isinstance(dt, str):\n dt = parse_time(dt)\n if dt.tzinfo is not None:\n dt = dt.astimezone(tzutc()).replace(tzinfo=None)\n return dt",
"def tz_localize(self, dt):\n if is_datetime(dt):\n # Naive datetime, see\n # https://docs.python.org/3/library/datetime.html#available-types\n if dt.tzinfo == None or dt.tzinfo.utcoffset(dt) == None:\n return dt\n else:\n return dt.astimezone(self.args.tz)\n elif is_date(dt):\n return dt\n else:\n raise ValueError('Expected datetime or date object')",
"def local_to_utc(local_dt):\n local_dt = local_dt.replace(tzinfo=tz.tzlocal())\n return local_dt.astimezone(tz.tzlocal())",
"def set_utc(date_time):\n utc = datetime.timezone(datetime.timedelta(0))\n date_time = date_time.replace(tzinfo=utc)\n return date_time",
"def localize_datetime_utc(date_time):\n return pytz.utc.localize(date_time)",
"def localize_datetime_utc(date_time):\n return pytz.utc.localize(date_time)",
"def localize_datetime_utc(date_time):\n return pytz.utc.localize(date_time)",
"def date_to_utc(self, date):\n if date.tzinfo is not None:\n # date is timezone-aware\n date = date.astimezone(self.tz_utc)\n\n else:\n # date is a naive date: assume expressed in local time\n date = date.replace(tzinfo=self.tz_local)\n # and converted to UTC\n date = date.astimezone(self.tz_utc)\n return date",
"def tolocal(dateobj, timezone):\n \n utc_date_with_tz = pytz.utc.localize(dateobj) # \n user_tz = pytz.timezone(timezone)\n localdate = user_tz.normalize(utc_date_with_tz) \n \n return localdate",
"def to_datetime_utc(obj: Union[None, pendulum.DateTime, str]) -> Union[pendulum.DateTime, None]:\n\n if isinstance(obj, pendulum.DateTime):\n return obj.in_tz(tz=\"UTC\")\n elif isinstance(obj, str):\n dt = pendulum.parse(obj)\n return dt.in_tz(tz=\"UTC\")\n elif obj is None:\n return None\n\n raise ValueError(\"body should be None or pendulum.DateTime\")",
"def to_utc(dt):\n time_tuple = time.gmtime(time.mktime(dt.timetuple()))\n return datetime.datetime(*time_tuple[0:6])",
"def localToUTC(t, local_tz):\n t_local = local_tz.localize(t, is_dst=None)\n t_utc = t_local.astimezone(pytz.utc)\n return t_utc"
] | [
"0.7121004",
"0.70751613",
"0.68719333",
"0.6638765",
"0.6492223",
"0.6492223",
"0.64689595",
"0.6383807",
"0.63305354",
"0.6259214",
"0.6201917",
"0.61943454",
"0.6189186",
"0.61438674",
"0.6071864",
"0.6052293",
"0.6044807",
"0.6037905",
"0.60008335",
"0.60007656",
"0.5990347",
"0.59875214",
"0.5973145",
"0.5973145",
"0.5973145",
"0.5946043",
"0.59390885",
"0.5931901",
"0.5928691",
"0.59215915"
] | 0.7118167 | 1 |
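For the record above: the positive document subtracts the UTC offset from an aware timestamp to obtain a naive UTC value. The following is a minimal standard-library sketch of that same idea, assuming nothing beyond datetime; the pytz and oslo-style helpers that appear in the negatives are not used.

import datetime

def to_naive_utc(ts: datetime.datetime) -> datetime.datetime:
    """Return ts as a naive datetime expressed in UTC wall time."""
    offset = ts.utcoffset()
    if offset is None:
        # Already naive: treat it as UTC and return unchanged.
        return ts
    return ts.replace(tzinfo=None) - offset

if __name__ == "__main__":
    bangkok = datetime.timezone(datetime.timedelta(hours=7))
    aware = datetime.datetime(2024, 1, 1, 12, 0, tzinfo=bangkok)
    print(to_naive_utc(aware))  # 2024-01-01 05:00:00, naive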
Determines if time is going to happen in the next window seconds. | def is_soon(dt, window):
soon = (utcnow() + datetime.timedelta(seconds=window))
return normalize_time(dt) <= soon | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_soon(dt, window):\r\n soon = (utcnow() + datetime.timedelta(seconds=window))\r\n return normalize_time(dt) <= soon",
"def is_time(self) -> bool:\n return self.times > 1",
"def check_timer(self, wanted_time):\n if time.time() - self.start_time >= wanted_time:\n return True\n return False",
"def checkAtFinalTime():\n global final_time\n if final_time <= current_second:\n return True\n return False",
"def check_time():\n times = get_times()\n time_difference = abs((times['local'] - times['target']).total_seconds())\n return time_difference < post_time_tol_seconds",
"def have_window(self) -> bool:\r\n return (\r\n self._first_enc_at is not None\r\n and (time.time() > self._first_enc_at + self.window_size)\r\n )",
"def isTimeRemaining(self):\n if self.run_type.startswith('timed'):\n time_since_start = (time.time() - self.start_times['run'])\n remaining_time = self.max_time * 60 - time_since_start\n if remaining_time < 0:\n return False\n else:\n return True",
"def is_next_run(self, local_time):\n return local_time <= self.stop_time",
"def haveTime(self):\n if self.timeout is None:\n return True\n return time.time() <= self._stop",
"def count_as_possession(self):\n if self.is_possession_ending_event:\n if self.seconds_remaining > 2:\n return True\n # check when previous possession ended\n prev_event = self.previous_event\n while prev_event is not None and not prev_event.is_possession_ending_event:\n prev_event = prev_event.previous_event\n if prev_event is None or prev_event.seconds_remaining > 2:\n return True\n # possession starts in final 2 seconds\n # return True if there is a FT or FGM between now and end of period\n next_event = prev_event.next_event\n while next_event is not None:\n if isinstance(next_event, FreeThrow) or (\n isinstance(next_event, FieldGoal) and next_event.is_made\n ):\n return True\n next_event = next_event.next_event\n return False",
"def run_now(self, local_time):\n return self.start_time <= local_time < self.stop_time",
"def in_window(window, date):\n win = datetime.timedelta(window)\n if date == None:\n return False\n date = date.replace(tzinfo=None)\n delta = UTCNOW - date\n return delta <= win",
"def time_is_out(self):\n return self.get_simulation_time() > self.config.max_time",
"def is_over(self, time):\n over = (not self.enable_loop()) and (time >= self.get_duration())\n return over",
"def time_to_fire(self):\n return(self.frequency < (time.time() - self.last_fired))",
"def is_real_time(self):\n return time.time() - self.timestamp < self._DEADLINE_SEC",
"def test_next_window_time_sample_passed(self):\n test_window_scheme = WindowingScheme(self.window_test_filter, 3)\n # Value 15 will be filtered as it ranges between lower and upper bound limits\n filtered_value = test_window_scheme.filter(self.middle_value)\n self.assertEquals(filtered_value, self.middle_value)\n # Let next window time elapse\n time.sleep(4)\n filtered_value = test_window_scheme.filter(self.more_than_upper_bound)\n # None is expected as filtered value because at least one sample has been already passed and\n # value ranges outside lower and upper bound limits\n self.assertEquals(filtered_value, None)",
"def has_been_n_seconds_since_last(self, identifier, seconds):\r\n current_time = time.time()\r\n if identifier not in self._last_time or \\\r\n (current_time - self._last_time[identifier] > seconds):\r\n self._last_time[identifier] = current_time\r\n return True\r\n return False",
"def realtime(self):\n return self._time is None",
"def must_run(self):\r\n self.current_time = datetime.now()\r\n return all([self._minute(), self._hour(), self._day_of_month(), self._month(), self._day_of_week()])",
"def valid(t):\n return float(t) > time.time()",
"def running(self):\n return (\n self.enabled and (self.elapsed < self.timeout)\n and not math.isclose(self.elapsed, self.timeout)\n )",
"def trigger(self):\n if self.timer is None or time.time() - self.last_try > self.min_sec * 2:\n self.timer = time.time()\n self.last_try = time.time()\n return False\n elif time.time() - self.timer > self.min_sec:\n self.reset()\n return True\n else:\n self.last_try = time.time()\n return False",
"def time_to_move(self):\r\n if int(self.pix_pos.x+TOP_BOTTOM_BUFFER//2) % self.app.cell_width == 0:\r\n if self.direction == vec(1, 0) or self.direction == vec(-1, 0) or self.direction == vec(0, 0):\r\n return True\r\n # for the x-direction\r\n\r\n if int(self.pix_pos.y+TOP_BOTTOM_BUFFER//2) % self.app.cell_height == 0:\r\n if self.direction == vec(0, 1) or self.direction == vec(0, -1) or self.direction == vec(0, 0):\r\n return True\r\n # for the y-direction\r\n\r\n # checks to see if the player is still within the bounds\r",
"def reached(self) -> bool:\n return (time.time() - self._start) >= self.seconds",
"def BeFrameNice(ms = 15):\n try:\n if not stackless.current.is_main:\n if ms < 1.0:\n ms = 1.0\n while blue.os.GetWallclockTimeNow() - blue.os.GetWallclockTime() > ms * 10000:\n blue.synchro.Yield()\n ms *= 1.02\n\n return True\n return False\n except:\n raise",
"def after(self, time2):\r\n return self.to_seconds() > time2.to_seconds()",
"def seconds_before_next_run(self):\n period, last_start_time = self.period, self.last_start_time\n now = utcnow()\n if isinstance(period, Weekly):\n then = now.replace(hour=period.hour, minute=10, second=0, microsecond=0)\n days = (period.weekday - now.isoweekday()) % 7\n if days:\n then += timedelta(days=days)\n if (last_start_time or EPOCH) >= then:\n then += timedelta(days=7)\n elif isinstance(period, Daily):\n then = now.replace(hour=period.hour, minute=5, second=0, microsecond=0)\n if (last_start_time or EPOCH) >= then:\n then += timedelta(days=1)\n elif period == 'irregular':\n return 0 if self.thread and self.thread.is_alive() else None\n elif last_start_time:\n then = last_start_time + timedelta(seconds=period)\n else:\n then = now\n return (then - now).total_seconds()",
"def isSessionStartedThisDay(self):\n serverRegionalSettings = BigWorld.player().serverSettings['regional_settings']\n return int(time_utils._g_instance.serverRegionalTime) / 86400 == int(self.__sessionStartedAt + serverRegionalSettings['starting_time_of_a_new_day']) / 86400",
"def is_sim_end(self):\n\n return self.cur_round == self.num_rounds + 1"
] | [
"0.6944493",
"0.6809295",
"0.6770309",
"0.66791123",
"0.6604746",
"0.6575241",
"0.65720016",
"0.6527601",
"0.64802086",
"0.6467284",
"0.6399745",
"0.63648057",
"0.6287537",
"0.62048924",
"0.6202289",
"0.61881906",
"0.6164102",
"0.6113024",
"0.6048458",
"0.60255253",
"0.5996805",
"0.5933048",
"0.5928945",
"0.5917642",
"0.59157175",
"0.59066916",
"0.5898061",
"0.58949697",
"0.58826524",
"0.5879712"
] | 0.6890391 | 1 |
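For the record above: utcnow() and normalize_time in the positive document come from that snippet's own module and are not shown in this dump. Below is a hedged standard-library equivalent of the "within the next window seconds" check, for illustration only.

import datetime

def naive_utcnow() -> datetime.datetime:
    """Current UTC time as a naive datetime, mirroring the utcnow() helper assumed above."""
    return datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None)

def is_soon(dt: datetime.datetime, window: int) -> bool:
    """Return True if naive-UTC dt falls within the next `window` seconds."""
    soon = naive_utcnow() + datetime.timedelta(seconds=window)
    return dt <= soon

if __name__ == "__main__":
    in_ten_seconds = naive_utcnow() + datetime.timedelta(seconds=10)
    print(is_soon(in_ten_seconds, 60))  # True: inside a 60-second window
    print(is_soon(in_ten_seconds, 5))   # False: outside a 5-second window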
Send a message to LINE when the BTC price changes | def line_sent(price):
now = datetime.datetime.now()
LINE_ACCESS_TOKEN = " " # Line Token
url = "https://notify-api.line.me/api/notify"
print("[%02i:%02i:%02i] Price Change : Send Message" % (now.hour, now.minute, now.second))
message = "[%02i:%02i:%02i] Now BTC Price : %s" % (now.hour, now.minute, now.second, price)
msg = urllib.parse.urlencode({"message":message})
LINE_HEADERS = {'Content-Type':'application/x-www-form-urlencoded',"Authorization":"Bearer "+LINE_ACCESS_TOKEN}
session = requests.Session()
send = session.post(url, headers=LINE_HEADERS, data=msg)
print("[%02i:%02i:%02i] " % (now.hour, now.minute, now.second), end="")
print(send.text) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def m_ts_OrderUpdated(self, sender, e):\r\n print(\"Order was updated with price of {0}.\".format(e.NewOrder.LimitPrice))",
"def price(temp):\n now = datetime.datetime.now()\n r = requests.get(\"https://bitcoin.co.th/\")\n soup = BeautifulSoup(r.content, \"html.parser\")\n data = soup.find_all(\"div\", {\"class\": \"price\"})\n print(\"[%02i:%02i:%02i] Now BTC Price : \" % (now.hour, now.minute, now.second), end=\"\")\n for i in range(len(data)):\n price = (data[i].text)\n print(price)\n if price != temp: # Price Change\n line_sent(price)\n temp = price\n time.sleep(30) # Delay 30 second\n main(temp) # call function main for loop",
"def _on_op_private_ticker(self, msg):\r\n msg = msg[\"ticker\"]\r\n if msg[\"sell\"][\"currency\"] != self.curr_quote:\r\n return\r\n if msg[\"item\"] != self.curr_base:\r\n return\r\n bid = int(msg[\"buy\"][\"value_int\"])\r\n ask = int(msg[\"sell\"][\"value_int\"])\r\n\r\n self.debug(\" tick: %s %s\" % (\r\n self.quote2str(bid),\r\n self.quote2str(ask)\r\n ))\r\n self.signal_ticker(self, (bid, ask))",
"async def on_symbol_price_updated(self, price: MetatraderSymbolPrice):\n self._pricesBySymbol[price['symbol']] = price\n positions = list(filter(lambda p: p['symbol'] == price['symbol'], self._positions))\n orders = list(filter(lambda o: o['symbol'] == price['symbol'], self._orders))\n specification = self.specification(price['symbol'])\n if specification:\n for position in positions:\n if 'unrealizedProfit' not in position or 'realizedProfit' not in position:\n position['unrealizedProfit'] = (1 if (position['type'] == 'POSITION_TYPE_BUY') else -1) * \\\n (position['currentPrice'] - position['openPrice']) * \\\n position['currentTickValue'] * position['volume'] / specification['tickSize']\n position['realizedProfit'] = position['profit'] - position['unrealizedProfit']\n new_position_price = price['bid'] if (position['type'] == 'POSITION_TYPE_BUY') else price['ask']\n is_profitable = (1 if (position['type'] == 'POSITION_TYPE_BUY') else -1) * (new_position_price -\n position['openPrice'])\n current_tick_value = price['profitTickValue'] if (is_profitable > 0) else price['lossTickValue']\n unrealized_profit = (1 if (position['type'] == 'POSITION_TYPE_BUY') else -1) * \\\n (new_position_price - position['openPrice']) * current_tick_value * position['volume'] / \\\n specification['tickSize']\n position['unrealizedProfit'] = unrealized_profit\n position['profit'] = position['unrealizedProfit'] + position['realizedProfit']\n position['currentPrice'] = new_position_price\n position['currentTickValue'] = current_tick_value\n for order in orders:\n order['currentPrice'] = price['ask'] if (order['type'] == 'ORDER_TYPE_BUY_LIMIT' or\n order['type'] == 'ORDER_TYPE_BUY_STOP' or\n order['type'] == 'ORDER_TYPE_BUY_STOP_LIMIT') else price['bid']\n if self._accountInformation:\n self._accountInformation['equity'] = self._accountInformation['balance'] + \\\n functools.reduce(lambda a, b: a + b['profit'], self._positions, 0)",
"def on_tick(self, tick: TickData):\n self.bg_xhour.update_tick(tick)\n self.ask = tick.ask_price_1 # 卖一价\n self.bid = tick.bid_price_1 # 买一价",
"def WantMoney(self, message):\n ## self.dispView.SetMoney(message.data)\n newVal = [0.0 for ix in range(NUM_CHANNEL)]\n for i in xrange(NUM_CHANNEL):\n newVal[i] = max(-65535, min(65535, self.nerfModel.ReadFPGA(DATA_OUT_ADDR[i])))\n ## if i == 1:\n ## print \"%.4f\" % newVal[i],\n## #\n# #\n# newVal[i] = self.nerfModel.ReadFPGA16Bit(0x23)\n# hi = ConvertType(hi, 'i', 'h')\n newSpike = self.nerfModel.ReadPipe()\n self.dispView.OnPaint(newVal = newVal, newSpike = newSpike)",
"async def update(self, *args, **kwargs):\n if not self.__bought:\n random_stock = 1\n stock_price = self.priceindicator[random_stock].price\n if stock_price != 0:\n random_const = float(decimal.Decimal(random.randrange(-5,5))/100)\n stock_price = stock_price + stock_price*random_const\n stock_price = int(stock_price)\n await self.place_buy_order(random_stock, self.settings[\"stocks_per_company\"], stock_price, 1)\n log_message = \"StockBuyerBot(\" + self.name + \") bought \" + str(random_stock)\n print(log_message)\n else:\n log_message = \"StockBuyerBot(\" + self.name + \") bought nothing\"\n print(log_message)\n self.add_to_log(self.id, log_message)",
"def on_tick(self, tick: TickData):\n if tick and tick.bid_price_1 > 0:\n self.tick = tick",
"def m_ps_FieldsUpdated(self, sender, e):\r\n if e.Error == None:\r\n # Make sure that there is a valid bid\r\n if e.Fields.GetBestBidPriceField().HasValidValue:\r\n if self.m_orderKey == \"\":\r\n # If there is no order working, submit one through the first valid order feed.\r\n # You should use the order feed that is valid for your purposes.\r\n op = ttapi.OrderProfile(e.Fields.Instrument.GetValidOrderFeeds()[0], e.Fields.Instrument)\r\n op.BuySell = ttapi.BuySell.Buy\r\n op.AccountName = \"12345678\"\r\n op.AccountType = ttapi.AccountType.A1\r\n op.OrderQuantity = ttapi.Quantity.FromInt(e.Fields.Instrument, 1)\r\n op.OrderType = ttapi.OrderType.Limit\r\n op.LimitPrice = e.Fields.GetBestBidPriceField().Value\r\n if not self.m_ts.SendOrder(op):\r\n print(\"Send new order failed. {0}\".format(op.RoutingStatus.Message))\r\n self.Dispose()\r\n else:\r\n self.m_orderKey = op.SiteOrderKey\r\n print(\"Send new order succeeded.\")\r\n elif self.m_ts.Orders.ContainsKey(self.m_orderKey) and self.m_ts.Orders[self.m_orderKey].LimitPrice != e.Fields.GetBestBidPriceField().Value:\r\n # If there is a working order, reprice it if its price is not the same as the bid\r\n op = self.m_ts.Orders[self.m_orderKey].GetOrderProfile()\r\n op.LimitPrice = e.Fields.GetBestBidPriceField().Value\r\n op.Action = ttapi.OrderAction.Change\r\n if not self.m_ts.SendOrder(op):\r\n print(\"Send change order failed. {0}\".format(op.RoutingStatus.Message))\r\n else:\r\n print(\"Send change order succeeded.\")\r\n else:\r\n if e.Error.IsRecoverableError == False:\r\n print(\"Unrecoverable price subscription error: {0}\".format(e.Error.Message))\r\n self.Dispose()",
"def send_btc_price(message):\n\n bot_token = TOKEN\n chat_id = ID\n sendText = 'https://api.telegram.org/bot' + bot_token + '/sendMessage?chat_id=' + chat_id + '&parse_mode=Markdown&text=' + message\n\n response = requests.get(sendText)\n\n return response",
"def price_of_auction_has_changed(bet_id: int):\n bet = Bet.objects.get(pk=bet_id)\n auction = bet.auction\n participance_id = auction.bet_set.exclude(user=bet.user).values_list(\n \"user\", flat=True\n )\n participance = list(\n User.objects.filter(pk__in=participance_id).values_list(\"email\", flat=True)\n )\n\n broadcast_emails(\n participance,\n f\"Hi there, price of the auction {auction.name} has been changed. Current price is {bet.price}\",\n )",
"def update(self, price, dt):\n reached = self.get('reached')\n price_diff = self.get('price_diff')\n price_offset = self.get('price_offset')\n #log.info(\"Update bo feature '%s' at price change with price=%s dt=%s\" % (self.name, price, dt))\n #log.info(self.bo)\n if self.bo.price_diff_d is not None and not reached:\n if self.bo.price_diff_d >= price_diff:\n self.set('reached', True)\n new_stop = self.bo.price_open + self.bo.direction * price_offset\n log.info(\"<BOFeature:%s BE reached: price stop set to %s\" % (self.name, new_stop))\n self.modify_stop(new_stop)\n return(self.bo.ticket)\n else:\n return",
"def _on_op_private_trade(self, msg):\r\n if msg[\"trade\"][\"price_currency\"] != self.curr_quote:\r\n return\r\n if msg[\"trade\"][\"item\"] != self.curr_base:\r\n return\r\n if msg[\"channel\"] == CHANNELS[\"trade.%s\" % self.curr_base]:\r\n own = False\r\n else:\r\n own = True\r\n date = int(msg[\"trade\"][\"date\"])\r\n price = int(msg[\"trade\"][\"price_int\"])\r\n volume = int(msg[\"trade\"][\"amount_int\"])\r\n typ = msg[\"trade\"][\"trade_type\"]\r\n\r\n if own:\r\n self.debug(\"trade: %s: %s @ %s (own order filled)\" % (\r\n typ,\r\n self.base2str(volume),\r\n self.quote2str(price)\r\n ))\r\n # send another private/info request because the fee might have\r\n # changed. We request it a minute later because the server\r\n # seems to need some time until the new values are available.\r\n self.client.request_info_later(60)\r\n else:\r\n self.debug(\"trade: %s: %s @ %s\" % (\r\n typ,\r\n self.base2str(volume),\r\n self.quote2str(price)\r\n ))\r\n\r\n self.signal_trade(self, (date, price, volume, typ, own))",
"def change_price(self, value): \n value = self.price",
"def _onchange_price(self):\n self.price_subtotal = self.price",
"def _on_op_private_wallet(self, msg):\r\n balance = msg[\"wallet\"][\"balance\"]\r\n currency = balance[\"currency\"]\r\n total = int(balance[\"value_int\"])\r\n self.wallet[currency] = total\r\n self.signal_wallet(self, None)",
"def updatePrice(self, isinkey, field, data, qtype):\r\n isin = isinkey[0:12]\r\n bond = regsToBondName[isin]\r\n if qtype == BloombergQuery.BID:\r\n # 1/ WE CACHE THE OLD PRICE\r\n self.updateCell(bond, 'OLDBID', self.df.at[bond, 'BID'])\r\n self.updateCell(bond, 'OLDASK', self.df.at[bond, 'ASK'])\r\n # 2/ WE CHECK IF PRICE CHANGED\r\n if bond in self.rfbonds:\r\n self.blptsAnalytics.get(isin + '@CBBT' + ' Corp', self.bbgPriceRFQuery)\r\n else:\r\n self.blptsPriceOnly.get(isin + BBGHand + ' Corp', self.bbgPriceOnlyQuery)\r\n elif qtype == BloombergQuery.PRICEONLY:\r\n data = data.astype(float)\r\n # for item, value in data.iteritems():\r\n # self.updateCell(bond,bbgToBdmDic[item],value)\r\n self.lock.acquire()\r\n for item, value in data.iteritems():\r\n self.df.at[bond, bbgToBdmDic[item]] = value\r\n self.lock.release()\r\n if (data['BID'] != self.df.at[bond, 'OLDBID']) or (data['ASK'] != self.df.at[bond, 'OLDASK']):\r\n if bond in SPECIALBONDS:\r\n self.blptsAnalytics.get(isin + BBGHand + ' Corp', self.bbgPriceSpecialQuery)\r\n else:\r\n self.blptsAnalytics.get(isin + BBGHand + ' Corp', self.bbgPriceQuery)\r\n # try:\r\n # self.blptsAnalytics.get(isin + BBGHand + ' Corp', self.bbgPriceQuery)\r\n # except:\r\n # print 'error asking analytics for ' + bond\r\n else:\r\n # print 'Update event without a price change for ' + bond\r\n pub.sendMessage('BOND_PRICE_UPDATE', message=MessageContainer(self.df.loc[bond]))\r\n elif qtype == BloombergQuery.RTGACC:\r\n for item, value in data.iteritems():\r\n self.updateCell(bond,bbgToBdmDic[item],value)\r\n else:#'ANALYTICS' or 'FIRSTPASS'\r\n data = data.astype(float)\r\n # try:\r\n # for item, value in data.iteritems():\r\n # self.updateCell(bond,bbgToBdmDic[item],value)\r\n # except:\r\n # print data\r\n self.lock.acquire()\r\n try:\r\n for item, value in data.iteritems():\r\n self.df.at[bond, bbgToBdmDic[item]] = value\r\n except:\r\n self.lock.release()\r\n print data\r\n self.lock.release()\r\n if bond in SINKABLEBONDS:\r\n #self.bbgSinkRequest.fillRequest(isin + ' Corp', ['YAS_ZSPREAD'], strOverrideField='YAS_BOND_PX', strOverrideValue=data['BID'])\r\n self.bbgSinkRequest.fillRequest(isin + ' Corp', ['YAS_ZSPREAD'], strOverrideField='YAS_BOND_PX', strOverrideValue=self.df.at[bond, 'BID'])\r\n self.bbgSinkRequest.get()\r\n self.updateCell(bond, 'ZB', float(self.bbgSinkRequest.output.values[0,0]))\r\n #self.bbgSinkRequest.fillRequest(isin + ' Corp', ['YAS_ZSPREAD'], strOverrideField='YAS_BOND_PX', strOverrideValue=data['ASK'])\r\n # self.bbgSinkRequest.fillRequest(isin + ' Corp', ['YAS_ZSPREAD'], strOverrideField='YAS_BOND_PX', strOverrideValue=self.df.at[bond, 'ASK'])\r\n # self.bbgSinkRequest.get() \r\n # self.updateCell(bond, 'ZA', float(self.bbgSinkRequest.output.values[0,0]))\r\n if qtype == BloombergQuery.ANALYTICS:\r\n self.updateStaticAnalytics(bond)",
"def modify_price(self, price):\n if price is not None and self.is_cancellable:\n log.info(\"bo#%s: modify price (pending) order \" % self.ticket)\n not_implemented_error(\"Can't modify price for now (only for pending orders which wasn't triggered\")\n order_id = self.order_id_master\n cancel_order(order_id) # DANGEROUS! it should be atomic operation!\n #style = self.style\n #if self.is_limit:\n #elif self.is_stop:\n #elif self.is_stop_limit\n #order_id = order(self.symbol, self.volume, style=new_style))\n \n else:\n return",
"def m_ps_FieldsUpdated(self, sender, e):\r\n ltp = e.Fields.GetLastTradedPriceField()\r\n ltq = e.Fields.GetLastTradedQuantityField()\r\n print(\"ltp and ltq success...\")\r\n if ltp.HasChanged or ltq.HasChanged:\r\n print(ltp.Value, ltq.Value)\r\n ltp = ltp.Value\r\n ltpi = int(ltp.ToTicks())\r\n self.process_row({'time' :pd.datetime.now(), 'close' : ltpi}, key = \"nk\")",
"def coinbasepro_on_message(caller, msg):\n msg = json.loads(msg)\n # if msg['type'] == 'match':\n if msg['type'][2] == 't':\n chnl = msg[\"product_id\"]\n df = pd.DataFrame.from_records(\n data=[{\n \"tid\": int(msg[\"trade_id\"]),\n \"price\": float(msg[\"price\"]),\n \"volume\": float(msg['size']) if msg['side'] == 'buy' else -float(msg['size']),\n \"datetime\": pd.to_datetime(msg[\"time\"])\n }],\n index=\"datetime\"\n )\n df.index = df.index.tz_convert(\"GMT0\")\n caller.write(chnl, df)\n\n return chnl, df",
"def onMarketUpdate(self, data):\n pass",
"def on_tick(self, tick: TickData):\n self.bg.update_tick(tick)\n print(tick)\n # self.trading = True",
"async def btc(self, ctx):\n try:\n btc_bitstamp_json = await self.bot.aiojson(\"https://www.bitstamp.net/api/ticker\")\n\n btc_currentprice_rate = Decimal(btc_bitstamp_json[\"last\"])\n btc_currentprice_string = self.format_currency(btc_currentprice_rate)\n\n btc_lastopen_rate = Decimal(btc_bitstamp_json[\"open\"])\n btc_lastopen_string = self.format_currency(btc_lastopen_rate)\n\n btc_high_string = self.format_currency(btc_bitstamp_json[\"high\"])\n btc_low_string = self.format_currency(btc_bitstamp_json[\"low\"])\n btc_bid_string = self.format_currency(btc_bitstamp_json[\"bid\"])\n btc_ask_string = self.format_currency(btc_bitstamp_json[\"ask\"])\n btc_volume_string = str(btc_bitstamp_json[\"volume\"]) + \" BTC\"\n\n btc_diff = btc_currentprice_rate - btc_lastopen_rate\n btc_change_percentage = (\n 100 * Decimal(btc_diff) / Decimal(btc_currentprice_rate))\n btc_change_percentage_string = f\"{str(btc_change_percentage)[:6]}%\"\n\n btc_change_color = self.get_change_color(btc_change_percentage, 10)\n\n btc_data_timestamp = datetime.datetime.utcfromtimestamp(\n int(btc_bitstamp_json[\"timestamp\"]))\n\n link = \"https://bitcoincharts.com/charts/chart.png?width=600&m=bitstampUSD&r=30\"\\\n f\"&t=S&v=1&cacheinval={int(time.time())}\"\n embed = discord.Embed(color=btc_change_color,\n timestamp=btc_data_timestamp)\n\n embed.set_author(name=\"30 Day BTC Chart and Info\",\n icon_url=\"https://bitcoin.org/img/icons/opengraph.png\")\n embed.set_image(url=link)\n embed.set_footer(text=\"Chart supplied by bitcoincharts.com under CC-BY-SA 3.0, \"\\\n \"price info supplied by BitStamp. \" + self.legal_notice)\n\n embed.add_field(name=\"Current Price\", value=btc_currentprice_string)\n embed.add_field(name=\"Opening Price\", value=btc_lastopen_string)\n\n embed.add_field(name=\"Change\", value=btc_change_percentage_string)\n embed.add_field(name=\"Volume\", value=btc_volume_string)\n\n embed.add_field(name=\"High\", value=btc_high_string)\n embed.add_field(name=\"Low\", value=btc_low_string)\n\n embed.add_field(name=\"Bid\", value=btc_bid_string)\n embed.add_field(name=\"Ask\", value=btc_ask_string)\n\n await ctx.send(embed=embed)\n except:\n await ctx.send(\"Error while fetching BTC data.\")\n self.bot.log.error(traceback.format_exc())",
"def slot_ticker(self, dummy_sender, data):\r\n (bid, ask) = data\r\n self.bid = bid\r\n self.ask = ask\r\n self.last_change_type = None\r\n self.last_change_price = 0\r\n self.last_change_volume = 0\r\n self._repair_crossed_asks(ask)\r\n self._repair_crossed_bids(bid)\r\n self.signal_changed(self, None)",
"async def btc( ctx):\r\n await ctx.message.delete()\r\n r = requests.get(\r\n \"https://min-api.cryptocompare.com/data/price?fsym=BTC&tsyms=USD,EUR,GBP\"\r\n )\r\n r = r.json()\r\n usd = r[\"USD\"]\r\n eur = r[\"EUR\"]\r\n gbp = r[\"GBP\"]\r\n em = discord.Embed(\r\n description=f\"USD: `{str(usd)}$`\\n\\nEUR: `{str(eur)}€`\\n\\nGBP: `{str(gbp)}£`\"\r\n )\r\n em.set_author(\r\n name=\"Bitcoin\",\r\n icon_url=\"https://cdn.pixabay.com/photo/2013/12/08/12/12/bitcoin-225079_960_720.png\",\r\n )\r\n await ctx.send(embed=em)\r\n ### I hope this code is so horrible I'm never allowed to code embeds again\r",
"def update_total_price():\n tk_total_price.set('Total: {0:>6}'.format(str(total_price)))\n print(total_price)",
"def trade_action(self, BUY_QTY):\n BUY_QTY = 4500\n self.trade(BUY_QTY)\n #self.show()",
"def _handle_market_data(self, response):\n if response['type'] != 'update':\n err_msg = f\"Got unexpected response: {response['type']}\"\n logging.info(err_msg)\n return\n events = response['events']\n # Only iterate over change events.\n for event in (e for e in events if e['type'] == 'change'):\n side = event['side']\n price = Decimal(event['price'])\n quantity = Decimal(event['remaining'])\n quote = Quote(price=price, quantity=quantity)\n if side == 'bid':\n self.exchange_state.order_book().bids().set_quote(quote)\n elif side == 'ask':\n self.exchange_state.order_book().asks().set_quote(quote)\n else:\n raise Exception(\"Unexpected update side: \" + side)\n return True",
"def percent_changes(self):\n\n # close_t = float(val[\"klines\"][\"1m\"].get(self.mw.cfg_manager.pair, {})[-5][4])\n klines_data = self.mw.klines.get(\"1m\")\n coin_data = klines_data.get(self.mw.cfg_manager.pair)\n\n if isinstance(coin_data, list):\n close_5m = float(self.mw.klines[\"1m\"][self.mw.cfg_manager.pair][-5][4])\n close_15m = float(self.mw.klines[\"1m\"][self.mw.cfg_manager.pair][-15][4])\n # close_30m = float(self.mw.klines[\"1m\"][self.mw.cfg_manager.pair][-30][4])\n close_1h = float(self.mw.klines[\"1m\"][self.mw.cfg_manager.pair][-60][4])\n close_4h = float(self.mw.klines[\"1m\"][self.mw.cfg_manager.pair][-240][4])\n\n change_5m_value = ((float(val[\"tickers\"][self.mw.cfg_manager.pair][\"lastPrice\"]) / float(close_5m)) - 1) * 100\n change_15m_value = ((float(val[\"tickers\"][self.mw.cfg_manager.pair][\"lastPrice\"]) / float(close_15m)) - 1) * 100\n # change_30m_value = ((float(val[\"tickers\"][self.mw.cfg_manager.pair][\"lastPrice\"]) / float(close_30m)) - 1) * 100\n change_1h_value = ((float(val[\"tickers\"][self.mw.cfg_manager.pair][\"lastPrice\"]) / float(close_1h)) - 1) * 100\n change_4h_value = ((float(val[\"tickers\"][self.mw.cfg_manager.pair][\"lastPrice\"]) / float(close_4h)) - 1) * 100\n\n change_1d_value = float(val[\"tickers\"][self.mw.cfg_manager.pair][\"priceChangePercent\"])\n\n\n changes = [self.mw.change_5m, self.mw.change_15m, self.mw.change_1h, self.mw.change_4h, self.mw.change_1d]\n change_values = [change_5m_value, change_15m_value, change_1h_value, change_4h_value, change_1d_value]\n\n for i, change in enumerate(changes):\n if change_values[i] > 0:\n operator = \"+\"\n color = Colors.color_green\n elif change_values[i] < 0:\n operator = \"\"\n color = Colors.color_pink\n else:\n operator = \"\"\n color = Colors.color_grey\n\n # print(str(change))\n change.setText(\"<span style='color: \" + color + \"'>\" + operator + \"{0:.2f}\".format(change_values[i]) + \"%</span\")",
"def update(self, price, volume):\r\n if price > self.hig:\r\n self.hig = price\r\n if price < self.low:\r\n self.low = price\r\n self.cls = price\r\n self.vol += volume"
] | [
"0.6816411",
"0.6801577",
"0.65582",
"0.6458763",
"0.640456",
"0.63957596",
"0.63561404",
"0.6308241",
"0.6267959",
"0.60562086",
"0.6038898",
"0.6030322",
"0.6026573",
"0.6005116",
"0.5988077",
"0.5987116",
"0.59702915",
"0.5927759",
"0.5876349",
"0.5855136",
"0.5835843",
"0.58330625",
"0.58323395",
"0.5814783",
"0.5783299",
"0.577812",
"0.5749706",
"0.57474285",
"0.57127446",
"0.5701854"
] | 0.72373915 | 0 |
Creates vocab tables for src_vocab_file and tgt_vocab_file | def create_vocab_tables(src_vocab_file, tgt_vocab_file, share_vocab):
src_vocab_table = lookup_ops.index_table_from_file(
src_vocab_file, default_value=UNK_ID)
if share_vocab:
tgt_vocab_table = src_vocab_table
else:
tgt_vocab_table = lookup_ops.index_table_from_file(
tgt_vocab_file, default_value=UNK_ID)
return src_vocab_table, tgt_vocab_table | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def vocab_tables(source_file, tags_file):\n pass",
"def create_vocab_tables(vocab_file):\n vocab_table = lookup_ops.index_table_from_file(\n vocab_file, default_value=0)\n return vocab_table",
"def create_vocab(data_files, vocab_fname):\n chars = set()\n for data_fname in data_files:\n with io.open(data_fname, 'r', encoding='utf8') as fp:\n raw = fp.read().lower()\n chars.update(raw)\n\n vocab = list(chars - set(['\\t', '\\n'])) + SPECIALS\n tf.logging.info('Creating vocab file..')\n with io.open(vocab_fname, 'w', encoding='utf8') as fp:\n fp.write('\\n'.join(vocab))",
"def load_vocabs(src_lang,\n src_vocab_file_name,\n tgt_lang,\n tgt_vocab_file_name):\n\n src_vocab = _load_vocab(src_vocab_file_name, src_lang)\n tgt_vocab = _load_vocab(tgt_vocab_file_name, tgt_lang)\n\n return src_vocab, tgt_vocab",
"def make_vocab(src_hparams, tgt_hparams):\n src_vocab = MonoTextData.make_vocab(src_hparams)\n\n if tgt_hparams[\"processing_share\"]:\n tgt_bos_token = src_hparams[\"bos_token\"]\n tgt_eos_token = src_hparams[\"eos_token\"]\n else:\n tgt_bos_token = tgt_hparams[\"bos_token\"]\n tgt_eos_token = tgt_hparams[\"eos_token\"]\n tgt_bos_token = utils.default_str(tgt_bos_token,\n SpecialTokens.BOS)\n tgt_eos_token = utils.default_str(tgt_eos_token,\n SpecialTokens.EOS)\n if tgt_hparams[\"vocab_share\"]:\n if tgt_bos_token == src_vocab.bos_token and \\\n tgt_eos_token == src_vocab.eos_token:\n tgt_vocab = src_vocab\n else:\n tgt_vocab = Vocab(src_hparams[\"vocab_file\"],\n bos_token=tgt_bos_token,\n eos_token=tgt_eos_token)\n else:\n tgt_vocab = Vocab(tgt_hparams[\"vocab_file\"],\n bos_token=tgt_bos_token,\n eos_token=tgt_eos_token)\n\n return src_vocab, tgt_vocab",
"def _check_vocab_files(self):\n src_vocab = self.configs['vocab_prefix'] + \".\" + self.configs['src']\n src_vocab_size, src_vocab_file = self._check_vocab_file(\n src_vocab,\n [self.configs['unk'], self.configs['sos'], self.configs['eos']])\n self.configs['source_vocab_file'] = src_vocab_file\n self.configs['source_vocab_size'] = src_vocab_size\n\n tgt_vocab = self.configs['vocab_prefix'] + \".\" + self.configs['tgt']\n tgt_vocab_size, tgt_vocab_file = self._check_vocab_file(\n tgt_vocab,\n [self.configs['unk'], self.configs['sos'], self.configs['eos']])\n self.configs['target_vocab_file'] = tgt_vocab_file\n self.configs['target_vocab_size'] = tgt_vocab_size",
"def _build_vocabulary(input_files):\n if FLAGS.vocab_file:\n tf.logging.info(\"Loading existing vocab file.\")\n vocab = collections.OrderedDict()\n with tf.gfile.GFile(FLAGS.vocab_file, mode=\"r\") as f:\n for i, line in enumerate(f):\n word = line.decode(\"utf-8\").strip()\n assert word not in vocab, \"Attempting to add word twice: %s\" % word\n vocab[word] = i\n tf.logging.info(\"Read vocab of size %d from %s\",\n len(vocab), FLAGS.vocab_file)\n return vocab\n\n tf.logging.info(\"Creating vocabulary.\")\n num = 0\n wordcount = collections.Counter()\n for input_file in input_files:\n tf.logging.info(\"Processing file: %s\", input_file)\n for sentence in tf.gfile.FastGFile(input_file):\n wordcount.update(sentence.split())\n\n num += 1\n if num % 1000000 == 0:\n tf.logging.info(\"Processed %d sentences\", num)\n\n tf.logging.info(\"Processed %d sentences total\", num)\n\n words = wordcount.keys()\n freqs = wordcount.values()\n sorted_indices = np.argsort(freqs)[::-1]\n\n vocab = collections.OrderedDict()\n vocab[special_words.EOS] = special_words.EOS_ID\n vocab[special_words.UNK] = special_words.UNK_ID\n for w_id, w_index in enumerate(sorted_indices[0:FLAGS.num_words - 2]):\n vocab[words[w_index]] = w_id + 2 # 0: EOS, 1: UNK.\n\n tf.logging.info(\"Created vocab with %d words\", len(vocab))\n\n vocab_file = os.path.join(FLAGS.output_dir, \"vocab.txt\")\n with tf.gfile.FastGFile(vocab_file, \"w\") as f:\n f.write(\"\\n\".join(vocab.keys()))\n tf.logging.info(\"Wrote vocab file to %s\", vocab_file)\n\n word_counts_file = os.path.join(FLAGS.output_dir, \"word_counts.txt\")\n with tf.gfile.FastGFile(word_counts_file, \"w\") as f:\n for i in sorted_indices:\n f.write(\"%s %d\\n\" % (words[i], freqs[i]))\n tf.logging.info(\"Wrote word counts file to %s\", word_counts_file)\n\n return vocab",
"def gen_vocab(targets, fname):\n\n\tpath = os.path.join(\"data\", fname)\n\tif not os.path.isfile(path):\n\t\tworddict, wordcount = vocab.build_dictionary(targets)\n\t\tvocab.save_dictionary(worddict, wordcount, path)\n\n\treturn path",
"def create_vocab_table(db, vocab):\n con = lite.connect(db)\n with con:\n cur = con.cursor()\n cur.execute(\"CREATE TABLE IF NOT EXISTS Vocab(vocab TEXT)\")\n for i in range(0, len(vocab)):\n vocab[i] = (vocab[i],)\n with con:\n cur = con.cursor()\n cur.executemany(\"INSERT INTO Vocab VALUES (?)\", vocab)",
"def _make_vocab_files(self):\n self.logger.info('making question vocab...' + self.opt.QUESTION_VOCAB_SPACE)\n qdic, _ = self.load_data(self.opt.QUESTION_VOCAB_SPACE)\n question_vocab = VQADataProvider.make_question_vocab(qdic, self.max_length)\n self.logger.info('making answer vocab...' + self.opt.ANSWER_VOCAB_SPACE)\n qdic, adic = self.load_data(self.opt.ANSWER_VOCAB_SPACE)\n answer_vocab = VQADataProvider.make_answer_vocab(adic, qdic, self.opt.MAX_ANSWER_VOCAB_SIZE, self.use_ocr)\n return question_vocab, answer_vocab",
"def build_vocab(sentences_list, vocab_size, visual_fld):\n words = [word for sentence in sentences_list for word in sentence]\n utils.safe_mkdir(visual_fld)\n with open(os.path.join(visual_fld, 'vocab.tsv'), 'w') as fd:\n dictionary = {}\n index_dictionary = {}\n count = [('UNK', -1)]\n count.extend(Counter(words).most_common(vocab_size - 1))\n for index, (word, _) in enumerate(count):\n dictionary[word] = index\n index_dictionary[index] = word\n fd.write(word + '\\n')\n\n return dictionary, index_dictionary",
"def load_target_vocab(self):\n vocab = [line.split()[0] for line in open(os.path.join('preprocessed', 'all_vocab.txt'), 'r').read().splitlines()]\n self.word2idx = {word: idx for idx, word in enumerate(vocab)}\n self.idx2word = {idx: word for idx, word in enumerate(vocab)}\n self.vocab_size = len(self.word2idx)",
"def create_vocab():\n \n cutoff = CUTOFF\n \n lines = open(INFNAME_FORMAT.format(\"train\")).readlines() \\\n + open(INFNAME_FORMAT.format(\"test\")).readlines()\n raw = [process_line(l) for l in lines]\n cntx = Counter( [ w for e in raw for w in e ] )\n vocab = { x for x, y in cntx.items() if y > cutoff }\n \n return vocab",
"def create_vocab(vocab_path='ORBvoc-synth.txt'):\n total_time = 10 # seconds\n num_frames = 20\n speed = 3.0\n vocab_builder = VocabularyBuilder()\n for seed in tqdm(range(100), total=100):\n image_builder = DemoImageBuilder(\n mode=ImageMode.MONOCULAR, seed=seed,\n length=total_time * speed\n )\n for idx in range(num_frames):\n time = total_time * idx / num_frames\n image = image_builder.create_frame(time)\n vocab_builder.add_image(image.pixels)\n vocab_builder.build_vocabulary(str(vocab_path))",
"def build_vocab(words, vocab_size, visual_fld=None):\n utils.safe_mkdir(visual_fld)\n file = open(os.path.join(visual_fld, 'vocab.tsv'), 'w',encoding='utf8')\n\n dictionary = dict()\n count = [('UNK', -1)]\n index = 0\n count.extend(Counter(words).most_common(vocab_size - 1))\n\n for word, _ in count:\n dictionary[word] = index\n index += 1\n file.write(word + '\\n')\n\n index_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n file.close()\n return dictionary, index_dictionary",
"def build_vocab(filenames):\n vocab = set()\n max_word_length = 0\n max_sentence_length = 0\n number_of_sentences = 0\n for filename in filenames:\n with io.open(filename, 'r', encoding='utf8') as fin:\n for line in fin.readlines():\n number_of_sentences += 1\n vocab = vocab | set(line)\n sentence_length = len(line)\n if sentence_length > max_sentence_length:\n max_sentence_length = sentence_length\n if number_of_sentences % 1000 == 0:\n print(str(number_of_sentences))\n vocab = list(vocab)\n char_to_int = {char:(i+1) for i, char in enumerate(vocab)}\n int_to_char = {(i+1):char for i, char in enumerate(vocab)}\n metadata = {\"char_to_int\": char_to_int,\n \"int_to_char\": int_to_char,\n \"max_sentence_length\": max_sentence_length,\n \"number_of_sentences\": number_of_sentences}\n return metadata",
"def create_tables (cls, env=os.environ):\n\n cur = cls.pri_table_read_cursor (env=env)\n cur.execute ('SPECIALCASE gettablelist')\n ret = cur.fetchall ()\n \n existingtables = set ([x[0].lower() for x in ret])\n\n for tabname in (set (cls.table_desc.keys ()) - existingtables):\n sql, lsd = cls.table_desc[tabname]\n epls, desls, sqlprefix = lsd.get_create_labeling (savels=True)\n\n conn = get_labeled_conn (epls, desls)\n cur = conn.cursor ()\n cur.execute (sql)\n conn.close ()\n lsd.pop_labelset ()\n\n \n import psycopg2\n for sql in cls.sql_createindex:\n conn = get_labeled_conn ()\n cur = conn.cursor ()\n # XXX It would be better to check which indices exist as we do for tables.\n try:\n cur.execute (sql)\n except psycopg2.ProgrammingError, e: \n pass\n conn.close ()",
"def build_vocab(train_dir, vocab_dir, vocab_size=5000):\n data_train, _ = read_file(train_dir)\n\n all_data = []\n for content in data_train:\n all_data.extend(content)\n\n counter = Counter(all_data)\n count_pairs = counter.most_common(vocab_size-1)\n words, _ = list(zip(*count_pairs))\n\n open_file(vocab_dir,mode='w').write('\\n'.join(words)+'\\n')",
"def build_vocab(self, data_paths):\n\t\tfor data_path in data_paths:\n\t\t\tprint(\"Cur path: \" + data_path)\n\t\t\twith open(data_path, 'r', encoding='utf-8') as dataset:\n\t\t\t\tfor word in tqdm(dataset):\n\t\t\t\t\tword = word.strip('\\n')\n\n\t\t\t\t\tself.word_list.append(word)\n\t\t\t\t\tif self.max_length < len(word):\n\t\t\t\t\t\tself.max_length = len(word)\n\n\t\t\t\t\tfor char in word:\n\t\t\t\t\t\tif char not in self.all_table:\n\t\t\t\t\t\t\tself.all_table.append(char)\n\t\t\t\t\t\t\tself.all_ind[char] = len(self.all_table) - 1\n\t\t\t\t\t\t\tself.num_all += 1\n\n\t\tprint(self.all_table)",
"def copy_vocab(self, sample_hdf_file):\n log.info(\"copying vocabulary from sample file\")\n sample_hdf_file.copy(self.VOCAB_PATH, self.file)",
"def prepare_raw_data(raw_data_dir, processed_data_dir , src_vocabulary_size, tgt_vocabulary_size, tokenizer=None):\n # extract corpus to the specified processed directory.\n get_data_set(raw_data_dir, processed_data_dir)\n\n # Create vocabularies of the appropriate sizes.\n tgt_vocab_path = os.path.join(processed_data_dir, \"vocab.tgt\" )\n src_vocab_path = os.path.join(processed_data_dir, \"vocab.src\" )\n create_vocabulary(tgt_vocab_path, os.path.join(processed_data_dir, \"targetIDs\"), tgt_vocabulary_size, tokenizer, normalize_digits=False)\n create_vocabulary(src_vocab_path, os.path.join(processed_data_dir, \"Train.source\"), src_vocabulary_size, tokenizer, normalize_digits=False)\n\n #create Encoded TargetSpace file\n encodedFullTargetSpace_path = os.path.join(processed_data_dir, \"encoded.FullTargetSpace\")\n tgt_vocab, _ = initialize_vocabulary(tgt_vocab_path)\n targetIDs = set()\n with codecs.open( encodedFullTargetSpace_path, 'w', 'utf-8') as tokens_file:\n for line in codecs.open( os.path.join(processed_data_dir, \"targetIDs\"), 'r', 'utf-8'):\n tgtSeq, id = line.strip().split('\\t')\n token_ids = sentence_to_token_ids(tgtSeq, tgt_vocab, normalize_digits=False)\n token_ids = [BOS_ID] + token_ids + [EOS_ID]\n tokens_file.write( id + '\\t' + \" \".join([str(tok) for tok in token_ids]) + \"\\n\")\n targetIDs.add(id)\n\n # Create Encoded TrainPairFile\n encoded_train_pair_path = os.path.join(processed_data_dir, \"encoded.TrainPairs\")\n raw_train_pair_path = os.path.join(processed_data_dir, 'TrainPairs')\n encode_data_to_token_ids(raw_train_pair_path, encoded_train_pair_path, src_vocab_path, targetIDs, normalize_digits=False)\n\n # Create Encoded EvalPairFile\n encoded_eval_pair_path = os.path.join(processed_data_dir, \"encoded.EvalPairs\")\n raw_eval_pair_path = os.path.join(processed_data_dir, 'EvalPairs')\n encode_data_to_token_ids(raw_eval_pair_path, encoded_eval_pair_path, src_vocab_path, targetIDs, normalize_digits=False)\n\n\n return (encoded_train_pair_path, encoded_eval_pair_path,\n encodedFullTargetSpace_path,\n src_vocab_path, tgt_vocab_path)",
"def create_vocabulary(vocabulary_path, json_vocab_path):\n if not gfile.Exists(vocabulary_path):\n print(\"Transform vocabulary to %s\" % vocabulary_path)\n with gfile.GFile(json_vocab_path, mode=\"rb\") as f:\n jvocab = json.load(f)\n vocab = jvocab['w2id']\n vocab_list = _START_VOCAB + sorted(vocab, key=vocab.get)\n with gfile.GFile(vocabulary_path, mode=\"wb\") as vocab_file:\n for w in vocab_list:\n vocab_file.write(w + b\"\\n\")",
"def generate_vocab():\n\n vocab_dict = {}\n folder_path = os.listdir(args.f)\n for subfolder in folder_path:\n subfolder_path = os.path.join(args.f, subfolder)\n for filename in os.listdir(subfolder_path):\n with open(os.path.join(subfolder_path, filename), 'r') as file:\n read_file = file.read()\n normalised_text = re.sub(r\"[^\\s\\w]\", \" \", read_file.lower())\n vocab = normalised_text.split() #.split() creates a list of strings\n vocab_dict.update({i: 0 for i in vocab})\n return vocab_dict",
"def get_vocabulary(text_fname, vocab_fname):\n with codecs.open(text_fname,'r','utf-8') as infile, \\\n codecs.open(vocab_fname,'w','utf-8') as outfile: \n\n count_map={}\n for line in infile:\n sent=line.strip().split(' ')\n for w in sent:\n count_map[w]=count_map.get(w,0.0)+1.0\n\n for w,c in count_map.iteritems(): \n outfile.write(u'{}|{}\\n'.format(w,c))",
"def _extract_vocab_data(source_files):\n vocab = set()\n\n for source_file in source_files:\n with tf.gfile.Open(source_file) as vocab_file:\n for line in vocab_file:\n tokens = line.split()\n vocab.update(tokens)\n\n return list(vocab)",
"def write_to_bin(tok_files, out_file, makevocab=False):\n\n num_stories = len(tok_files)\n\n if makevocab:\n vocab_counter = collections.Counter()\n\n with open(out_file, 'wb') as writer:\n for idx,s in enumerate(tok_files):\n if idx % 1000 == 0:\n print(\"Writing story %i of %i; %.2f percent done\" % (idx, num_stories, float(idx)*100.0/float(num_stories)))\n\n path = os.path.join(tok_dir, s)\n src_path = \"%s.src.tok\" % path\n tgt_path = \"%s.tgt.tok\" % path\n for _ in [src_path, tgt_path]:\n if not os.path.isfile(_):\n raise Exception(\"Error: Couldn't find tokenized file %s\" % _)\n\n # Get the strings to write to .bin file\n article, abstract = [to_bytes(_) for _ in get_art_abs(src_path, tgt_path)]\n\n # Write to tf.Example\n tf_example = example_pb2.Example()\n tf_example.features.feature['article'].bytes_list.value.extend([article])\n tf_example.features.feature['abstract'].bytes_list.value.extend([abstract])\n tf_example_str = tf_example.SerializeToString()\n str_len = len(tf_example_str)\n writer.write(struct.pack('q', str_len))\n writer.write(struct.pack('%ds' % str_len, tf_example_str))\n\n # Write the vocab to file, if applicable\n if makevocab:\n art_tokens = article.split(b' ')\n abs_tokens = abstract.split(b' ')\n art_tokens = [t for t in art_tokens if t not in [to_bytes(SENTENCE_START), to_bytes(SENTENCE_END)]] # remove these tags from vocab\n abs_tokens = [t for t in abs_tokens if t not in [to_bytes(SENTENCE_START), to_bytes(SENTENCE_END)]] # remove these tags from vocab\n tokens = art_tokens + abs_tokens\n tokens = [t.strip() for t in tokens] # strip\n tokens = [t for t in tokens if t!=\"\"] # remove empty\n vocab_counter.update(tokens)\n\n print(\"Finished writing file %s\\n\" % out_file)\n\n # write vocab to file\n if makevocab:\n print(\"Writing vocab file...\")\n with open(os.path.join(finished_files_dir, \"vocab\"), 'wb') as writer:\n for word, count in vocab_counter.most_common(VOCAB_SIZE):\n writer.write(word + b' ' + to_bytes(str(count)) + b'\\n')\n print(\"Finished writing vocab file\")",
"def merge_vocab(*args):\n\n # use this list to filter out 'characters' that we don't need to make the new dataset\n ignore_char_list = [\"<bos>\", \"<eos>\", \"<pad>\", \"<unk>\"]\n merged_char_set = set()\n\n for vocab_path in args:\n vocab = torch.load(vocab_path)\n vocab_chars_set = set(\n [x for x in vocab.c2i.keys() if x not in ignore_char_list]\n )\n merged_char_set.update(vocab_chars_set)\n\n return CharVocab(merged_char_set)",
"def create_vocabulary(sentences, path):\n print('creating vocab..')\n\n word_dict = dict(); vocabulary = dict()\n for sentence in sentences:\n for word in nltk.word_tokenize(sentence):\n if word not in word_dict:\n word_dict[word] = ''\n word_dict['<s>'] = ''\n word_dict['</s>'] = ''\n\n with open(path, encoding=\"utf8\") as f:\n for line in f:\n word, vec = line.split(' ', 1)\n if word in word_dict:\n vocabulary[word] = np.fromstring(vec, sep=' ')\n\n print('vocabulary was created successfully!')\n return vocabulary",
"def build_vocab(path, fname):\r\n\twords = open(path, 'r', encoding='utf-8').read().split()\r\n\twordCount = Counter(words)\r\n\tif not os.path.exists(pm.vocab_path):\r\n\t\tos.makedirs(pm.vocab_path)\r\n\twith open(pm.vocab_path + fname, 'w', encoding='utf-8') as f:\r\n\t\tf.write(\"{}\\t1000000000\\n{}\\t1000000000\\n{}\\t1000000000\\n{}\\t1000000000\\n\".format(\"<PAD>\", \"<UNK>\", \"<SOS>\", \"<EOS>\"))\r\n\t\tfor word, count in wordCount.most_common(len(wordCount)):\r\n\t\t\tf.write(u\"{}\\t{}\\n\".format(word, count))",
"def make_dictionary(self, train_file, validation_file, test_file):\n print 'constructing vocabulary'\n train_set, test_set, valid_set = set(), set(), set()\n label_set = set()\n ftrain = io.open(train_file, 'r')\n for line in ftrain:\n entity, label = map(clean, line.rstrip().split('\\t')[:2])\n train_set |= set(list(entity))\n label_set |= set(label.split(','))\n\n fvalid = io.open(train_file, 'r')\n for line in fvalid:\n entity, label = map(clean, line.rstrip().split('\\t')[:2])\n valid_set |= set(list(entity))\n label_set |= set(label.split(','))\n\n ftest = io.open(test_file, 'r')\n for line in ftest:\n entity, label = map(clean, line.rstrip().split('\\t')[:2])\n test_set |= set(list(entity))\n # label_set |= set(label.split(','))\n \n print '# chars in training ', len(train_set)\n print '# chars in validation ', len(valid_set)\n print '# chars in testing ', len(test_set)\n print '# chars in (testing-training-validation) ', len(test_set-train_set-valid_set)\n print '# labels', len(label_set)\n\n vocabulary = list(train_set | test_set | valid_set)\n vocab_size = len(vocabulary)\n chardict = dict(zip(vocabulary, range(1,vocab_size+1)))\n chardict[u' '] = 0\n labeldict = dict(zip(list(label_set), range(len(label_set))))\n \n return chardict, labeldict"
] | [
"0.7955603",
"0.7308529",
"0.7184133",
"0.68010455",
"0.6748521",
"0.6647768",
"0.64361453",
"0.64343816",
"0.642107",
"0.637317",
"0.63398",
"0.62193406",
"0.60044277",
"0.59781426",
"0.58856463",
"0.5873985",
"0.5845134",
"0.58191055",
"0.5818881",
"0.58034873",
"0.5802334",
"0.5741871",
"0.5728393",
"0.5723298",
"0.5708046",
"0.56991404",
"0.5682408",
"0.56765455",
"0.5670646",
"0.5669087"
] | 0.8489429 | 0 |
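A minimal usage sketch for the create_vocab_tables row above; it is illustrative only and not part of the stored row. It assumes TensorFlow 1.x graph mode, that UNK_ID is the module-level constant 0 referenced by the function, and hypothetical one-token-per-line vocabulary files named vocab.src and vocab.tgt; the function body is repeated only so the sketch is self-contained.

import tensorflow as tf
from tensorflow.python.ops import lookup_ops

UNK_ID = 0  # assumed value of the module-level constant used above

def create_vocab_tables(src_vocab_file, tgt_vocab_file, share_vocab):
    # One string-to-id lookup table per side, optionally shared, with
    # out-of-vocabulary tokens mapped to UNK_ID (same logic as the row above).
    src_vocab_table = lookup_ops.index_table_from_file(
        src_vocab_file, default_value=UNK_ID)
    if share_vocab:
        tgt_vocab_table = src_vocab_table
    else:
        tgt_vocab_table = lookup_ops.index_table_from_file(
            tgt_vocab_file, default_value=UNK_ID)
    return src_vocab_table, tgt_vocab_table

src_table, tgt_table = create_vocab_tables("vocab.src", "vocab.tgt", share_vocab=False)
ids = src_table.lookup(tf.constant(["hello", "token-not-in-vocab"]))
with tf.Session() as sess:
    sess.run(tf.tables_initializer())
    print(sess.run(ids))  # out-of-vocabulary tokens come back as UNK_ID

The share_vocab flag simply aliases the target table to the source table, which is why shared-vocabulary setups pay the table-construction cost only once.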
Load embed text into python dictionary | def load_embed_text(embed_file):
emb_dict = dict()
emb_size = None
with codecs.getreader("utf-8")(tf.gfile.GFile(embed_file, "rb")) as f:
for line in f:
tokens = line.strip().split(" ")
word = tokens[0]
vec = list(map(float, tokens[1:]))
emb_dict[word] = vec
if emb_size:
assert emb_size == len(vec), "All embeddings should be same size"
else:
emb_size = len(vec)
return emb_dict, emb_size | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_embedding_file(self):\n if self.language == 'en':\n embed_file_dir = self.embedding_path\n wv = KeyedVectors.load_word2vec_format(embed_file_dir, binary=True)\n self.pretrained_embedding = {}\n for word in wv.vocab.keys():\n normalized_word = normalization.process(self.language.upper(), word, letters_to_keep='', letters_to_remove='',\n lowercase=True, remove_repetitions_count=-1, remove_punct=True,\n remove_digits=True, remove_vowels=False, remove_diacritics=True,\n remove_spaces=False, remove_apostrophe=True, copy_through=False,\n keep_romanized_text=False)\n self.pretrained_embedding[normalized_word] = wv[word]\n self.embed_dim = 300\n\n else:\n embed_file_dir = self.embedding_path\n fin = open(embed_file_dir, 'r', encoding='utf-8', newline='\\n', errors='ignore')\n data = {}\n for line in fin:\n if len(line.split()) == 2: # header\n continue\n tokens = line.rstrip().split(' ')\n word = tokens[0]\n normalized_word = normalization.process(self.language.upper(), word, letters_to_keep='', letters_to_remove='',\n lowercase=True, remove_repetitions_count=-1, remove_punct=True,\n remove_digits=True, remove_vowels=False, remove_diacritics=True,\n remove_spaces=False, remove_apostrophe=True, copy_through=False,\n keep_romanized_text=False)\n data[normalized_word] = np.array(tokens[1:])\n self.pretrained_embedding = data\n self.embed_dim = 300",
"def get_embedding():\n\n HOST = app.config.get('EMBEDDING_HOST')\n PORT = app.config.get('EMBEDDING_PORT')\n query_params = {\n 'text' : request.args.get('text', \"\"),\n \"language\" : request.args.get('language', None)\n }\n r = requests.get(f\"http://{HOST}:{PORT}/api/v1/embeddings/create\", params=query_params)\n return jsonify(r.json())",
"def _read(self, text):\n return yaml.safe_load(text)",
"def get_embeddings():\n embeddings = dict(get_coefs(*o.strip().split()) for o in open(EMBEDDING_FILE))\n return embeddings",
"def load_vocab(self):\n keys = []\n values = []\n with open(self.embed_file, 'r') as f:\n lines = f.readlines()\n\n for line in lines:\n key = line.split(\" \")[0]\n value = line.split(\" \")[1:]\n keys.append(key)\n values.append(value)\n # form <dict>\n # vocab = dict(zip(keys, values))\n return keys, values",
"def load_embeddings(self, str_file):\n\n with open(str_file, 'rb') as f_read:\n self.embeddings_entity = pickle.load(f_read)\n self.embeddings_relation = pickle.load(f_read)\n self.dict_paras = pickle.load(f_read)",
"def load_text_embed(filepath: Union[str, os.PathLike], load_dir: str = 'model') \\\n -> Tuple[TransformerEmbedding, Callable]:\n model_dir = Path(filepath).joinpath(load_dir)\n tokenizer = AutoTokenizer.from_pretrained(str(model_dir.resolve()))\n args = dill.load(open(model_dir.joinpath('embedding.dill'), 'rb'))\n emb = TransformerEmbedding(\n str(model_dir.resolve()), embedding_type=args['embedding_type'], layers=args['layers']\n )\n return emb, tokenizer",
"def loadEmbModel(embFile, logger):\n logger.info(\"Loading Embedding Model\")\n f = open(embFile,'r')\n model = {}\n v = []\n for line in f:\n splitLine = line.split(' ')\n word = splitLine[0]\n try:\n embedding = np.array([float(val) for val in splitLine[1:]])\n except:\n logger.info(len(v), line)\n model[word] = embedding\n v.append(embedding)\n mean = np.array(v).mean(0)\n logger.info(mean.shape)\n model['<unk>'] = torch.tensor(mean)\n model['<pad>'] = torch.zeros(embedding.shape)\n model['<start>'] = torch.zeros(embedding.shape)\n model['<end>'] = torch.zeros(embedding.shape)\n logger.info(\"Done.\",len(model),\" words loaded!\")\n return model",
"def embed():",
"def get_embed_dict(url, params=None):\n embed = EmbedFactory.get_embed(url, params)\n return embed.get_embed_dict() if embed else None",
"def embed(self, data, mime_type=\"text/plain\", encode_data_to_base64=True):\n if encode_data_to_base64:\n data = base64.standard_b64encode(data.encode()).decode()\n self.embeddings.append({\"data\": data, \"mime_type\": mime_type})",
"def gen_embedding(path):\r\n word_emb = {}\r\n with open(path, encoding='utf-8') as f:\r\n for line in tqdm(f):\r\n values = line.split()\r\n word_emb[values[0]] = np.asarray(values[1:], dtype='float32')\r\n return word_emb",
"def embed(query: str) -> dict:\n embedding = model.embed(query)\n return {\"embedding\": embedding, \"model\": model_name}",
"def embed(documents, ctx_encoder, ctx_tokenizer, device):\n input_ids = ctx_tokenizer(\n documents[\"title\"],\n documents[\"text\"],\n truncation=True,\n padding=\"longest\",\n return_tensors=\"pt\",\n )[\"input_ids\"]\n embeddings = ctx_encoder(\n input_ids.to(device=device), return_dict=True\n ).pooler_output\n return {\"embeddings\": embeddings.detach().cpu().numpy()}",
"def add_embed_itmes(data):\n for k, v in data.items() :\n embed.add_embed_field(name=k, value=v)",
"def _unserialize(text):\n return yaml.safe_load(text)",
"def dissect(self, text):",
"def load_embeddings(embedding_path, embedding_size, embedding_format):\n print(\"Loading word embeddings from {}...\".format(embedding_path))\n\n if embedding_format in ['vec', 'txt']:\n default_embedding = np.zeros(embedding_size)\n embedding_dict = collections.defaultdict(lambda: default_embedding)\n skip_first = embedding_format == \"vec\"\n with open(embedding_path) as f:\n for i, line in enumerate(f.readlines()):\n if skip_first and i == 0:\n continue\n splits = line.split(' ')\n assert len(splits) == embedding_size + 1\n word = splits[0]\n embedding = np.array([float(s) for s in splits[1:]])\n embedding_dict[word] = embedding\n elif embedding_format == 'bin':\n embedding_dict = fasttext.load_model(embedding_path)\n else:\n raise ValueError('Not supported embeddings format {}'.format(embedding_format))\n print(\"Done loading word embeddings.\")\n return embedding_dict",
"def dangerous_load(text: str, options: Dict[str, str]) -> object:\n try:\n docs = list(yaml.full_load_all(text)) # load the full yaml\n except yaml.YAMLError as e:\n raise LoadingError(\"Can't parse YAML\") from e # must use ValueError\n if len(docs) == 0:\n return {}\n if len(docs) == 1:\n return docs[0] # only one document\n return docs # leave as a list of documents",
"def load_glove_embeddings():\n\n emmbed_file = Path(\"./embeddings.pkl\")\n if emmbed_file.is_file():\n # embeddings already serialized, just load them\n print(\"Local Embeddings pickle found, loading...\")\n with open(\"./embeddings.pkl\", 'rb') as f:\n return pk.load(f)\n else:\n # create the embeddings\n print(\"Building embeddings dictionary...\")\n data = open(\"glove.6B.50d.txt\", 'r', encoding=\"utf-8\")\n embeddings = [[0] * EMBEDDING_SIZE]\n word_index_dict = {'UNK': 0} # first row is for unknown words\n index = 1\n for line in data:\n splitLine = line.split()\n word = tf.compat.as_str(splitLine[0])\n embedding = [float(val) for val in splitLine[1:]]\n embeddings.append(embedding)\n word_index_dict[word] = index\n index += 1\n data.close()\n\n # pickle them\n with open('./embeddings.pkl', 'wb') as f:\n print(\"Creating local embeddings pickle for faster loading...\")\n # Pickle the 'data' dictionary using the highest protocol available.\n pk.dump((embeddings, word_index_dict), f, pk.HIGHEST_PROTOCOL)\n\n return embeddings, word_index_dict",
"def load_text_file(i):\n\n fn = i['text_file']\n\n en = i.get('encoding', '')\n if en == '' or en == None:\n en = 'utf8'\n\n try:\n f = open(fn, 'rb')\n except Exception as e:\n return {'return': 16, 'error': 'problem opening text file='+fn+' ('+format(e)+')'}\n\n try:\n b = f.read()\n except Exception as e:\n f.close()\n return {'return': 1, 'error': 'problem reading text file='+fn+' ('+format(e)+')'}\n\n f.close()\n\n r = {'return': 0, 'bin': b}\n\n if i.get('delete_after_read', '') == 'yes':\n import os\n os.remove(fn)\n\n if i.get('keep_as_bin', '') != 'yes':\n try:\n # decode into Python string (unicode in Python3)\n s = b.decode(en).replace('\\r', '')\n except Exception as e:\n return {'return': 1, 'error': 'problem decoding content from file \"'+fn+'\" ('+format(e)+')'}\n\n r['string'] = s\n\n cl = i.get('split_to_list', '')\n cd = i.get('convert_to_dict', '')\n\n if cl == 'yes' or cd == 'yes':\n lst = s.split('\\n')\n r['lst'] = lst\n\n if cd == 'yes':\n dd = {}\n\n ss = i.get('str_split', '')\n rq = i.get('remove_quotes', '')\n if ss == '':\n ss = ':'\n\n for q in lst:\n qq = q.strip()\n ix = qq.find(ss)\n if ix > 0:\n k = qq[0:ix].strip()\n v = ''\n if ix+1 < len(qq):\n v = qq[ix+1:].strip()\n if v != '' and rq == 'yes':\n if v.startswith('\"'):\n v = v[1:]\n if v.endswith('\"'):\n v = v[:-1]\n dd[k] = v\n\n r['dict'] = dd\n\n return r",
"def load_embed(file_name, vocab_size):\n\n with tf.io.gfile.Open(file_name, 'r') as embed_file:\n vocab = []\n embeds = []\n depth = -1\n for index, line in enumerate(embed_file):\n if vocab_size > 0 and index >= vocab_size:\n break\n line = line.strip()\n tokens = line.strip().split(' ')\n word = tokens[0]\n vocab.append(word)\n if depth == -1:\n embed = [float(token) for token in tokens[1:]]\n else:\n embed = [float(token) for token in tokens[-depth:]]\n d = len(embed)\n if depth == -1:\n depth = d\n if d != depth:\n raise ValueError('Inconsistent embedding sizes')\n embeds.append(embed)\n\n embeds = np.stack(embeds)\n\n return vocab, embeds, depth",
"def decode(self,data):\n import yaml\n return yaml.load(data.decode('utf-8'))",
"def load_glove_embeddings():\n data = open(\"glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n embeddings = []\n word_index_dict = {'UNK':0}\n index = 1\n for lines in data:\n wordVector = lines.split(\" \")\n if(wordVector[0] in string.punctuation or any(char.isdigit() for char in wordVector[0])):\n continue\n embeddings.append(wordVector[1:-1])\n word_index_dict[wordVector[0]] = index\n index+=1\n print(\"done\")\n\n return embeddings, word_index_dict",
"def read_txt_embeddings(path, params):\n word2id = {}\n vectors = []\n\n # load pretrained embeddings\n _emb_dim_file = params.emb_dim\n with io.open(path, 'r', encoding='utf-8', newline='\\n', errors='ignore') as f:\n for i, line in enumerate(f):\n if i == 0:\n split = line.split()\n assert len(split) == 2\n assert _emb_dim_file == int(split[1])\n continue\n word, vect = line.rstrip().split(' ', 1)\n vect = np.fromstring(vect, sep=' ')\n if word in word2id:\n logger.warning(\"Word \\\"%s\\\" found twice!\" % word)\n continue\n if not vect.shape == (_emb_dim_file,):\n logger.warning(\"Invalid dimension (%i) for word \\\"%s\\\" in line %i.\"\n % (vect.shape[0], word, i))\n continue\n assert vect.shape == (_emb_dim_file,)\n word2id[word] = len(word2id)\n vectors.append(vect[None])\n\n assert len(word2id) == len(vectors)\n logger.info(\"Loaded %i pretrained word embeddings from %s\" % (len(vectors), path))\n\n # compute new vocabulary / embeddings\n embeddings = np.concatenate(vectors, 0)\n embeddings = torch.from_numpy(embeddings).float()\n\n assert embeddings.size() == (len(word2id), params.emb_dim)\n return word2id, embeddings",
"def get_embed_dict(self):\n if not self.get_url() or not self.get_embed_url():\n return None\n \n output = {\n \"url\": self.get_url(),\n \"embed_url\": self.get_embed_url(),\n \"provider_url\": self.get_provider_url(),\n \"provider_name\": self.get_provider_name(),\n \"thumbnail_url\": self.get_thumbnail_url(),\n \"type\": \"video\"\n }\n if self.get_height():\n output['iframe_height'] = self.get_height()\n if self.get_width():\n output['iframe_width'] = self.get_width()\n\n return output",
"def load_embeddings(path, vocab, source_domain, target_domain, emb_name):\n\n pkl = './work/embeddings/%s_%s_%s.pkl' % (source_domain, target_domain, emb_name)\n if os.path.exists(pkl):\n print(\"Load embeddings from existing pkl file %s...\" % pkl)\n # word embeddings weights have been loaded\n embeddings = pickle.load(open(pkl, 'rb'))\n else:\n print(\"Load embedding from %s...\" % path)\n raw_embeddings = {}\n if emb_name == 'yelp_electronics':\n with open(path) as fp:\n for line in fp:\n word_vector = line.split(\",\")[:-1]\n vector_list = []\n for element in word_vector[len(word_vector) - 100:]:\n vector_list.append(float(element))\n word = ','.join(word_vector[:len(word_vector) - 100])\n vector = np.asarray(vector_list)\n if word in vocab:\n raw_embeddings[word] = vector\n else:\n with open(path) as fp:\n for line in fp:\n eles = line.strip().split(' ')\n word = eles[0]\n if word in vocab:\n raw_embeddings[word] = eles[1:]\n\n dim_w = len(raw_embeddings['the'])\n n_words = len(vocab)\n embeddings = np.zeros(shape=(n_words, dim_w))\n for w in vocab:\n wid = vocab[w]\n if w in raw_embeddings:\n embeddings[wid] = np.array([float(ele) for ele in raw_embeddings[w]])\n else:\n # for OOV words, add random initialization\n embeddings[wid] = np.random.uniform(-0.25, 0.25, dim_w)\n print(\"Find %s word embeddings...\" % len(embeddings))\n if not os.path.exists('./work/embeddings'):\n os.mkdir('./work/embeddings')\n emb_path = './work/embeddings/%s_%s_%s.pkl' % (source_domain, target_domain, emb_name)\n # write the embedding weights back to the disk\n pickle.dump(embeddings, open(emb_path, 'wb'))\n embeddings = np.array(embeddings, dtype='float32')\n return embeddings",
"def parse_text(self, text: str) -> SectionDict:",
"def load_pretrained_words_data(embeddings_filename, vocab):\n words = dict()\n emb_dim = None\n with gzip.open(cached_path(embeddings_filename), 'rb') as embeddings_file:\n for line in embeddings_file:\n fields = line.decode('utf-8').strip().split(' ')\n if len(fields) == 0:\n continue\n word = fields[0]\n if emb_dim is None:\n emb_dim = len(fields) - 1\n if emb_dim < 10: # my pretrained file is poisonous 😭\n emb_dim = None\n else:\n assert emb_dim == len(fields) - 1, \"{}, {}\".format(emb_dim, len(fields) - 1)\n words.update({word: [float(i) for i in fields[1:]]})\n print(\"Embedding dim: {}\".format(emb_dim))\n tokens = vocab.get_index_to_token_vocabulary(\"tokens\")\n n_tokens = len(tokens)\n data = []\n for i in tokens:\n if tokens[i] in words:\n data.append(words[tokens[i]])\n else:\n data.append([0] * emb_dim)\n return torch.tensor(data), emb_dim",
"def load_embedding(fpath, VOCAB):\n print(\"Loading embeddings...\")\n emb = dict()\n wv_from_bin = KeyedVectors.load_word2vec_format(fpath, limit=VOCAB)\n for word, vector in tqdm(zip(wv_from_bin.vocab, wv_from_bin.vectors)):\n coefs = np.asarray(vector, dtype='float32')\n if word not in emb:\n emb[word] = coefs\n return emb"
] | [
"0.6251451",
"0.59665245",
"0.5931526",
"0.5864249",
"0.5857385",
"0.585637",
"0.5768584",
"0.56255275",
"0.55851597",
"0.5561361",
"0.55584276",
"0.55403835",
"0.5534576",
"0.551691",
"0.5507314",
"0.5474913",
"0.5453954",
"0.5416933",
"0.5389678",
"0.5383825",
"0.53736913",
"0.53670293",
"0.53591716",
"0.5353443",
"0.5341565",
"0.5334648",
"0.53285277",
"0.5322068",
"0.53199",
"0.529038"
] | 0.7389193 | 0 |
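A dependency-free sketch of the load_embed_text row above, illustrative only: the stored function reads through codecs and tf.gfile, while this version uses plain open() and a hypothetical two-line toy file (toy.vec) to show the returned dictionary and embedding size.

def load_embed_text_plain(embed_file):
    # Mirrors the row above: each line is "word v1 v2 ... vd"; returns a
    # word -> vector dict plus the (shared) embedding size.
    emb_dict = {}
    emb_size = None
    with open(embed_file, encoding="utf-8") as f:
        for line in f:
            tokens = line.strip().split(" ")
            word, vec = tokens[0], [float(t) for t in tokens[1:]]
            if emb_size is None:
                emb_size = len(vec)
            assert emb_size == len(vec), "All embeddings should be same size"
            emb_dict[word] = vec
    return emb_dict, emb_size

# Hypothetical toy embedding file with two 3-dimensional vectors.
with open("toy.vec", "w", encoding="utf-8") as f:
    f.write("the 0.1 0.2 0.3\ncat 0.4 0.5 0.6\n")
emb, dim = load_embed_text_plain("toy.vec")
print(dim, emb["cat"])  # prints: 3 [0.4, 0.5, 0.6]

In the stored version the same loop runs over a UTF-8 reader wrapped around tf.gfile.GFile, but the parsing logic is identical.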
Test Jacobi symbol function. Test data by clux .com Copyright (c) 2015 Eirik Albrigtsen. | def test_jacobi_symbol():
assert jacobi_symbol.jacobi_symbol(-1, 5) == 1
assert jacobi_symbol.jacobi_symbol(-1, 13) == 1
assert jacobi_symbol.jacobi_symbol(-1, 3) == -1
assert jacobi_symbol.jacobi_symbol(-1, 7) == -1
assert jacobi_symbol.jacobi_symbol(2, 3) == -1
assert jacobi_symbol.jacobi_symbol(2, 5) == -1
assert jacobi_symbol.jacobi_symbol(2, 7) == 1
assert jacobi_symbol.jacobi_symbol(2, 17) == 1
assert jacobi_symbol.jacobi_symbol(3, 3) == 0
assert jacobi_symbol.jacobi_symbol(3, 5) == -1
assert jacobi_symbol.jacobi_symbol(3, 7) == -1
assert jacobi_symbol.jacobi_symbol(3,5) == jacobi_symbol.jacobi_symbol(-2,5)
assert jacobi_symbol.jacobi_symbol(-1,5) == jacobi_symbol.jacobi_symbol(4,5)
assert jacobi_symbol.jacobi_symbol(11,7) == jacobi_symbol.jacobi_symbol(4,7)
assert jacobi_symbol.jacobi_symbol(-3,7) == jacobi_symbol.jacobi_symbol(4,7)
assert jacobi_symbol.jacobi_symbol(10,7) == jacobi_symbol.jacobi_symbol(3,7)
assert jacobi_symbol.jacobi_symbol(2, 45) == -1
assert jacobi_symbol.jacobi_symbol(3, 45) == 0
assert jacobi_symbol.jacobi_symbol(7, 45) == -1
assert jacobi_symbol.jacobi_symbol(2, 15) == 1
    assert jacobi_symbol.jacobi_symbol(1001, 9907) == -1  # Wikipedia example
| {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_jacobian(self):\n\n gT1 = Pose2(1, 2, np.pi/2)\n gT2 = Pose2(-1, 4, np.pi)\n\n expected = Pose2(2, 2, np.pi/2)\n\n def error_func(this: CustomFactor, v: gtsam.Values, H: List[np.ndarray]):\n # print(f\"{this = },\\n{v = },\\n{len(H) = }\")\n\n key0 = this.keys()[0]\n key1 = this.keys()[1]\n gT1, gT2 = v.atPose2(key0), v.atPose2(key1)\n error = Pose2(0, 0, 0).localCoordinates(gT1.between(gT2))\n \n if len(H) > 0:\n result = gT1.between(gT2)\n H[0] = -result.inverse().AdjointMap()\n H[1] = np.eye(3)\n return error\n \n noise_model = gtsam.noiseModel.Unit.Create(3)\n cf = ge.CustomFactor(noise_model, gtsam.KeyVector([0, 1]), error_func)\n v = Values()\n v.insert(0, gT1)\n v.insert(1, gT2)\n \n bf = gtsam.BetweenFactorPose2(0, 1, Pose2(0, 0, 0), noise_model)\n\n gf = cf.linearize(v)\n gf_b = bf.linearize(v)\n\n J_cf, b_cf = gf.jacobian()\n J_bf, b_bf = gf_b.jacobian()\n np.testing.assert_allclose(J_cf, J_bf)\n np.testing.assert_allclose(b_cf, b_bf)",
"def jacobi_method(A, b, tol=1e-8, maxiters=100, plot=False):\n \"\"\"\n Pseudocode\n Ensure A, b are in decimal form\n \n \n \n \"\"\"\n \n \n \"\"\"\n e:0...n inclusive\n \"\"\"\n A=np.array(A)*1.0\n b=np.array(b)*1.0 \n m,n=A.shape\n e=[]\n xk=np.zeros((m,))\n \n def iter(xi):\n xj=np.zeros((m,))\n for i in xrange(m):\n xj[i]=(b[i]-(np.dot(A[i],xi)-A[i,i]*xi[i]))/A[i,i]\n return xj\n\n \n for i in xrange(1,maxiters+1):\n e+=[la.norm(np.dot(A,xk)-b,ord=np.inf)]\n xk=iter(xk)\n if (la.norm(np.dot(A,xk)-b,ord=np.inf)<tol) or (i==maxiters):\n e+=[la.norm(np.dot(A,xk)-b,ord=np.inf)]\n break\n \n if plot==False:\n return xk\n else:\n #How many iterations happened\n iters=len(e) #1..len(e)\n dom=np.arange(0,iters)\n \n plt.semilogy(dom,e,'b.-',basey=10,lw=2, ms=2)\n plt.xlabel(\"Iteration #\")\n plt.ylabel(\"Absolute Error of Approximation\")\n #plt.legend(loc=\"upper left\")\n plt.title(\"Convergence of Jacobi Method\", fontsize=18)\n plt.show()\n return xk",
"def test11(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n c, d = bcolz.carray(a, rootdir=self.rootdir), bcolz.carray(b)\n if self.vm == \"python\":\n cr = bcolz.eval(\"np.sin(c) + 2 * np.log(d) - 3\")\n elif self.vm == \"dask\":\n cr = bcolz.eval(\"da.sin(c) + 2 * da.log(d) - 3\")\n else:\n cr = bcolz.eval(\"sin(c) + 2 * log(d) - 3\")\n nr = np.sin(a) + 2 * np.log(b) - 3\n # print \"bcolz.eval ->\", cr\n # print \"numpy ->\", nr\n assert_allclose(cr[:], nr, err_msg=\"eval does not work correctly\")",
"def convertJacobiToBeta(self,x):\n u = 0.5*(self.high+self.low)\n s = 0.5*(self.high-self.low)\n return s*x+u",
"def test_jacobian_options(self, mocker):\n spy = mocker.spy(qml.gradients, \"param_shift\")\n\n a = jax.numpy.array([0.1, 0.2])\n\n dev = qml.device(\"default.qubit\", wires=1)\n\n def cost(a, device):\n with qml.queuing.AnnotatedQueue() as q:\n qml.RY(a[0], wires=0)\n qml.RX(a[1], wires=0)\n qml.expval(qml.PauliZ(0))\n\n tape = qml.tape.QuantumScript.from_queue(q)\n\n return execute(\n [tape],\n device,\n gradient_fn=param_shift,\n gradient_kwargs={\"shifts\": [(np.pi / 4,)] * 2},\n )[0]\n\n jax.grad(cost)(a, device=dev)\n\n for args in spy.call_args_list:\n assert args[1][\"shifts\"] == [(np.pi / 4,)] * 2",
"def test_test_jacobian(self):\n self.set_up()\n inputObject = self.vmecOptimization.vmecInputObject\n rbc = np.copy(inputObject.rbc)\n zbs = np.copy(inputObject.zbs)\n inputObject.rbc = 0*inputObject.rbc\n inputObject.zbs = 0*inputObject.zbs\n orientable = self.vmecOptimization.test_jacobian(inputObject)\n self.assertFalse(orientable)\n # Reset boundary\n inputObject.rbc = rbc\n inputObject.zbs = zbs\n self.tear_down()",
"def testJacobs():\n\n known_jacobs_array = getJacobs()\n\n num = 84\n unknown_array = getUnknown(num)\n\n image_jacobs_test, labels_jacobs_test = loadImage(known_jacobs_array, unknown_array)\n x_image_test = image_jacobs_test.reshape(image_jacobs_test.shape[0],\n image_jacobs_test.shape[1] * image_jacobs_test.shape[2] *\n image_jacobs_test.shape[\n 3]) # batchsize, height*width*3channels\n\n encoder = LabelEncoder()\n y_image_labels = encoder.fit_transform(labels_jacobs_test)\n\n y_pred = clf_images.predict(x_image_test)\n accuracy_score_84 = (accuracy_score(y_image_labels, y_pred)) * 100\n\n results = model_selection.cross_val_score(clf_images, x_image_test, y_image_labels, cv=k_fold)\n k_fold_accuracy_84 = (results.mean()) * 100\n k_fold_std_84 = results.std()\n\n return known_jacobs_array, accuracy_score_84, k_fold_accuracy_84, k_fold_std_84",
"def test_blend_jacobian_random(self):\n (x_points_init, x_weights_init, x_rotations_init,\n x_translations_init) = test_helpers.generate_random_test_lbs_blend()\n\n self.assert_jacobian_is_correct_fn(\n linear_blend_skinning.blend,\n [x_points_init, x_weights_init, x_rotations_init, x_translations_init])",
"def convertBetaToJacobi(self,y):\n u = 0.5*(self.high+self.low)\n s = 0.5*(self.high-self.low)\n return (y-u)/(s)",
"def blk_jacobi(A, x, b, smooth_opts):\n x0 = x[:]\n\n if smooth_opts.sparse:\n diag = A.diagonal()\n else:\n diag = np.diag(A)\n\n color_order = range(smooth_opts.num_color)\n if smooth_opts.color_flip:\n color_order = reversed(color_order)\n\n for color in color_order:\n diaginv = np.zeros(len(diag))\n if smooth_opts.color_list != None:\n diaginv[smooth_opts.color_list[color]] = \\\n 1./diag[smooth_opts.color_list[color]]\n else:\n diaginv[color::smooth_opts.num_color] = \\\n 1./diag[color::smooth_opts.num_color]\n\n if smooth_opts.sparse:\n diaginv = sp.diags(diaginv)\n else:\n diaginv = np.diag(diaginv)\n\n x += diaginv.dot(b - A.dot(x))\n\n return smooth_opts.omega*x + (1-smooth_opts.omega)*x0",
"def testActivation(self):\n result = Sigmoid.activ(self, 12)\n self.assertEqual(0.9999938558253978, result)",
"def test_system_jacobian(self, scml_system):\n el_jac = np.arange(4).reshape(2, 2)\n el_over_omega = np.arange(4, 6)\n torque_over_el = np.arange(6, 8)\n # Set the el. jacobian returns to specified values\n scml_system.electrical_motor.electrical_jac_return = (el_jac, el_over_omega, torque_over_el)\n me_jac = np.arange(8, 12).reshape(2, 2)\n me_over_torque = np.arange(12, 14)\n # Set the mech. jabobian returns to specified values\n scml_system.mechanical_load.mechanical_jac_return = me_jac, me_over_torque\n sys_jac = scml_system._system_jacobian(0, np.array([0, 1, 2, 3]), [0, -1])\n\n #\n assert np.all(sys_jac[-2:, -2:] == el_jac), 'The el. jacobian is false'\n assert np.all(sys_jac[:2, :2] == me_jac), 'The mech. jacobian is false'\n assert np.all(sys_jac[2:, 0] == el_over_omega), 'the derivative of the el.state over omega is false'\n assert np.all(sys_jac[2:, 1] == np.zeros(2))\n assert np.all(sys_jac[:-2, 2:] == np.array([[72, 84], [78, 91]])), 'The derivative of the mech.state ' \\\n 'over the currents is false'",
"def chibar(z):\n return chi(z)",
"def f_x_b88(x, beta=0.0042, use_jax=True):\n np = jnp if use_jax else onp\n c1 = 8 * (1 / 2) ** (2 / 3) * (np.pi / 3) ** (1 / 3) / 3 * beta\n c2 = 6 * 2 ** (1 / 3) * beta\n return 1 + c1 * x ** 2 / (1 + c2 * x * np.arcsinh(2 ** (1 / 3) * x))",
"def test_multi_tape_jacobian_probs_expvals(self, execute_kwargs):\n adjoint = execute_kwargs.get(\"gradient_kwargs\", {}).get(\"method\", \"\") == \"adjoint_jacobian\"\n if adjoint:\n pytest.skip(\"The adjoint diff method doesn't support probabilities.\")\n\n def cost(x, y, device, interface, ek):\n with qml.queuing.AnnotatedQueue() as q1:\n qml.RX(x, wires=[0])\n qml.RY(y, wires=[1])\n qml.CNOT(wires=[0, 1])\n qml.expval(qml.PauliZ(0))\n qml.expval(qml.PauliZ(1))\n\n tape1 = qml.tape.QuantumScript.from_queue(q1)\n\n with qml.queuing.AnnotatedQueue() as q2:\n qml.RX(x, wires=[0])\n qml.RY(y, wires=[1])\n qml.CNOT(wires=[0, 1])\n qml.probs(wires=[0])\n qml.probs(wires=[1])\n\n tape2 = qml.tape.QuantumScript.from_queue(q2)\n\n return qml.execute([tape1, tape2], device, **ek, interface=interface)[0]\n\n dev = qml.device(\"default.qubit\", wires=2)\n x = jax.numpy.array(0.543)\n y = jax.numpy.array(-0.654)\n\n x_ = np.array(0.543)\n y_ = np.array(-0.654)\n\n res = cost(x, y, dev, interface=\"jax-jit\", ek=execute_kwargs)\n\n exp = cost(x_, y_, dev, interface=\"autograd\", ek=execute_kwargs)\n\n for r, e in zip(res, exp):\n assert jax.numpy.allclose(r, e, atol=1e-7)",
"def test_csc():\n c=14\n assert {'diff':EF.csc(c).der, 'value': EF.csc(c).val}=={'diff':0, 'value': 1/math.sin(c)}",
"def jacobi(A,b,x0,kmax=10,err=1e-4):\n n = len(A)\n k = 0\n x = np.ones(n)\n delta = 1\n \n while (k < kmax):\n k += 1\n for i in range(0,n):\n s = b[i]-np.dot(A[i][:],x0)\n x[i] = x0[i]+s/(A[i][i]*1.)\n \n delta = max(abs(x-x0))\n if (delta < err):\n break\n x0 = np.copy(x)\n print(x)\n \n return x",
"def test_create_basis(self):\n\t\tbasis = schrodinger.create_basis(3)\n\t\ta = basis[0](0).numpy()\n\t\tb = math.cos(0)\n\t\tself.assertEqual(a, b)\n\t\tc = basis[1](math.pi/2).numpy()\n\t\td = math.sin(math.pi/2)\n\t\tself.assertEqual(c, d)",
"def jacobi_cupy(inv_diag: sparse.dia_matrix, lower_upper: sparse.dia.dia_matrix,\n b: cp.ndarray, min_iter: int = 10, max_iter: int = 20, TOL = 0.001):\n x = b.copy()\n for i in range(min_iter):\n x = inv_diag @ (b - lower_upper @ x)\n \n for i in range(max_iter - min_iter):\n x_ = inv_diag @ (b - lower_upper @ x)\n rel_err = cp.mean(cp.abs(x - x_))\n x = x_\n if rel_err < TOL:\n break\n return x",
"def jacobi(inv_diag: sparse.dia_matrix, lower_upper: sparse.dia.dia_matrix,\n b: np.ndarray, min_iter: int = 10, max_iter: int = 20, TOL = 0.001):\n x = b.copy()\n for i in range(min_iter):\n x = inv_diag @ (b - lower_upper @ x)\n \n for i in range(max_iter - min_iter):\n x_ = inv_diag @ (b - lower_upper @ x)\n rel_err = np.mean(np.abs(x - x_))\n x = x_\n if rel_err < TOL:\n break\n return x",
"def _symbolic_jacobian(self):\n return self._symbolic_system.jacobian([V[0], V[1]])",
"def test_bisection_system(testFunctions,tol, printFlag):\n pass",
"def test_jax(self, approx_order, strategy, tol):\r\n jax = pytest.importorskip(\"jax\")\r\n from jax import numpy as jnp\r\n from pennylane.interfaces.jax import JAXInterface\r\n from jax.config import config\r\n\r\n config.update(\"jax_enable_x64\", True)\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n params = jnp.array([0.543, -0.654])\r\n\r\n def cost_fn(x):\r\n with JAXInterface.apply(qml.tape.QubitParamShiftTape()) as tape:\r\n qml.RX(x[0], wires=[0])\r\n qml.RY(x[1], wires=[1])\r\n qml.CNOT(wires=[0, 1])\r\n qml.expval(qml.PauliZ(0) @ qml.PauliX(1))\r\n\r\n tape.trainable_params = {0, 1}\r\n tapes, fn = finite_diff(tape, n=1, approx_order=approx_order, strategy=strategy)\r\n jac = fn([t.execute(dev) for t in tapes])\r\n return jac\r\n\r\n res = jax.jacobian(cost_fn)(params)\r\n x, y = params\r\n expected = np.array(\r\n [\r\n [-np.cos(x) * np.sin(y), -np.cos(y) * np.sin(x)],\r\n [-np.cos(y) * np.sin(x), -np.cos(x) * np.sin(y)],\r\n ]\r\n )\r\n assert np.allclose(res, expected, atol=tol, rtol=0)",
"def __init__(self):\n GinacFunction.__init__(self, \"binomial\", nargs=2, preserved_arg=1,\n conversions=dict(maxima='binomial',\n mathematica='Binomial',\n sympy='binomial'))",
"def test_coefficients_jax_interface(self):\n import jax\n\n # Need to enable float64 support\n from jax.config import config\n\n remember = config.read(\"jax_enable_x64\")\n config.update(\"jax_enable_x64\", True)\n\n qnode = qml.QNode(self.circuit, self.dev, diff_method=\"parameter-shift\")\n\n weights = jax.numpy.array([0.5, 0.2])\n\n obtained_result = coefficients(partial(qnode, weights), 2, 1)\n\n assert np.allclose(obtained_result, self.expected_result)\n\n config.update(\"jax_enable_x64\", remember)",
"def test_simple_ib_1():\n dist = Distribution(['00', '02', '12', '21', '22'], [1 / 5] * 5)\n ib = IBCurve(dist, rvs=[[0], [1]], beta_max=10, beta_num=21)\n assert ib.complexities[2] == pytest.approx(0.0, abs=1e-4)\n assert ib.complexities[5] == pytest.approx(0.8, abs=1e-4)\n assert ib.complexities[20] == pytest.approx(1.5129028136502387, abs=1e-4)\n assert ib.relevances[2] == pytest.approx(0.0, abs=1e-4)\n assert ib.relevances[5] == pytest.approx(0.4, abs=1e-4)\n assert ib.relevances[20] == pytest.approx(0.5701613885745838, abs=1e-4)\n assert 3.0 in ib.find_kinks()",
"def test_coefficients(self):\n\n coefs = self.cs.coefficients\n\n self.assertEqual(coefs, (1, 0, 1, 0, 0, -1))",
"def test_solve_toruscases_bce():\n import numpy as np\n from crpm.setup_toruscases import setup_toruscases\n from crpm.fwdprop import fwdprop\n from crpm.lossfunctions import loss\n from crpm.gradientdecent import gradientdecent\n from crpm.analyzebinaryclassifier import analyzebinaryclassifier\n\n #init numpy seed\n np.random.seed(40017)\n\n #setup model\n model, data = setup_toruscases()\n nx = data.shape[0]\n nsample = data.shape[1]\n\n #partition training and validation data\n valid = data[1:data.shape[0],0:nsample//3]\n validtargets = data[0,0:nsample//3]\n train = data[1:data.shape[0],nsample//3:nsample]\n targets =data[0,nsample//3:nsample]\n\n #calculate initial binary cross entropy error\n pred, _ = fwdprop(train, model)\n icost, _ = loss(\"bce\", pred, targets)\n\n #analyze binary classifier\n pred, _ = fwdprop(valid, model)\n roc, ireport = analyzebinaryclassifier(pred, validtargets)\n if ireport[\"AreaUnderCurve\"]<.5:\n pred = 1-pred\n icost, _ = loss(\"bce\", pred, validtargets)\n roc, ireport = analyzebinaryclassifier(pred, validtargets)\n print(ireport)\n #plotroc(roc)\n\n #train model\n pred, cost, _ = gradientdecent(model, train, targets, \"bce\", valid, validtargets, earlystop=True)\n\n #analyze binary classifier\n pred, _ = fwdprop(valid, model)\n roc, report = analyzebinaryclassifier(pred, validtargets)\n if report[\"AreaUnderCurve\"]<.5:\n pred = 1-pred\n cost, _ = loss(\"bce\", pred, validtargets)\n roc, report = analyzebinaryclassifier(pred, validtargets)\n print(report)\n #plotroc(roc)\n\n\n #print(model)\n print(icost)\n print(cost)\n assert icost > cost\n assert cost < .4\n assert report[\"MatthewsCorrCoef\"] > .1\n #don't expect problem can be solved with linear model\n #assert report[\"AreaUnderCurve\"] > ireport[\"AreaUnderCurve\"]",
"def test_chao1(self):\n self.assertEqual(chao1(self.TestData), 9.75)\n self.assertEqual(chao1(self.TestData,bias_corrected=False),10.5)\n self.assertEqual(chao1(self.NoSingles), 4)\n self.assertEqual(chao1(self.NoSingles,bias_corrected=False),4)\n self.assertEqual(chao1(self.NoDoubles), 5)\n self.assertEqual(chao1(self.NoDoubles,bias_corrected=False),5)",
"def Jacobi(A):\n # Manda a llamar a la funcion para hacer el intercambio de los renglones necesarios, de tal manera que la matriz resultante sea una matriz diagonal dominante\n A = MatDiagDom.Matriz_Diagonal_Dominante(A)\n\n # Imprime la matriz\n np.set_printoptions(precision = 6, suppress = True)\n print(\"\\nMetodo de Jacobi\\n\")\n print(\"\\n\", A, \"\\n\")\n\n # Pide al usuario los valores necesarios para el metodo\n tolerancia = float(input(\"\\nIngrese el error de tolerancia para el metodo de Jacobi: \"))\n limite = float(input(\"Ingrese el limite de iteraciones para el metodo de Jacobi: \"))\n print()\n\n print(\"Ingrese el vector incial para comenzar con el metodo de Jacobi\\n\")\n # Crea el vector inicial para comenzar con el metdo y se llena en el bucle for\n x = np.empty(A.shape[0], dtype = 'f')\n for comp in range(A.shape[0]):\n x[comp] = float(input(f\"Ingrese la componente {comp + 1} del vector: \"))\n\n # Bucle anidado que modifica la matriz A para poder aplicar el metodo de Jacobi\n for fil in range(A.shape[0]):\n denominador = A[fil, fil]\n for col in range(A.shape[1]):\n # Condicional para cambiar la componente de la diagonal por cero\n if col == fil:\n A[fil, col] = 0\n else:\n if (col + 1) == A.shape[1]:\n A[fil, col] /= denominador\n else:\n A[fil, col] /= -denominador\n\n T = np.copy(A[:, :A.shape[0]])\n\n c = np.copy(A[:, A.shape[0]:])\n\n # Calcula la norma de 'x'\n normaX1 = np.linalg.norm(x)\n\n cont = 0\n\n # Bucle que se repetira hasta que el error sea menor o igual al permitido\n while True:\n # Multiplica la matriz 'T' por el vector 'x' y le suma el vector 'c'\n x = np.matmul(T, np.reshape(x, (A.shape[0], 1))) + np.reshape(c, (A.shape[0], 1))\n\n cont += 1\n\n # Calcula la norma de 'x'\n normaX2 = np.linalg.norm(x)\n\n # Calcula el error aproximado porcentual y almacena el resultado en la variable 'errorAproxPorcen'\n errorAproxPorcen = ((normaX2 - normaX1) / normaX2) * 100\n\n if abs(errorAproxPorcen) < tolerancia:\n break\n\n if cont == limite:\n # En caso que se hayan hecho 'x' iteraciones, entonces suponemos que\n # no se ha determinado el resultado y se detiene la ejecucion del programa\n print(\"\\n\\nSe ha llegado al limite de iteraciones y no se ha encontrado un posible \", end = \"\")\n print(\"resultado aplicando el Metodo de Jacobi para resolver el sistema de ecuaciones lineales\")\n print(\"Pruebe con otro vector inicial o ingrese un limite de iteraciones mayor\\n\\n\")\n sys.exit(1)\n\n # Se copia el valor de 'normaX2' en la variable 'normaX1' para que en la siguiente iteracion se considere la norma que se acaba de calcular\n normaX1 = normaX2\n\n print(\"\\nUna aproximacion a la solucion es:\\n\", np.transpose(x).reshape(A.shape[0], 1))\n print()\n\n return x"
] | [
"0.6263861",
"0.60016906",
"0.5993425",
"0.59792936",
"0.59679013",
"0.5947442",
"0.58563167",
"0.5767559",
"0.57407844",
"0.56984955",
"0.5689885",
"0.56845725",
"0.5673471",
"0.56505066",
"0.56440735",
"0.5604703",
"0.5593576",
"0.55914634",
"0.5581195",
"0.557651",
"0.5572182",
"0.5569898",
"0.5563673",
"0.5562454",
"0.555096",
"0.55489314",
"0.5548445",
"0.5545176",
"0.55413043",
"0.5531898"
] | 0.82948303 | 0 |
set up some polynomials | def setUp(self):
self.f1 = uniutil.polynomial(enumerate([3, 6, 81, 1]), Z)
self.f2 = uniutil.polynomial(enumerate([1, 81, 6, 3]), Z)
self.f3 = uniutil.polynomial(enumerate([37, 6, 18, 1]), Z)
self.f4 = uniutil.polynomial(enumerate([91, 7, 14, 1]), Z)
# f5 = (x - 6)(x - 5)...x(x + 1)(x + 2) - 1
self.f5 = uniutil.polynomial(enumerate([1439, -1368, -1324,
1638, -231, -252,
114, -18, 1]), Z) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test():\n assert str(Polynomial(0, 1, 0, -1, 4, -2, 0, 1, 3, 0)) == \"3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x\"\n assert str(Polynomial([-5, 1, 0, -1, 4, -2, 0, 1, 3, 0])) == \"3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x - 5\"\n assert str(Polynomial(x7=1, x4=4, x8=3, x9=0, x0=0, x5=-2, x3=-1, x1=1)) == \"3x^8 + x^7 - 2x^5 + 4x^4 - x^3 + x\"\n assert str(Polynomial(x2=0)) == \"0\"\n assert str(Polynomial(x0=0)) == \"0\"\n assert Polynomial(x0=2, x1=0, x3=0, x2=3) == Polynomial(2, 0, 3)\n assert Polynomial(x2=0) == Polynomial(x0=0)\n assert str(Polynomial(x0=1) + Polynomial(x1=1)) == \"x + 1\"\n assert str(Polynomial([-1, 1, 1, 0]) + Polynomial(1, -1, 1)) == \"2x^2\"\n pol1 = Polynomial(x2=3, x0=1)\n pol2 = Polynomial(x1=1, x3=0)\n assert str(pol1 + pol2) == \"3x^2 + x + 1\"\n assert str(pol1 + pol2) == \"3x^2 + x + 1\"\n assert str(Polynomial(x0=-1, x1=1) ** 1) == \"x - 1\"\n assert str(Polynomial(x0=-1, x1=1) ** 2) == \"x^2 - 2x + 1\"\n pol3 = Polynomial(x0=-1, x1=1)\n assert str(pol3 ** 4) == \"x^4 - 4x^3 + 6x^2 - 4x + 1\"\n assert str(pol3 ** 4) == \"x^4 - 4x^3 + 6x^2 - 4x + 1\"\n assert str(Polynomial(x0=2).derivative()) == \"0\"\n assert str(Polynomial(x3=2, x1=3, x0=2).derivative()) == \"6x^2 + 3\"\n assert str(Polynomial(x3=2, x1=3, x0=2).derivative().derivative()) == \"12x\"\n pol4 = Polynomial(x3=2, x1=3, x0=2)\n assert str(pol4.derivative()) == \"6x^2 + 3\"\n assert str(pol4.derivative()) == \"6x^2 + 3\"\n assert Polynomial(-2, 3, 4, -5).at_value(0) == -2\n assert Polynomial(x2=3, x0=-1, x1=-2).at_value(3) == 20\n assert Polynomial(x2=3, x0=-1, x1=-2).at_value(3, 5) == 44\n pol5 = Polynomial([1, 0, -2])\n assert pol5.at_value(-2.4) == -10.52\n assert pol5.at_value(-2.4) == -10.52\n assert pol5.at_value(-1, 3.6) == -23.92\n assert pol5.at_value(-1, 3.6) == -23.92",
"def __init__(self, polyorder=2):\n self.polyorder = polyorder",
"def build_poly(x, degree):\n \"\"\"\n Assemble the 3 label vectors with the original ordering \n Inputs:\n - x (ndarray) : binary prediction for set 1\n - degree (int) : binary prediction for set 2 \n Outputs: \n - p (ndarray) : predicted labels for test set ( with the original ordering)\n \"\"\"\n # forming a matrix containing the data points\n terms = np.hstack([np.ones([x.shape[0],1]),np.tile(x,(1,degree))])\n index = np.arange(degree)+1\n \n # forming a matrix contnaining the exponents\n exponents = np.multiply(np.ones((1, x.shape[1])), index[:, np.newaxis])\n exponents = exponents.reshape([1, x.shape[1]*degree])\n exponents = np.multiply(exponents, np.ones([x.shape[0], 1]))\n exponents = np.hstack([np.ones( (x.shape[0], 1) ),exponents])\n \n # using the exponent matrix as the element-wise exponents of the terms in the terms matrix\n p=np.power(terms,exponents)\n return p",
"def build_poly(x, degree): \n # ***************************************************\n # COPY YOUR CODE FROM EX03 HERE\n # polynomial basis function: TODO\n # this function should return the matrix formed\n # by applying the polynomial basis to the input data\n # ***************************************************\n raise NotImplementedError",
"def __init__(self, pol1, pol2):\n\n self._pol1, self._pol2 = pol1, pol2\n self.deg = self._pol1.deg*self._pol2.deg # degree of composed polynomial\n\n # WARNING: numpy.polynomial.polynomial.polyadd and polypow considers\n # arrays as polynomials with lowest coefficient first,\n # contrarily to polyval and polyfit.\n _pol1, _pol2 = self._pol1.pol[::-1], self._pol2.pol[::-1]\n\n self.pol = np.zeros((1,)) # composed polynomial\n for i in range(pol1.deg + 1):\n self.pol = polyadd(self.pol, _pol1[i]*polypow(_pol2, i))\n\n self.pol = self.pol[::-1]",
"def __init__(self, *args, **kwargs):\n self.polynomials = {}\n\n if len(args) == 1 and isinstance(args[0], list): # Polynomials represented as list\n index = 0\n for polynomial in args[0]:\n self.polynomials[index] = polynomial\n index += 1\n elif len(args) > 0: # Polynomials represented as arguments\n index = 0\n for polynomial in args:\n self.polynomials[index] = polynomial\n index += 1\n elif len(kwargs) > 0: # Polynomials represented as keyword arguments\n for index, polynomial in kwargs.items():\n index = index[1:]\n self.polynomials[index] = polynomial",
"def poly(x, y, pd) :\n # Maximum polynomial degree allowed is 7.\n maxD = 7\n if pd > maxD :\n exit(\"Please choose a reasonable polynomial degree (0 <= pd <= \" + maxD + \").\")\n \n # Make the polynomial matrix one degree at a time.\n p = np.zeros((len(x), int((pd+1)*(pd+2)/2)), float)\n count = 0\n numP = 0\n for i in range(pd + 1) :\n for j in range(numP + 1) :\n if (j == 0) and (numP == 0) :\n p[:,count] = 1\n elif (j == 0) :\n p[:,count] = x**(numP-j)\n elif (numP-j == 0) :\n p[:,count] = y**j\n else :\n p[:,count] = x**(numP-j) * y**j\n count += 1\n numP += 1\n \n return p",
"def generate_polynomial():\n degree = numpy.random.choice(range(3, 7))\n x = numpy.linspace(-10, 10, 1000)\n coefficients = numpy.random.chisquare(3, size=degree) + 1\n coefficients *= numpy.random.choice([-1, 1], size=coefficients.shape)\n coefficients *= 0.5\n y = numpy.polyval(coefficients, x)\n add_noise(y, 0.1)\n return x, y",
"def construct_poly(data, power):\n return np.power(data, power)",
"def __init__( self , power , the_phis = None ) :\n\n ## check the arguments \n assert isinstance ( power , num_types ) and int ( power ) == power and 0 <= power, \\\n \"Phases: invalid type/value for ``power''-parameter: %s/%s\" % ( power , type(power) )\n power = int ( power ) \n\n if isinstance ( the_phis , Phases ) : \n self.__phis = [ i for i in the_phis.phis ] \n self.__phi_list = the_phis.phi_list \n assert power == len( self.__phis ) , \"Phases: Invalid length of ``phis'' %d/%s\" % ( power , len ( self.__phis ) ) \n return ## RETURN\n elif the_phis and isinstance ( the_phis , ROOT.RooArgList ) :\n self.__phis = [ i for i in the_phis] \n self.__phi_list = the_phis \n assert power == len( self.__phis ) , \"Phases: Invalid length of ``phis'' %d/%s\" % ( power , len ( self.__phis ) ) \n return ## RETURN \n elif the_phis and isinstance ( the_phis , (tuple,list) ) :\n self.__phis = [ i for i in the_phis] \n self.__phi_list = ROOT.RooArgList()\n for phi in the_phis : self.__phi_list.add ( phi )\n assert power == len( self.__phis ) , \"Phases: Invalid length of ``phis'' %d/%s\" % ( power , len ( self.__phis ) ) \n return ## RETURN\n elif the_phis :\n self.warning(\"unknown type for ``the_phis'' %s/%s, skip it\" % ( the_phis , type(the_phis) ) )\n\n self.__phis = []\n self.__phi_list = ROOT.RooArgList()\n from math import pi\n for i in range( 0 , power ) :\n phi_i = self.make_var ( None ,\n 'phi%d_%s' % ( i , self.name ) ,\n '#phi_{%d}(%s)' % ( i , self.name ) ,\n None , 0 , -1.55 * pi , 3.55 * pi )\n self.__phis .append ( phi_i ) \n self.__phi_list.add ( phi_i )",
"def addPoly(self,p):\n for t in triangulate(p):\n self.addTri(t)\n return self",
"def __test_s_polynomial():\n poly_ring = PolynomialRing(QQ, 'x,y', order='deglex')\n x, y = poly_ring('x'), poly_ring('y')\n g = x ** 3 - 2 * x * y\n h = x ** 2 * y - 2 * y ** 2 + x\n print __s_polynomial(g, h) # Expected -x^2",
"def __init__(self, poly, ambient=None):\n if not is_MPolynomial(poly):\n raise TypeError(\"Defining polynomial (= %s) must be a multivariate polynomial\"%poly)\n if ambient is None:\n R = poly.parent()\n from sage.schemes.affine.affine_space import AffineSpace\n ambient = AffineSpace(R.base_ring(), R.ngens())\n ambient._coordinate_ring = R\n AlgebraicScheme_subscheme_affine.__init__(self, ambient, [poly])",
"def __init__(self,dim,degree,coords):\n d = degree +1\n spec_shape = tuple([(d) for x in range(dim)])\n array_var_names = list(map(lambda x: \"x[\"+str(x)+\"]\",range(dim)))\n self.coords = kill_extra_indicies(coords,d)\n array_poly = polyString(array_var_names,self.coords)\n self.array_poly = array_poly\n normal_var_names = list(map(lambda x: \"x\"+str(x),range(dim)))\n normal_poly = polyString(normal_var_names,self.coords)\n self.normal_poly = normal_poly\n \n test=spec_shape==coords.shape\n \n if not(test):\n raise ValueError(\"Polynomial coords shape not in accordance with dim and degree\")\n self.python_func = \"lambda x: \" + array_poly\n self.sympy_exp = parse_expr(normal_poly)",
"def __init__(self, coefficients):\n self.coefficients = coefficients",
"def generate_poly(hyper, params):\n\n k, d = hyper['k'], hyper['d']\n #atoms = { \n # (h,) : symbols('h_%d'%h)\n # for h in xrange(1, k+1)\n # }\n #atoms[(k,)] = 1. - sum( symbols('h_%d'%h) for h in xrange(1, k) )\n\n atoms = {}\n for h in xrange(1,k+1):\n atoms.update({ \n (h,x1) : symbols('x_%d%d'%(h,x1))\n for x1 in xrange(1,d+1)\n })\n #atoms[(h,d)] = 1. - sum(symbols('x_%d%d'%(h,x1)) for x1 in xrange(1,d))\n\n m = {}\n for x1 in xrange(1,d+1):\n m[(x1,)] = poly( sum( atoms[(h,x1)] for h in xrange(1,k+1) ) )\n for x2 in xrange(1,d+1):\n m[(x1,x2)] = poly( sum( atoms[(h,x1)] * atoms[(h,x2)] for h in xrange(1,k+1) ) )\n for x3 in xrange(1,d+1):\n m[(x1,x2,x3)] = poly( sum( atoms[(h,x1)] * atoms[(h,x2)] * atoms[(h,x3)] for h in xrange(1,k+1) ) )\n\n return m",
"def generate_polynomial_features(self, X) :\n\n n,d = X.shape\n\n ### ========== TODO : START ========== ###\n # part b: modify to create matrix for simple linear model\n # part g: modify to create matrix for polynomial model\n Phi = X\n m = self.m_\n\n if m == 1:\n Phi = np.zeros((n,2))\n for i in range(n):\n Phi[i,0] = 1\n Phi[i, 1] = X[i]\n\n else:\n Phi = np.ones((n,m+1))#n*m+1 dimmension\n power_arr = np.arange(0, m+1)\n for index, row in enumerate(Phi):# get every row\n row = np.repeat(X[index],m+1)\n row = np.power(row,power_arr)\n Phi [index,] = row\n #also could use the following\n \"\"\"\n import sklearn.preprocessing as sk\n #X is a N*1 vector\n poly_mat = sk.PolynomialFeatures(3)\n poly.fit_transform(a)\n \"\"\"\n\n\n\n\n\n ### ========== TODO : END ========== ###\n\n return Phi",
"def definePolyFunction():\n lstWeights=[]\n degree = input(\"degree of polynomial in terms of highest exponent of x:\")\n degree = int(degree+1)\n for a in range (0,degree):\n string='weight for x^'+str(a)+':'\n weight = input(string)\n weight = float(weight)\n lstWeights.append(weight)\n return lstWeights",
"def parameters_polynomial(cobj, prop, prop_units, alist, blist):\n for i, aval in enumerate(alist):\n if i == 0:\n param_units = prop_units\n else:\n param_units = prop_units / pyunits.K**i\n\n coeff = Var(doc=\"A parameter for CoolProp polynomial form\", units=param_units)\n cobj.add_component(prop + \"_coeff_A\" + str(i), coeff)\n coeff.fix(aval)\n\n for i, bval in enumerate(blist):\n if i == 0:\n param_units = pyunits.dimensionless\n else:\n param_units = pyunits.K**-i\n\n coeff = Var(doc=\"B parameter for CoolProp exponential form\", units=param_units)\n cobj.add_component(prop + \"_coeff_B\" + str(i), coeff)\n coeff.fix(bval)",
"def set_coeffs(self, sol):\n # TODO: look for bugs here!\n self.log_debug(\"Set spline coefficients\")\n\n # task: find which of the free parameters (coeffs) belong to which spline object\n sol_bak = sol.copy()\n subs = dict()\n\n # iterate over the OrderedDict {'x1': [cx1_..., ...], 'u1': [cu1_...]}\n for k, v in list(self.indep_vars.items()):\n i = len(v)\n # TODO: improve comment\n subs[k] = sol[:i] # set numerical value to symbolical value\n sol = sol[i:] ##:: sol = []\n \n if self._parameters['use_chains']:\n for var in self.sys.states + self.sys.inputs:\n for ic in self._chains:\n if var in ic: ##:: ('x1','x2','u1') and ('x3','x4')\n subs[var] = subs[ic.upper] ##:: elements in the same chain have the same coefficients (number, not symbol).\n \n # set numerical coefficients for each spline and derivative\n # TODO: handle `!!`-comments after talking to yx \n ##!! spline_key_plus_k = self.splines.keys().append('k')\n for k in list(self.splines.keys()): ##:: ['x1','x3']\n self.splines[k].set_coefficients(free_coeffs=subs[k])\n ##:: self._indep_vars = free_coeffs (self.splines[k]._indep_coeffs=free_coeffs) makes symbols changing into numbers. {'x1': <Spline object>, 'x3': <Spline object>}, Spline._P[k] saves the polynomial.\n \n # yet another dictionary for solution and coeffs\n# ##!! indep_vars['z_par'] = np.array([sp.symbols('k')])\n# ##!! self.indep_vars = indep_vars\n\n coeffs_sol = OrderedDict()\n\n # used for indexing\n i = 0\n j = 0\n\n for k, v in list(self.indep_vars.items()):\n ##:: ['x1': array([0.12,0.13,...,]), 'x3':...] symbols change into numbers\n j += len(v)\n coeffs_sol[k] = sol_bak[i:j]\n i = j\n\n self.coeffs_sol = coeffs_sol\n ##:: {'x1': array([ 25.94485709, 16.38313857, -35.65010072, ..., 2.28427004, 2.82974712, 1.88490863]), 'x3': array([-34.33884269, 45.13959025, 1.3272378 , -4.15546318,# 5.3863866 , -5.39286006, -8.86559812, -6.11620983, -2.95630206])}\n\n ##!! return self.coeffs_sol['z_par'].tolist()",
"def polyFeat(X, p):\r\n # You need to return the following variables correctly.\r\n X_poly = np.zeros((X.shape[0], p))\r\n\r\n # ====================== YOUR CODE HERE ======================\r\n\r\n for i in range(p):\r\n X_poly[:, i] = X[:, 0] ** (i + 1)\r\n\r\n # ============================================================\r\n return X_poly",
"def base_polynome(numbers):\n\n monomes = [ x**n for n in numbers ]\n polynome = sum(monomes)\n\n return poly(polynome, x)",
"def polygen(count=10, sum_count=10, deg=5, cof=10):\n\n s = enumi_beg\n ans = enumi_beg\n\n for i in range(count):\n s += item_beg\n ans += item_beg\n p = genpoly(sum_count, deg, cof)\n ans += p.print_out()\n s += p.rep + item_end\n ans += item_end\n s += enumi_end\n ans += enumi_end\n return s, ans",
"def begin_poly(self):\n self._poly = [self._position]\n self._creatingPoly = True",
"def poly_ring(self, *gens):\n from sympy.polys.domains import PolynomialRing\n return PolynomialRing(self, *gens)",
"def __init__(self, poly, ambient=None):\n if not is_MPolynomial(poly):\n raise TypeError(\"Defining polynomial (=%s) must be a multivariate polynomial.\"%poly)\n if not poly.is_homogeneous():\n raise TypeError(\"Defining polynomial (=%s) must be homogeneous.\"%poly)\n if ambient is None:\n R = poly.parent()\n from sage.schemes.projective.projective_space import ProjectiveSpace\n ambient = ProjectiveSpace(R.base_ring(), R.ngens()-1)\n ambient._coordinate_ring = R\n AlgebraicScheme_subscheme_projective.__init__(self, ambient, [poly])",
"def __init__(self, obj):\n if type(obj) is Monomial:\n Polynomial.__init__(self, obj)\n else:\n Polynomial.__init__(self, *obj.monomials)",
"def __init__(self, coef, f1=unit_function, f2=unit_function):\n super(self.__class__, self).__init__(f1, f2)\n self.p = np.poly1d(coef)",
"def __pow__(self, power):\n if power == 1:\n return self\n elif power == 0:\n return Polynomial(1)\n\n self.polynomials = {key: val for key, val in self.polynomials.items() if val != 0}\n self.polynomials = dict(sorted(self.polynomials.items(), reverse=True))\n\n attributes = {}\n\n # Using Binomial theorem\n n = 0\n m = power\n use_n = True\n\n for k in range(0, power + 1):\n result = self.calculate_combinatorial_number(power, k)\n\n for index, polynomial in self.polynomials.items():\n if use_n:\n result *= pow(polynomial, (power - n))\n n += 1\n use_n = False\n else:\n result *= pow(polynomial, (power + m))\n m -= 1\n use_n = True\n\n attributes[\"x\" + str(n - 1)] = result\n\n return Polynomial(**attributes)",
"def _poly_func(x, a, b, c, d, e):\n return a * x ** 6 + b * x ** 5 + c * x ** 4 + d * x ** 3 + e * x ** 2"
] | [
"0.678141",
"0.67440933",
"0.6514138",
"0.65139806",
"0.64966166",
"0.6434011",
"0.62719",
"0.6235043",
"0.62243843",
"0.62179774",
"0.6173415",
"0.61612695",
"0.60989493",
"0.6082673",
"0.60799944",
"0.6079461",
"0.6062244",
"0.60604817",
"0.6048867",
"0.60426575",
"0.6030644",
"0.60289216",
"0.6010282",
"0.6005424",
"0.5982188",
"0.5980817",
"0.59765345",
"0.5968951",
"0.59552157",
"0.5953608"
] | 0.70385593 | 0 |
return the size and line number of all the files ending with file_format in file_path | def get_files_size_and_line_number(file_path, file_format):
logging.info("[get_file_size_and_line_number] file_path: %s, file_format: %s", file_path, file_format)
size = 0
lines = 0
for root, dirs, files in os.walk(file_path):
for file in files:
for one_format in file_format:
if file.endswith(one_format):
size += os.path.getsize(os.path.join(root, file))
lines += get_file_lines(os.path.join(root, file))
return size, lines | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def size(path):",
"def file_stat(self, file_path):",
"def get_file_size(file_path):\n with open(file_path, 'rb') as infile:\n infile.seek(0, 2)\n return infile.tell()",
"def fileLineCount(fPath):\n\twith open(fPath) as f:\n\t\tfor i, li in enumerate(f):\n\t\t\tpass\n\treturn (i + 1)",
"def read_file(path_to_file):\n 8",
"def trace_file_len(fname):\n try:\n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n return i - 1\n except FileNotFoundError:\n return 0",
"def file_size(self,file_path):\n if os.path.isfile(file_path):\n file_info = os.stat(file_path)\n return self.convert_bytes(file_info.st_size)",
"def _get_file_info(filename):\n filename = os.path.split(filename)[-1]\n filename = filename[:str.rfind(filename, '.jsonl.gz')]\n _, mode, idx = filename.split('_')\n return mode, idx",
"def file_len(f):\n\n for n, l in enumerate(f, 1):\n pass\n f.seek(0) # rewind\n return n",
"def get_file_size(file_path):\n return os.path.getsize(file_path)",
"def file_size(file_path):\n if os.path.isfile(file_path):\n file_info = os.stat(file_path)\n return (file_info.st_size)",
"def FileLen(filename):\n return os.stat(str(filename))[6]",
"def getFileCount(self) -> int:\n ...",
"def file_len(fname):\n \n with open(fname) as f:\n for i, l in enumerate(f):\n pass\n return i + 1",
"def file_len(file_name):\n with open(file_name) as f:\n for i, l in enumerate(f):\n pass\n return i + 1",
"def get_file_size(path):\n\n return os.stat(path).st_size",
"def get_file_line_count(a_file):\r\n count = -1\r\n try:\r\n for count, line in enumerate(open(a_file, \"rU\")):\r\n pass\r\n except IOError:\r\n pass\r\n count += 1\r\n return count",
"def get_file_line_count(a_file):\r\n count = -1\r\n try:\r\n for count, line in enumerate(open(a_file, \"rU\")):\r\n pass\r\n except IOError:\r\n pass\r\n count += 1\r\n return count",
"def file_size():\n return os.path.getsize(FILE_NAME)",
"def file_length(fileName):\n with open(f_pass) as f:\n for i, l in enumerate(f):\n pass\n return i + 1",
"def file_size(file_path):\n if os.path.isfile(file_path):\n file_info = os.stat(file_path)\n return convert_bytes(file_info.st_size)",
"def file_size(file_path):\n if os.path.isfile(file_path):\n file_info = os.stat(file_path)\n return convert_bytes(file_info.st_size)",
"def file_size(file_path):\n if os.path.isfile(file_path):\n file_info = os.stat(file_path)\n return convert_bytes(file_info.st_size)",
"def get_file_size(path: str):\n return os.path.getsize(path)",
"def _get_nparts(filename,headersize,itemsize):\n return (os.path.getsize(filename)-headersize)/itemsize",
"def count_total_line():\n count = 0\n file_count = 0\n for filename in os.listdir('.'):\n if filename.endswith(\".json\"):\n file_count += 1\n with open(filename, 'r', encoding='utf8') as f:\n for line in f:\n count += 1\n print(\"There are {0} lines in {1} json files\".format(count, file_count))",
"def getFileSize( self, path ):\n res = self.__checkArgumentFormat( path )\n if not res['OK']:\n return res\n urls = res['Value']\n successful = {}\n failed = {}\n gLogger.debug( \"DIPStorage.getFileSize: Attempting to obtain size for %s files.\" % len( urls ) )\n res = self.getFileMetadata( urls )\n if not res['OK']:\n return res\n for url, urlDict in res['Value']['Successful'].items():\n if urlDict['Exists']:\n successful[url] = urlDict['Size']\n else:\n failed[url] = 'File does not exist'\n for url, error in res['Value']['Failed'].items():\n failed[url] = error\n resDict = {'Failed':failed, 'Successful':successful}\n return S_OK( resDict )",
"def file_len(filename):\n with open(filename) as f:\n for i, l in enumerate(f):\n pass\n return i + 1",
"def file_len(full_path):\n f = open(full_path)\n nr_of_lines = sum(1 for line in f)\n f.close()\n return nr_of_lines",
"def getFileCount(self, startingWithPath=\"\"):\n return self.__controller._getRecordsCount(startingWithPath)"
] | [
"0.63892037",
"0.6363271",
"0.60669774",
"0.60568386",
"0.60216236",
"0.59628934",
"0.5924917",
"0.5862097",
"0.5836959",
"0.581429",
"0.5798193",
"0.5783594",
"0.57677364",
"0.5739231",
"0.57310176",
"0.572738",
"0.57239795",
"0.57239795",
"0.57225364",
"0.5717134",
"0.57096237",
"0.57096237",
"0.57096237",
"0.57068825",
"0.56526226",
"0.56408876",
"0.56378347",
"0.5614633",
"0.56099266",
"0.55978507"
] | 0.8117311 | 0 |
Save updates to this user. Updates will be made column by column based on the result of self.what_changed(). | def save(self, context=None):
updates = self.obj_get_changes()
self.dbapi.update_user(context, self.id, updates)
self.obj_reset_changes() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def save_to_users(self):\n Data.add_data(self.user_data())",
"def update(self):\n db.session.commit()",
"def update(self):\n db.session.commit()",
"def save(self):\n self.__db.commit()",
"def save(self):\n self.db.commit()",
"def save(self):\n db.session.commit()",
"def update_user():",
"def save(self):\n\n self.__session.commit()",
"def save(self):\n\n self.__session.commit()",
"def save(self):\n self.__session.commit()",
"def save(self):\n self.__session.commit()",
"def save(self):\n self.__session.commit()",
"def save(self):\n self.__session.commit()",
"def save(self):\n self.__session.commit()",
"def save(self):\n self.__session.commit()",
"def save(self):\n self.__session.commit()",
"def save(self):\n self.__session.commit()",
"def save(self):\n self.session.commit()",
"def save(self):\n users = User.getall()\n users[self.username] = dict(self)\n return self.db().put(self.udb, users)",
"def model_update(self, db):\n db.session.commit()",
"def update_users(self):\n conn = sqlite3.connect(self.__DB)\n cursor = conn.cursor()\n\n users_data = []\n unsaved_histories_data = []\n for key, user in self.__users.items(): # here, key it's actually users id\n users_data.append((user.get_balance(), key))\n for register in user.get_history():\n register_str, is_saved = register\n if not is_saved:\n unsaved_histories_data.append((register_str, key))\n\n cursor.executemany('''\n UPDATE users\n SET balance=?\n WHERE id=?;\n ''', users_data)\n\n cursor.executemany('''\n INSERT INTO history (register, owner)\n VALUES (?, ?);\n ''', unsaved_histories_data)\n\n conn.commit()\n conn.close()\n\n self.load_users() # RELOADING!!! Pew, pew, pew, pew, pew...",
"def save_user(self):\n db.session.add(self)\n db.session.commit()",
"def saveSettings(self):\n self.userFiles.applyData()\n self.userPersonal.applyData()",
"def update_user():\n #TODO user update \n pass",
"def save_model(self, request, instance, form, change):\r\n instance.updated_by = request.user\r\n instance.save()",
"def save_model(self, request, instance, form, change):\r\n instance.updated_by = request.user\r\n instance.save()",
"def save(self):\n self.updated_at = datetime.now()",
"def save(self):\n\n pass",
"def save(self):\n # TODO (Pierre): code",
"def commit(self):\n\t\t#firstly, get all variables and values of this model\n\t\tcontent = self.__dict__.copy() \n\t\t#if '_rev' is one of the variables of this model instance,\n\t\t#it means this user is retrived from database. \n\t\t#We are actually going to update the model document in database\n\t\t#instead of creating a new user document.\n\t\tres = dbop.update_create_user_in_database(self._id, content) \n\t\tself._id = res['id']\n\t\tself._rev = res['rev']"
] | [
"0.6838319",
"0.6649925",
"0.6649925",
"0.66234463",
"0.65625477",
"0.64974636",
"0.6454244",
"0.63921773",
"0.63921773",
"0.63760066",
"0.63760066",
"0.63760066",
"0.63760066",
"0.63760066",
"0.63760066",
"0.63760066",
"0.63760066",
"0.6369375",
"0.6336045",
"0.63228583",
"0.63187677",
"0.62552166",
"0.62322146",
"0.6215068",
"0.621214",
"0.621214",
"0.6208253",
"0.62054306",
"0.62014246",
"0.6192232"
] | 0.6958138 | 0 |
Return the name of the appliance | def name(self):
return self.appliance_name | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def storage_appliance_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"storage_appliance_name\")",
"def getApplicationName(self) -> unicode:\n ...",
"def get_name(self, name):\n return self.apps[name]['name']",
"def name(self):\n return self.application_tree['name']",
"def get_name():\n return config.APP_NAME",
"def name(self):\r\n if self._name is not None:\r\n return self._name\r\n else:\r\n try:\r\n return Inspection.find_application_name()\r\n # TODO(wickman) Be more specific\r\n except Exception:\r\n return 'unknown'",
"def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")",
"def application_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"application_name\")",
"def name(self):\n try:\n return self['DW_AT_name'].val\n except KeyError:\n return None",
"def application_name(self) -> Optional[str]:\n return pulumi.get(self, \"application_name\")",
"def app_name(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"app_name\")",
"def test_app_appliances(self):\n\n self.assertEqual(\n AppliancesConfig.name,\n \"appliances\"\n )\n self.assertEqual(\n apps.get_app_config('appliances').name,\n \"appliances\"\n )",
"def app_name(self) -> str:\n return self._app_name",
"def app_name(self):\n return self._app_name",
"def app_name(self):\n return self._chromecast.app_display_name if self._chromecast else None",
"def _get_app_name(app):\n return app[APP_NAME_KEY]",
"def product(self):\n return self.appName",
"def app_name(self): # pylint:disable=function-redefined\n return self._app_name",
"def name(self):\n\n return self.manifest[\"name\"]",
"def get_name(app):\n from uuid import uuid4 as uuid\n return (f'accelpy_{app[\"application\"][\"product_id\"]}'\n f'_{str(uuid()).replace(\"-\", \"\")[:8]}')",
"def name(self) -> Dict[str, str]:\n self.__logger.debug('Eva.name called')\n return self.__http_client.name()",
"def app_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"app_name\")",
"def aggregate_compliance_pack_name(self) -> str:\n return pulumi.get(self, \"aggregate_compliance_pack_name\")",
"def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")",
"def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")",
"def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")",
"def application_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"application_name\")",
"def name(self):\n return f\"BlueAir {self._ba_name}\"",
"def name(self) -> str:\n return self.dev.label",
"def name(self):\n if self._name is not None:\n return self._name\n else:\n return 'Alarm {}'.format(self._id)"
] | [
"0.728623",
"0.670762",
"0.6638334",
"0.66285485",
"0.6624678",
"0.6546256",
"0.6457588",
"0.6457588",
"0.64034915",
"0.6325202",
"0.62845224",
"0.6272063",
"0.62540203",
"0.62129927",
"0.6174521",
"0.61575866",
"0.6152049",
"0.610121",
"0.6100212",
"0.6099339",
"0.60911846",
"0.6081494",
"0.60629433",
"0.6056342",
"0.6056342",
"0.6056342",
"0.6056342",
"0.60378975",
"0.60306054",
"0.60035545"
] | 0.90326834 | 0 |
Inject an extend method in obj that will use the append method. | def define_extend_as_seq_of_appends(obj):
assert hasattr(
obj, 'append'
), f'Your object needs to have an append method! Object was: {obj}'
def extend(self, items):
for item in items:
self.append(item)
if isinstance(obj, type):
obj = type(obj.__name__, (obj,), {})
obj.extend = extend
else:
obj.extend = types.MethodType(extend, obj)
return obj | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def extend(self, *args, **kwargs): # real signature unknown\n pass",
"def extend(self, x) -> None:\n pass",
"def append(self, obj):\r\n raise NotImplementedError",
"def extend(self, other):\n # YOUR CODE HERE\n raise NotImplementedError()",
"def append(self, *args, **kwargs): # real signature unknown\n pass",
"def add_base_class(\n existing_object: Any,\n import_method: Callable[[Any], Any],\n export_method: Callable[[Any], Any],\n):\n existing_object.export_data = types.MethodType(export_method, existing_object)\n existing_object.import_data = types.MethodType(import_method, existing_object)",
"def append(self, object):\r\n raise NotImplementedError()",
"def extend(class_to_extend):\n def decorator(func):\n if hasattr(class_to_extend, func.func_name):\n raise except_osv(_(\"Developper Error\"),\n _(\"You can extend the class %s with the method %s.\",\n \"Indeed this method already exist use the decorator 'replace' instead\"))\n setattr(class_to_extend, func.func_name, func)\n return class_to_extend\n return decorator",
"def add_python_append(self, method: Function, lines):\n return self.add_feature(lines, method.pattern, 'pythonappend')",
"def extend(self, *args):\n for arg in args:\n self.add(arg)",
"def augment(self, *args, **kwargs):\n pass",
"def contribute_to_object(self, obj):\n pass",
"def extend(self, extension):\n for element in extension:\n self.append(element)",
"def append(self, *__args): # real signature unknown; restored from __doc__ with multiple overloads\n pass",
"def extend(source, add_attribute):\n\n ExtendCommandExecutor().extend(source, add_attribute)",
"def append_to_request(self, request_base, request_object):\n\n pass",
"def add(obj):",
"def extend(self, item: Any) -> BaseList:\n super().extend(item)\n return self",
"def add(self, obj):\n raise NotImplementedError",
"def override(class_to_extend, prefix):\n def decorator(func):\n if not hasattr(class_to_extend, func.func_name):\n raise except_osv(_(\"Developper Error\"),\n _(\"You can replace the method %s of the class %s. \"\n \"Indeed this method doesn't exist\")%(func.func_name, class_to_extend))\n original_function_name = prefix + func.func_name\n if hasattr(class_to_extend, original_function_name):\n raise except_osv(_(\"Developper Error\"),\n _(\"The method %s already exist. \"\n \"Please change the prefix name\")%original_function_name)\n setattr(class_to_extend, original_function_name, getattr(class_to_extend, func.func_name))\n setattr(class_to_extend, func.func_name, func)\n return class_to_extend\n return decorator",
"def extend_info(self, extend_info):\n self._extend_info = extend_info",
"def add_extend(self, lines, name=''):\n return self._add_scope(lines, '%extend ' + name + '{', '}', indent=None, inline=False)",
"def append(self, x) -> None:\n pass",
"def extend_instance(obj, cls):\n base_cls = obj.__class__\n base_cls_name = obj.__class__.__name__\n obj.__class__ = type(base_cls_name, (base_cls, cls), {})",
"def append (self, item):\n pass",
"def __init__(self, obj, adapted_methods):\n self.obj = obj\n self.__dict__.update(adapted_methods)",
"def AppendExtra(self, extra):\n self.script.append(extra)",
"def test_AppendToTagObj( self ):\n\t\tsource = AppendToTagobjMethodSource()\n\t\tresult = self.parse( \"\"\"\n\t\t\tx := d*\n\t\t\td := 'd'\n\t\t\"\"\", 'x', 'ddd', source)\n\t\tassert source._o_d == [ (None,0,1,NullResult),(None,1,2,NullResult),(None,2,3,NullResult)], \"\"\"Method source methods were not called, or called improperly:\\n%s\"\"\"%(source._o_d,)",
"def extend_param(self, extend_param):\n self._extend_param = extend_param",
"def extend(doc):\n # Escape if extending null documents.\n if doc is None:\n return\n\n # Verify that document type is supported.\n if type(doc) not in _TYPES:\n rt.throw(\"Unsupported document type: {0}.\".format(type(doc)))\n\n # Initialize document extension information.\n doc.ext = DocumentExtensionInfo()\n\n # Instantiate extension context.\n ctx = _ExtensionContextInfo(doc, doc.meta, doc.ext)\n\n # Step 1: invoke default pre-extenders.\n for extender in default.PRE_EXTENDERS:\n extender(ctx)\n\n # Step 2: invoke type specific extenders.\n if is_extendable(doc):\n for extender in SUPPORTED[doc.type_key.lower()].EXTENDERS:\n extender(ctx)\n\n # Step 3: invoke default post-extenders.\n for extender in default.POST_EXTENDERS:\n extender(ctx)\n\n return doc"
] | [
"0.7326428",
"0.6949307",
"0.6912196",
"0.6708326",
"0.6557307",
"0.6522524",
"0.6451939",
"0.64439166",
"0.63893723",
"0.6349176",
"0.62845767",
"0.60959977",
"0.60721445",
"0.60629874",
"0.5988625",
"0.5980781",
"0.5946136",
"0.5893937",
"0.58636904",
"0.5830696",
"0.5825573",
"0.5805074",
"0.5745609",
"0.5731495",
"0.57178533",
"0.57131773",
"0.5685094",
"0.564716",
"0.56348085",
"0.5618629"
] | 0.72547257 | 1 |
Make item2kv from an item2key function (the value will be the item itself). | def item_to_key(item2key):
def item2kv(item):
return item2key(item), item
return item2kv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def item_to_key_params_and_val(item_to_key_params_and_val, key_str_format):\n\n def item2kv(item):\n key_params, val = item_to_key_params_and_val(item)\n if isinstance(key_params, dict):\n return key_str_format.format(**key_params), val\n else:\n return key_str_format.format(*key_params), val\n\n return item2kv",
"def _getNextKey(self, item):\n return (2, item)",
"def by_key(item):\n return Line['key', item]",
"def get_key(self, item):\r\n return item[0]",
"def _getPrevKey(self, item):\n return (1, item)",
"def to_kv(k: str, v: typing.Union[int, float]) -> typing.Tuple[str, float]:\n return k, v * v",
"def to_kv(k: str, v: Union[int, float]) -> Tuple[str, float]:\n my_tuple = (k, v*v)\n\n return my_tuple",
"def to_kv(k: str, v: Union[int, float]) -> Tuple[str, float]:\n return (k, v**2)",
"def to_group_key(args_items):\n args_items = dict(args_items)\n del args_items['seed']\n del args_items['uid']\n return tuple(args_items.items())",
"def strkey(item):\n return '%s:%s:%s' % (item['group_id'], item['artifact_id'], item['version'])",
"def to_kv(k: str, v: Union[int, float]) -> Tuple[str, float]:\n return (k, pow(v, 2))",
"def item_to_dynamo_db_item(item):\n now = long(time.time())\n return {\n 'timeserie': {'S': item['timeserie']},\n 'time': {'S': str(item['time'])},\n 'value': {'N': str(item['value'])},\n 'ttl': {'N': str(now + (1 * 60))},\n }",
"def id2item(self):\n if self._id2item is None:\n self._id2item = {j: item for item, j in self.item2id.items()}\n return self._id2item",
"def key_func(entry):\n return (entry[1].name, entry[1].number, entry[1].price)",
"def _key_sorting(item):\n key, value = item\n if isinstance(value, Link):\n return (1, key)\n return (0, key)",
"def converter(item):\n pass",
"def get_new_key(key, word):\n return (key[1], word)",
"def item_to_dict(dict_item):\n info = {}\n item_info = None\n\n for k, v in dict_item.items():\n if k == 'ItemType':\n info[k] = api.item_dict_inv[dict_item['ItemType']]\n elif k == 'Item':\n item_info = colectica.parse_xml(v, api.item_dict_inv[dict_item['ItemType']])\n else:\n info[k] = v\n d = {**info, **item_info}\n return d",
"def extract_key_item_data(item_data):\n extracted_item_data = {}\n\n for item_id in item_data:\n key_data = {}\n key_data[\"id\"] = item_id\n key_data[\"name\"] = item_data[item_id][\"name\"]\n key_data[\"image\"] = item_data[item_id][\"image\"][\"full\"]\n key_data[\"gold\"] = item_data[item_id][\"gold\"][\"total\"]\n key_data[\"tags\"] = item_data[item_id][\"tags\"]\n extracted_item_data[item_id] = key_data\n \n return extracted_item_data",
"def decode_map_element(self, item_type, value):\r\n import urllib\r\n key = value\r\n if \":\" in value:\r\n key, value = value.split(':',1)\r\n key = urllib.unquote(key)\r\n if Model in item_type.mro():\r\n value = item_type(id=value)\r\n else:\r\n value = self.decode(item_type, value)\r\n return (key, value)",
"def get_key_wrapper(o):\n new_key = get_key(o)\n print 'key_wrapper (%s) -> %s' %(o,new_key)\n return new_key",
"def key(self, x):\r\n return tuple(x)",
"def string_to_keypair(self, data): \n return keypair_lst",
"def do_dict_entry_for_item(parser, token):\r\n bits = token.contents.split()\r\n if len(bits) != 6:\r\n raise template.TemplateSyntaxError(\"'%s' tag takes exactly five arguments\" % bits[0])\r\n if bits[2] != 'from':\r\n raise template.TemplateSyntaxError(\"second argument to '%s' tag must be 'from'\" % bits[0])\r\n if bits[4] != 'as':\r\n raise template.TemplateSyntaxError(\"fourth argument to '%s' tag must be 'as'\" % bits[0])\r\n return DictEntryForItemNode(bits[1], bits[3], bits[5])",
"def _key_func_3(entry: tuple[str, list]) -> str:\n key, (targets, sub_items, category_key) = entry\n # hack: mutating the sub_items dicts to a list in the key_func\n entry[1][1] = sorted(((sub_key, sub_targets)\n for (sub_key, (sub_targets, _0, _sub_category_key))\n in sub_items.items()), key=_key_func_2)\n\n if category_key is not None:\n return category_key\n\n # now calculate the key\n if key.startswith('\\N{RIGHT-TO-LEFT MARK}'):\n key = key[1:]\n letter = unicodedata.normalize('NFD', key[0])[0].upper()\n if letter.isalpha() or letter == '_':\n return letter\n\n # get all other symbols under one heading\n return _('Symbols')",
"def _make_hashable(items):\n\n def convert(x):\n # Perform any conversions here to make a variable hashable\n if isinstance(x, np.ndarray):\n # Create an sha1 of the data, and throw in a string\n # and the shape.\n return ('__type_np.ndarray', x.shape,\n xxhash.xxh3_128_hexdigest(x))\n elif isinstance(x, (list, tuple)):\n return _make_hashable(x)\n elif isinstance(x, dict):\n return _make_hashable(sorted(x.items()))\n return x\n\n return tuple(map(convert, items))",
"def get_item_keys(self, item):\n return self._reverse_store[item]",
"def item2id(self):\n if self._item2id is None:\n self._item2id = dict(zip(self.item_unique_vals, range(self.n_items)))\n return self._item2id",
"def conv_kv(val: ValidKVs) -> str:\n if isinstance(val, str): # Early out for speed\n return val\n elif val is True:\n return '1'\n elif val is False:\n return '0'\n elif isinstance(val, Matrix) or isinstance(val, FrozenMatrix):\n return str(val.to_angle())\n elif isinstance(val, float):\n return format_float(val)\n else:\n return str(val)",
"def decrease_key(self, old_item, new_item):"
] | [
"0.718581",
"0.5973326",
"0.59086525",
"0.59082144",
"0.5864925",
"0.5777844",
"0.5666772",
"0.56512356",
"0.5644327",
"0.56334215",
"0.5619753",
"0.54938334",
"0.54744387",
"0.54451644",
"0.53108513",
"0.5293976",
"0.52673554",
"0.522438",
"0.519466",
"0.5186249",
"0.5139293",
"0.5137866",
"0.511613",
"0.5106366",
"0.5102764",
"0.51018745",
"0.5097873",
"0.50952816",
"0.50545835",
"0.5050206"
] | 0.8628558 | 0 |
Make an item2kv function that uses the current time as the key, and the unchanged item as a value. The offset_s, which is added to the output key, can be used, for example, to align to another system's clock, or to get a more accurate timestamp of an event. | def utc_key(offset_s=0.0):
if offset_s == 0.0: # splitting for extra speed (important in real time apps)
def item2kv(item):
return time.time(), item
else:
def item2kv(item):
return time.time() + offset_s, item
return item2kv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def item_to_key(item2key):\n\n def item2kv(item):\n return item2key(item), item\n\n return item2kv",
"def item_to_key_params_and_val(item_to_key_params_and_val, key_str_format):\n\n def item2kv(item):\n key_params, val = item_to_key_params_and_val(item)\n if isinstance(key_params, dict):\n return key_str_format.format(**key_params), val\n else:\n return key_str_format.format(*key_params), val\n\n return item2kv",
"def item_to_dynamo_db_item(item):\n now = long(time.time())\n return {\n 'timeserie': {'S': item['timeserie']},\n 'time': {'S': str(item['time'])},\n 'value': {'N': str(item['value'])},\n 'ttl': {'N': str(now + (1 * 60))},\n }",
"def addKey(self, time, value) -> None:\n ...",
"def getKeyTime(self, index, keyIndexOrHash, view) -> float:\n ...",
"def getPositionKeyTime(self, index, keyIndex, view) -> float:\n ...",
"def getTransformKeyTime(self, index, view) -> float:\n ...",
"def getTranslationKeyTime(self, index, view) -> float:\n ...",
"def update_time(cls, key):\n key.put()",
"def addKey(self, time, name, value, view) -> None:\n ...",
"def to_kv(k: str, v: typing.Union[int, float]) -> typing.Tuple[str, float]:\n return k, v * v",
"def getScaleKeyTime(self, index, view) -> float:\n ...",
"def update_keys(self, delta_time):\n for key, value in self.inputs.items():\n if value[0]:\n value[1] += delta_time\n else:\n value[1] = value[1] - delta_time if value[1] - delta_time > 0 else 0\n # end if\n # value[1] = max(min(value[1], self.mas_time), self.min_time)\n # end for",
"def transform_times(event):\n if isinstance(event, dict):\n retval = {}\n for key, value in event.items():\n if key == 'times' and len(value) == 2:\n retval[key] = [transform_time(t) for t in value]\n else:\n retval[key] = transform_times(value)\n else:\n retval = event\n return retval",
"def getRotationKeyTime(self, index, view) -> float:\n ...",
"def to_kv(k: str, v: Union[int, float]) -> Tuple[str, float]:\n my_tuple = (k, v*v)\n\n return my_tuple",
"def conv(assimp_keys, ticks_per_second):\n return {key.time / ticks_per_second: key.value for key in assimp_keys}",
"def _format_acp_query_items(\n cls, apc_key: str, params: Dict[str, str] = {}\n ) -> Dict[str, str]:\n ts = int(time.time())\n pre_str = \"\"\n keys_lst = params.keys()\n sorted(keys_lst)\n for key in keys_lst:\n pre_str += f\"{params[key]}\"\n pre_str += f\"{ts}\"\n pre_str += apc_key\n\n token = hashlib.md5(pre_str.encode()).hexdigest()\n return {\"ts\": f\"{ts}\", \"token\": f\"{token}\"}",
"def setKey(self, time, attributeIndex, hash, value, view) -> None:\n ...",
"def by_key(item):\n return Line['key', item]",
"def setPositionKey(self, time, index, value, id, view) -> None:\n ...",
"def update_keys(self, delta_time):\n for key, value in self.input_manager.inputs.items():\n if value[0]:\n value[1] += delta_time\n else:\n value[1] = value[1] - delta_time if value[1] - delta_time > 0 else 0\n # end if\n # end for",
"def to_kv(k: str, v: Union[int, float]) -> Tuple[str, float]:\n return (k, v**2)",
"def construct_kv_dict(self):\r\n key1 = user_state_key('field_a')\r\n key2 = user_state_key('field_b')\r\n new_value = 'new value'\r\n newer_value = 'newer value'\r\n return {key1: new_value, key2: newer_value}",
"def getSkewXKeyTime(self, index, view) -> float:\n ...",
"def convert_timedelta(item):\r\n if isinstance(item, timedelta):\r\n seconds = int(item.total_seconds())\r\n hours, remainder = divmod(seconds, 3600)\r\n minutes, seconds = divmod(remainder, 60)\r\n formated = '{}h {}m {}s'.format(hours, minutes, seconds)\r\n else:\r\n raise TypeError(item, 'is not timedelta object')\r\n return formated",
"def put(self, key, item):\n if key is not None and item is not None:\n # modify the time and change the next newer value\n self.timesKey[key] = self.time\n self.time += 1\n\n # add the new item\n self.cache_data[key] = item\n\n if len(self.cache_data) > BaseCaching.MAX_ITEMS:\n discard_key = None\n newer = self.time - 2\n\n for _key, _value in self.timesKey.items():\n if newer == _value:\n discard_key = _key\n break\n\n # del key in time and cache data\n del self.cache_data[discard_key]\n del self.timesKey[discard_key]\n\n print(\"DISCARD: {}\".format(discard_key))",
"def _keygen(self, event, ts=None):\n return \"%s:%s\" % (self.namespace(ts or time.time()), event)",
"def format_time_sortkey(self, data):\n return self.input['start_time'].time().strftime('%H%M').lstrip('0')",
"def shift_time_points(self, offset):\n # Note that this is different from what we are doing in\n # shift_values_by_time in the helper class.\n self._time = [t + offset for t in self._time]\n self._time_idx_map = {t: idx for idx, t in enumerate(self._time)}"
] | [
"0.599946",
"0.57572216",
"0.5707284",
"0.56803143",
"0.5570548",
"0.5564531",
"0.54696536",
"0.5291604",
"0.5251823",
"0.52447885",
"0.5214924",
"0.51746917",
"0.51510656",
"0.51070833",
"0.5074654",
"0.5035714",
"0.5030335",
"0.50250804",
"0.5017685",
"0.5007017",
"0.4954305",
"0.49520516",
"0.49454772",
"0.49379694",
"0.49250436",
"0.49176255",
"0.49172315",
"0.4878772",
"0.48695335",
"0.48606303"
] | 0.7907506 | 0 |
Make item2kv from a function that produces key_params and val, and a key_template that will produce a string key from the key_params | def item_to_key_params_and_val(item_to_key_params_and_val, key_str_format):
def item2kv(item):
key_params, val = item_to_key_params_and_val(item)
if isinstance(key_params, dict):
return key_str_format.format(**key_params), val
else:
return key_str_format.format(*key_params), val
return item2kv | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def item_to_key(item2key):\n\n def item2kv(item):\n return item2key(item), item\n\n return item2kv",
"def to_kv(k: str, v: Union[int, float]) -> Tuple[str, float]:\n my_tuple = (k, v*v)\n\n return my_tuple",
"def to_kv(k: str, v: typing.Union[int, float]) -> typing.Tuple[str, float]:\n return k, v * v",
"def MakeKey(self, string, string_1, string_2):\n ...",
"def create_key(template, outtype=('nii.gz',), annotation_classes=None):\n\n if template is None or not template:\n raise ValueError('Template must be a valid format string')\n return template, outtype, annotation_classes",
"def create_key(template, outtype=('nii.gz',), annotation_classes=None):\n\n if template is None or not template:\n raise ValueError('Template must be a valid format string')\n return template, outtype, annotation_classes",
"def to_kv(k: str, v: Union[int, float]) -> Tuple[str, float]:\n return (k, v**2)",
"def _template_kwargs(*, logical_name: str, bucket: str, key: str) -> Dict[str, str]:\n if logical_name == \"ArtifactBuilder\":\n return dict(ArtifactBucketName=bucket, WorkersS3Key=key)\n elif logical_name == \"LayerBuilder\":\n return dict(ReplicationBucket=bucket, WorkersS3Key=key)\n else:\n raise ValueError(f\"Unknown logical name: {logical_name}\")",
"def create_key ():",
"def to_kv(k: str, v: Union[int, float]) -> Tuple[str, float]:\n return (k, pow(v, 2))",
"def createkey(*args): # {{{2\n return '-'.join(map(simplifyname, args))",
"def make_key(*values, **kwargs):\n if len(kwargs) == 0:\n key = tuple(v.key for v in values)\n else:\n res = [v.key for v in values]\n for k, v in sorted(kwargs.items()):\n if isinstance(v, (int, float, str)):\n res.append(k)\n res.append(v)\n else:\n raise TypeError(\n f\"Type {type(v)} is not yet supported, \"\n f\"v={v} and parameter {k!r}.\")\n key = tuple(res)\n return key",
"def construct_kv_dict(self):\r\n key1 = self.key_factory('existing_field')\r\n key2 = self.key_factory('other_existing_field')\r\n new_value = 'new value'\r\n newer_value = 'newer value'\r\n return {key1: new_value, key2: newer_value}",
"def construct_kv_dict(self):\r\n key1 = user_state_key('field_a')\r\n key2 = user_state_key('field_b')\r\n new_value = 'new value'\r\n newer_value = 'newer value'\r\n return {key1: new_value, key2: newer_value}",
"def strkey(item):\n return '%s:%s:%s' % (item['group_id'], item['artifact_id'], item['version'])",
"def _construct_key(previous_key, separator, new_key):\n if previous_key:\n return u\"{}{}{}\".format(previous_key, separator, new_key)\n else:\n return new_key",
"def conv_kv(val: ValidKVs) -> str:\n if isinstance(val, str): # Early out for speed\n return val\n elif val is True:\n return '1'\n elif val is False:\n return '0'\n elif isinstance(val, Matrix) or isinstance(val, FrozenMatrix):\n return str(val.to_angle())\n elif isinstance(val, float):\n return format_float(val)\n else:\n return str(val)",
"def makekey(function, *args, **kwargs) -> str:\n arguments = str((function.__name__, args, kwargs)).strip()\n arguments = arguments.translate(\n str.maketrans('', '', string.punctuation+string.whitespace)\n )\n key = codecs.encode(pickle.dumps(arguments, protocol=0), \"base64\").decode().strip()\n return key",
"def test_2():\n return KV.new(key=\"hey\", val=\"you\")",
"def make_key(*args, **kwargs) -> Hashable:\n if len(args) == 1 and isinstance(args[0], (int, str)):\n return args[0]\n if kwargs:\n args = sum(kwargs.items(), (*args, _KWD_MARK))\n return _HashedSeq(args)",
"async def build_key(self, attribute, value, record_id='*'):\n self.key = '{var1}:{var2}:{var3}:{var4}:{var5}'.format(var1=record_id, var2=self.industry, var3=self.merchant,\n var4=attribute, var5=value)",
"def substitute_params_keys(params, new_keys):\n for p in params:\n p['type'] = new_keys[p['type']]",
"def make_s3_keys(task, fmt):\n table_key = fmt.format(task.db_name, task.orm.__tablename__)\n version_key = fmt.format(task.db_name, \"__version__\")\n return table_key, version_key",
"def _kv_helper(cache, value):\n vals = [v.replace('\"','') for v in value.split(cache[\"delimiter\"])]\n if \"filtering\" not in cache or _filtering_passed_helper(cache[\"filtering\"], vals): #yield if filtering criteria met or no filtering criteria \n k = \"+\".join(vals) if cache[\"key_columns\"] == \"*\" else \"+\".join(vals[l] for l in cache[\"key_columns\"]) \n v = \",\".join(vals) if cache[\"target_columns\"] == \"*\" else \",\".join([vals[l] for l in cache[\"target_columns\"]])\n return k, v\n return None, None",
"def _make_key(args, kwds, typed,\r\n kwd_mark = (object(),),\r\n fasttypes = {int, str, frozenset, type(None)},\r\n tuple=tuple, type=type, len=len):\r\n # All of code below relies on kwds preserving the order input by the user.\r\n # Formerly, we sorted() the kwds before looping. The new way is *much*\r\n # faster; however, it means that f(x=1, y=2) will now be treated as a\r\n # distinct call from f(y=2, x=1) which will be cached separately.\r\n key = args\r\n if kwds:\r\n key += kwd_mark\r\n for item in kwds.items():\r\n key += item\r\n if typed:\r\n key += tuple(type(v) for v in args)\r\n if kwds:\r\n key += tuple(type(v) for v in kwds.values())\r\n elif len(key) == 1 and type(key[0]) in fasttypes:\r\n return key[0]\r\n return _HashedSeq(key)",
"def generate_key(name, func, *extra_keys, **options):\n\n return get_component(CachingPackage.COMPONENT_NAME).generate_key(name, func,\n *extra_keys, **options)",
"def key_handler(args):\n key = create_key(args.key_type, args.key_size, args.key_out)\n\n if not args.key_out:\n print(print_key(key))\n\n return key",
"def _make_key(self, extra_prefix, key):\n if extra_prefix:\n return \"-\".join((self.prefix, extra_prefix, key))\n else:\n return \"-\".join((self.prefix, key))",
"def _make_key(self, extra_prefix, key):\n if extra_prefix:\n return \"-\".join((self.prefix, extra_prefix, key))\n else:\n return \"-\".join((self.prefix, key))",
"def prepare_key (self, key, for_seq):\n r_key = \"%s:%d:%s\" % (self.classkey, for_seq, key)\n return r_key"
] | [
"0.6950581",
"0.60080147",
"0.59396696",
"0.5878557",
"0.58238006",
"0.58238006",
"0.57599187",
"0.5747728",
"0.57112104",
"0.5701399",
"0.56740856",
"0.5659779",
"0.56510776",
"0.5546873",
"0.54509264",
"0.53719",
"0.5371222",
"0.5350639",
"0.5309715",
"0.52902204",
"0.5276561",
"0.525983",
"0.52163917",
"0.5194463",
"0.5181573",
"0.5179495",
"0.5177659",
"0.5168562",
"0.5168562",
"0.51439494"
] | 0.77231133 | 0 |
The function prints data from the dict 'new_dict' according to the number (picked_num) the user chose. | def pickedFromDict(picked_num, new_dict):
    #1-printing mariah's last name
    #2-printing mariah's birth date
    #3-printing the number of mariah's hobbies
    #4-printing mariah's last hobby
    #5-adds "Cooking" to mariah's hobbies and printing mariah's updated hobbies
    #6-printing mariah's birth date as a tuple of 3 numbers
    #7-printing the dict with a new key- 'age'
if picked_num == 1:
print(new_dict["last_name"])
elif picked_num == 2:
print(new_dict["birth_date"])
elif picked_num == 3:
print(len(new_dict["hobbies"]))
elif picked_num == 4:
print((new_dict["hobbies"][-1]))
elif picked_num == 5:
new_dict["hobbies"] = ("Sing", "Compose", "Act", "coocking")
print(new_dict["hobbies"])
elif picked_num == 6:
a = new_dict["birth_date"].split(".")
print(tuple(a))
elif picked_num == 7:
new_dict["age"] = "51"
print(new_dict) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def make_five_gram_dict(n_gram_dict):\n n_gram = dict()\n\n for line_idx, line_val in enumerate(n_gram_dict):\n if n_gram.get(line_val[0], None) == None:\n n_gram[line_val[0]] = [line_val[1]+\" \"+line_val[2], int(line_val[3])]\n\n if DEBUGGING_MODE:\n print(\"\\n========== check your dict ===============\")\n print(\"N_gram: type-{}, len-{}\".format(type(n_gram), len(n_gram)))\n counting = 0\n for key, val in n_gram.items():\n if counting < 10:\n print(\"({}, {}), \".format(key, val), end=\" \")\n counting +=1 \n return n_gram",
"def generate_random_dict(self, n):\n dict_content = (int, str, list, dict)\n return {self.generate_random_string(10): self.generate_random_payload(dict_content)\n for i in range(n)}",
"def get_n_items(d,f,n):\n return dict(islice(d.items(),f, n))",
"def __init__(self, n):\n self._dictOut = {}\n self._dictIn = {}\n for i in range(n):\n self._dictOut[i] = []\n self._dictIn[i] = []",
"def printTopN(dict, N): \n sorted_list = sorted(dict, key=dict.get, reverse=True)[0:N]\n for key in sorted_list:\n print key, dict[key]",
"def __init__(self,n):\n\t\tself._dictOut={}\n\t\tself._dictIn = {}\n\t\tfor i in range(n):\n\t\t\tself._dictOut[i]=[]\n\t\t\tself._dictIn[i] = []",
"def nmer_dictionary(self,n,dic={}):\n if self.sequence == \"\":\n self.fetchSequence()\n self.sequence = self.sequence.upper()\n for i in range(0,len(self.sequence)-n):\n subseq = self.sequence[i:][:n]\n dic[subseq]=1+dic.get(subseq,0)\n del subseq\n return dic",
"def multivalue():\n d = {1: \"George\", \"Prince\", \"Male\", 2: \"Margaret\", \"Queen\", \"Lizard\"}\n print(d)",
"def printTopN(dict, N):\n sorted_list = sorted(dict, key=dict.get, reverse=True)[0:N]\n for key in sorted_list:\n print key",
"def print_dictionary(d, start_pos=0, end_pos=2):\n if type(d) is list: # end_pos will also act as limit for no. of results\n print(\"\\n\" + \"_\" * 37 + \"BEGIN\" + \"_\" * 37 + \"\\n\")\n for i in range(start_pos, end_pos + 1):\n if i == len(d):\n break\n if len(d) != 1: # Skip item number for single track dictionary\n print(\"Item no.: {}\".format(i + 1))\n for key, value in d[i].items():\n if type(value) is str and len(value) > 79:\n value = value[:40]\n value = value + '...'\n print(\"{0}: {1}\".format(key, value))\n print()\n\n inner_choice = input(\"Want more results? (y/n): \")\n if inner_choice.lower() in ['y', 'yes']:\n print_dictionary(d, start_pos=end_pos + 1, end_pos=end_pos + 5)\n\n if i == len(d):\n print(\"_\" * 38 + \"END\" + \"_\" * 38 + \"\\n\")\n return 1\n\n elif type(d) is dict:\n print()\n for key, value in d.items():\n if type(value) is str and len(value) > 79:\n value = value[:40]\n value = value + '...'\n print(\"{0}: {1}\".format(key, value))\n print()\n return 1",
"def show_proposal(self, req):\n z = 1\n y = z\n x = 1\n self.dico_product = {}\n for prod in req:\n if z <= 5:\n List_store = orm_imp.find_store(prod.id)\n print(\"Choix numéro\", z, \":\", prod.name, \"| score : \",\n prod.nutriscore, \"| Magasins : \", List_store,\n \"| Lien :\",\n prod.url, \"| \\n ==> description :\",\n prod.ingredient, \"\\n==================================\")\n self.dico_product.update({z: prod.id})\n x += 1\n z += 1\n return self.dico_product, y, x - 1",
"def __display(self) -> None:\n ligne = 0\n for key, value in self.values.items():\n self.my_data(self.master, key, value, ligne, 0, 1, 1, 2, 2)\n ligne += 2",
"def generate_text(book_dict, num):\n new_key = ''.join(sample(list(book_dict), 1))\n output_list = new_key.split(' ')\n while len(output_list) < num:\n if new_key in book_dict:\n output_list.append(''.join(sample(book_dict[new_key], 1)))\n new_key = output_list[-2] + ' ' + output_list[-1]\n else:\n new_key = ''.join(sample(list(book_dict), 1))\n\n return ' '.join(output_list[0:num])",
"def DictFunction():\r\n print \"{name} is from {city}, and he likes {cake} cake, {fruit} fruit, {salad} salad and {pasta} pasta\".format(**food_prefs)",
"def DictFunction3():\r\n print \"Create Third Dictionary\"\r\n Dictionary3 = {key:value.count(\"a\") for key, value in food_prefs.iteritems()}\r\n print Dictionary3",
"def print_statistics(fib_details: dict, nth_value: int):\n line = '\\n' + (\"---------------\" * 5)\n pass # TODO: Replace with implementation!",
"def sample_by_num(data_dict: dict, num: int):\n samples = {}\n for k, v in data_dict.items():\n if k == \"index\":\n samples[k] = v[0: num]\n else:\n samples[k] = v[0: int(data_dict[\"index\"][num])]\n return samples",
"def choix_repas(num_choix=4):\n choix = {1: pizza, 2: nuggets, 3: quiche, 4: courgettes}\n return choix.get(num_choix, 4)()",
"def populate(d, n, target=(0,0)):\n # Index of key to increment\n index = 0\n\n # Instantiate the largest value of the dictionary\n d[max(d.keys())] = math.floor(n / max(d.keys()))\n\n # Loop until n is obtained, or until an error is thrown\n while True:\n\n # We break the loop if the sum of all values in the dictionary is equal to n\n if dict_sum(d) >= n: \n break \n\n # We begin incrementation\n # If we run into IndexError, the value cannot be made equal to n and thus should be is dropped\n try: \n d = count_up(d, n, 0, target)\n except IndexError:\n break\n\n # If the sum is equal to n, return it, else return None\n if dict_sum(d) == n:\n return d \n\n else:\n return None",
"def print_dict(data):\n print data",
"def picksomechoices(question, answer):\n \"\"\" because of the way dict() works all 4 choices will be unique \"\"\"\n choices = dict()\n choices[question] = answer\n for choice in random.sample(nlist, 10):\n choices[choice[0]] = choice[1]\n if len(choices.keys()) > 3:\n break\n\n return choices",
"def picksomechoices(question, answer):\n \"\"\" because of the way dict() works all 4 choices will be unique \"\"\"\n choices = dict()\n choices[question] = answer\n for choice in random.sample(nlist, 10):\n choices[choice[0]] = choice[1]\n if len(choices.keys()) > 3:\n break\n\n return choices",
"def picksomechoices(question, answer):\n \"\"\" because of the way dict() works all 4 choices will be unique \"\"\"\n choices = dict()\n choices[question] = answer\n for choice in random.sample(nlist, 10):\n choices[choice[0]] = choice[1]\n if len(choices.keys()) > 3:\n break\n\n return choices",
"def picksomechoices(question, answer):\n \"\"\" because of the way dict() works all 4 choices will be unique \"\"\"\n choices = dict()\n choices[question] = answer\n for choice in random.sample(nlist, 10):\n choices[choice[0]] = choice[1]\n if len(choices.keys()) > 3:\n break\n\n return choices",
"def test_dict_same_occurrence_all_number(self):\n\n argument = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10, 11: 10, 12: 10, 13: 10}\n actual = file_io.top_ten(argument)\n expected = [[10, 13], [10, 12], [10, 11], [10, 10], [9, 9], [8, 8], [7, 7], [6, 6], [5, 5], [4, 4]]\n self.assertEqual(actual, expected)",
"def test_dict_size_ten_all_number(self):\n argument = {1: 1, 2: 2, 3: 3, 4: 4, 5: 5, 6: 6, 7: 7, 8: 8, 9: 9, 10: 10}\n actual = file_io.top_ten(argument)\n expected = [[10, 10], [9, 9], [8, 8], [7, 7], [6, 6], [5, 5], [4, 4], [3, 3], [2, 2], [1, 1]]\n self.assertEqual(actual, expected)",
"def main():\n pokedict = dict()\n inputn = int(input())\n for _ in range(inputn):\n temp = input().split(\" \")\n pokedict[temp[0]] = int(temp[1])\n check = input()\n if check.isnumeric():\n for i in pokedict:\n if pokedict[i] == int(check):\n print(i)\n else:\n print(pokedict[check])",
"def __init__(self,n):\n\t\tself._dict={}\n\t\tfor i in range(n):\n\t\t\tself._dict[i]=[]",
"def renum_branches_plus(self, level, n_num):\n # new dictionary\n d_new = dict()\n # to add above\n a = 0\n # find items that level with higher number\n mask = str(self.n_parent)\n for k, v in self.d.items():\n if len(str(k).split(\":\"))> level - 1:\n a = 1\n else:\n a = 0\n if a == 1:\n num = int(str(k).split(\":\")[level-1])\n if len(str(k).split(\":\")) >= level and num >= int(n_num) and str(k).startswith(mask):\n l_elem = str(k).split(\":\")\n num = int(l_elem[level - 1]) + 1\n\n # summon key\n s_first = \"\" # first part of string\n s_last = \"\" # last part of string\n for i in range(0, level - 1):\n s_first = s_first + l_elem[i]\n if not i == (level - 1):\n s_first = s_first + \":\"\n try:\n for j in range(level, len(l_elem)):\n s_last = s_last + l_elem[j]\n if not j == len(l_elem):\n s_last = s_last + \":\"\n except:\n pass\n\n # summon\n if s_last:\n s_summon = str(s_first) + str(num) + str(s_last)\n else:\n s_summon = str(s_first) + str(num)\n\n # write to dictionary\n d_new[s_summon] = v\n\n # delete item from self.d\n self.d.pop(k)\n continue\n else:\n d_new[k] = self.d[k]\n continue\n a = 0\n d_new[k] = self.d[k]\n # change dictionary\n self.d = d_new\n mask = str(self.n_parent) + \":\" + str(self.n)\n self.d[mask] = ''\n mask = str(self.n_parent) + \":\" + str(self.n+1)",
"def make_text(chains, n):\n\n first_key = random.choice(chains.keys(n))\n first_key_values = chains[first_key]\n third_word = random.choice(first_key_values)\n temp_list = [first_key[0], first_key[1], third_word]\n # for item in temp_list\n new_key = (first_key[1], third_word)\n\n while True:\n try:\n new_value = random.choice(chains[new_key])\n temp_list.append(new_value)\n new_first_word = new_key[1]\n new_key = (new_first_word, new_value)\n except KeyError:\n break\n\n text = \" \".join(temp_list)\n return text"
] | [
"0.62824196",
"0.6082546",
"0.5780949",
"0.5698525",
"0.5650637",
"0.5601323",
"0.55702174",
"0.5543086",
"0.5519851",
"0.5518044",
"0.5468941",
"0.54135096",
"0.53970253",
"0.53740084",
"0.53520656",
"0.5302337",
"0.52912074",
"0.52847147",
"0.5274072",
"0.52664304",
"0.5217955",
"0.5217955",
"0.5217955",
"0.5217955",
"0.52173764",
"0.5200279",
"0.5190378",
"0.51837385",
"0.5174922",
"0.51531965"
] | 0.74096864 | 0 |
Iterator for page links | def iter_page_links(self) -> Iterable[str]:
base_url = 'https://www.med.navy.mil'
r = requests.get(self.starting_url, verify=CERTIFICATE_DIR + '/cat3.pem')
soup = bs4.BeautifulSoup(r.content, features="html.parser")
# get target column of list items
issuance_list = soup.find('div', attrs={'class': 'noindex ms-wpContentDivSpace'})
matches = ["Publications", "BUMEDNotes", "BUMEDInstructions"]
# extract links
links = [link for link in issuance_list.find_all('a')]
for link in links[2:-1]:
if any(x in str(link) for x in matches):
if not link['href'].startswith('http'):
url = base_url + link['href']
else:
url = link['href']
yield url | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def iter_links(self):",
"def iter_page_links(self) -> Iterable[str]:\n base_url = 'https://health.mil/About-MHS/OASDHA/Defense-Health-Agency/Resources-and-Management/DHA-Publications'\n yield base_url",
"def links(cls, page):\r\n for match in cls.HREF_RE.finditer(page):\r\n yield cls.href_match_to_url(match)",
"def extract_linked_items(pages):\n for page in pages:\n for iterate in iterate_on_items(page):\n yield((iterate[1:])[:-1])",
"def _paginate(self) -> Iterable[List[str]]:\n req = self.html\n videos_lens = self._extractor(req)\n yield videos_lens # yielding doesn't mean that is the end\n\n # The above only returns 100 or fewer links\n # as Youtube loads 100 videos at a time\n # Simulating a browser request for the load more link\n load_more_url = self._find_load_more_url(req)\n\n while load_more_url: # there is an url found\n req = get(load_more_url)\n load_more = json.loads(req)\n try:\n html = load_more[\"content_html\"]\n except KeyError:\n return # if there is no content_html there is no chanch to find_load_more_url\n videos_lens = self._extractor(html)\n yield videos_lens\n\n load_more_url = self._find_load_more_url(\n load_more[\"load_more_widget_html\"],\n )\n\n return",
"def rel_links(cls, page):\r\n for match in cls.REL_RE.finditer(page):\r\n href, rel = match.group(0), match.group(1)\r\n if rel not in cls.REL_TYPES:\r\n continue\r\n href_match = cls.HREF_RE.search(href)\r\n if href_match:\r\n href = cls.href_match_to_url(href_match)\r\n parsed_href = urlparse(href)\r\n if any(parsed_href.path.endswith(ext) for ext in cls.REL_SKIP_EXTENSIONS):\r\n continue\r\n yield href",
"def page_iterator(url, save, fun):\n tekstowo_url = 'http://www.tekstowo.pl'\n while True:\n source = html_dl(url)\n tree = html.fromstring(source)\n links = tree.xpath(u\"//div[@class='content']//a[@class='title']\")\n for l in links:\n fun(tekstowo_url + l.attrib['href'], save)\n\n next_page = tree.xpath(u\"//a[@title='Następna >>']\")\n if not next_page:\n break\n url = tekstowo_url + next[0].attrib['href']",
"def _paginated_generator(self, request_args):\n while request_args:\n resp = self._api._session.request(**request_args)\n if not resp.ok:\n raise Basecamp3Error(response=resp)\n link_header = resp.headers.get(\"Link\")\n if link_header:\n next_page_url = self._LINK_HEADER_URL_REGEX.findall(link_header)[0]\n request_args = {'url': next_page_url, 'method': 'GET'} # get ready to call the next page\n else:\n request_args = None # clear it so we break the loop\n items_json = resp.json()\n for jdict in items_json:\n item = self.OBJECT_CLASS(jdict, self) # convert JSON dict into a BasecampObject\n yield item",
"def iter_pages(self):\n for num in range(1, self.pages + 1):\n yield Page(num)",
"def iter_links(self):\n for site in self.iter_sites():\n for u in range(self.dim):\n yield tuple(list(site) + [u])",
"def __iter__(self):\n while self.has_next_page():\n response = self.get_next_page_response()\n for item in self.get_items_from_response(response):\n yield item",
"def __iter__(self) -> Generator[WikiPage, None, None]:\n response = self.subreddit._reddit.get(\n API_PATH[\"wiki_pages\"].format(subreddit=self.subreddit),\n params={\"unique\": self.subreddit._reddit._next_unique},\n )\n for page_name in response[\"data\"]:\n yield WikiPage(self.subreddit._reddit, self.subreddit, page_name)",
"def getLinks(self):\n return self.pageLinks",
"def __iter__(self):\n yield from self.url.generator",
"async def _find_links(self, res: aiohttp.ClientResponse) -> Iterator[str]:\n\n content = await res.text()\n soup = BeautifulSoup(content, 'html.parser')\n links = [self._format(res.url, a) for a in soup.find_all('a')]\n return filter(lambda l: l is not None, links)",
"def test_tags_browse_pagination_page_links(self):\n\n po = self.catalog.load_pageobject('TagsBrowsePage')\n po.goto_page()\n pagenumbers = po.get_link_page_numbers()\n\n while len(pagenumbers) > 0:\n page = int(pagenumbers[0])\n starturl = po.current_url()\n\n # click the link to go to the next page\n po.goto_page_number(page)\n endurl = po.current_url()\n\n # make sure the page changed\n assert starturl != endurl, \\\n \"clicking the page link for page %s\" % (page) \\\n + \" did not change pages: starturl = %s,\" % (starturl) \\\n + \" endurl = %s\" % (endurl)\n\n\n # update the page numbers\n # generally only a few page numbers surrounding the\n # current page are shown. as we progress through the\n # pages, more page numbers become available.\n if len(pagenumbers) > 1:\n new_pagenumbers = po.get_link_page_numbers()\n assert len(new_pagenumbers) != 0, \\\n 'retrieving new page numbers failed while evaluating' \\\n + ' page #%s (%s)' % (page,endurl)\n pagenumbers = [int(i) \\\n for i in new_pagenumbers if int(i) > page]\n else:\n pagenumbers = []\n\n\n #FIXME: check the current page number matches page",
"def _extractLinks(self):\n # `lxml.html` object has a `.iterlinks` function which is crucial for this\n # task to be completed.\n if self.lxml is None:\n raise RuntimeError(\"Couldn't generate a etree object for the url %s\" % self.url)\n\n # stores the etree.html object generated by the lxml in the attribute\n for i in self.lxml.iterlinks():\n self.url_handler.handle(*i)",
"def next(self):\n cur_item = None\n while cur_item == None:\n try:\n cur_item = self.iterator.next()\n except StopIteration:\n if self.current_page < self.total_pages:\n self.__next_page()\n else: raise\n\n element = cur_item.find(self.GLOBAL_NP + 'link')\n\n return element.attrib['href']",
"def __iter__(self):\n return self.paged()",
"def getlinks(url):\n page = Linkfetcher(url)\n page.linkfetch()\n for i, url in enumerate(page):\n print(\"%d ==> %s\" % (i, url))",
"def parse(self, response):\n for href in response.xpath(\"//h2/a/@href\"):\n url = response.urljoin(href.extract())\n yield scrapy.Request(url, self.parse_post_content)\n\n # Check for a next page\n next_page_links = response.xpath(\"//a[@class='right']/@href\")\n if len(next_page_links) > 0:\n next_url = response.urljoin(next_page_links[0].extract())\n yield scrapy.Request(next_url, self.parse)",
"def linked_pages(corpus, page):\n pages = list()\n\n for link in corpus:\n if page in corpus[link]:\n pages.append(link)\n\n if not corpus[link]:\n pages.append(link)\n\n return pages",
"def __urlImageGenerator(cls, link):\n\n try:\n a = Article(url=link)\n a.download()\n a.parse()\n a.fetch_images()\n\n for img in a.imgs:\n yield img\n except Exception:\n pass",
"def get_all_links(page):\n\tlinks = []\n\twhile True:\n\t\turl, end_pos = get_next_target(page)\n\t\tif url:\n\t\t\tlinks.append(url)\n\t\t\tpage = page[end_pos:]\n\t\telse:\n\t\t\tbreak\n\treturn links",
"def get_page_links(num_pages,url):\n\n\n html = requests.get(url)\n page = bs(html.text, 'lxml')\n page_links = []\n for i in range(2, num_pages + 1):\n next_button = page.find(\"a\", {\"data-page-number\": str(i)})\n next_url = 'https://www.tripadvisor.com' + next_button['href']\n page_links.append(next_url)\n\n next_link = requests.get(next_url)\n page = bs(next_link.text, 'lxml')\n page_links.insert(0, url)\n return page_links",
"def __iterate_link(links_p, current_url, ignored_links, links_titles, extensions):\n folder_found = False\n # iterating inside each link found on a crawled url\n for link_element in links_p:\n if (link_element.string not in ignored_links and\n link_element.get('href') != '/') and\\\n link_element.string:\n if not current_url.endswith('/'):\n # if the link is a file\n if Crawler.__is_file(link_element.string.split('.')[-1], extensions):\n links_titles.append(\n (current_url + '/' + link_element.get('href').lstrip('/'),\n link_element.string, 'file'))\n else:\n folder_found = True\n links_titles.append(\n (current_url + '/' + link_element.get('href').lstrip('/'),\n link_element.string, 'folder'))\n else:\n if Crawler.__is_file(link_element.string.split('.')[-1], extensions):\n links_titles.append((current_url +\n link_element.get('href').lstrip('/'),\n link_element.string, 'file'))\n else:\n folder_found = True\n links_titles.append((current_url +\n link_element.get('href').lstrip('/'),\n link_element.string, 'folder'))\n\n return [folder_found, links_titles]",
"def parse_index(self, response):\n items = response.css('.item')\n for item in items:\n href = item.css('.top a::attr(href)').extract_first()\n detail_url = response.urljoin(href)\n logger.info('detail url %s', detail_url)\n yield PyppeteerRequest(detail_url, callback=self.parse_detail, wait_for='.item .name')\n \n # next page\n match = re.search(r'page/(\\d+)', response.url)\n if not match: return\n page = int(match.group(1)) + 1\n next_url = f'{self.base_url}/page/{page}'\n yield PyppeteerRequest(next_url, callback=self.parse_index, wait_for='.item .name')",
"def test_iter_version_links():\n HTML = dedent(\"\"\"\\\n <html>\n <body>\n <a/>\n <a href=\"../../packages/foo-1.0.tar.gz\"/>foo-1.0.tar.gz</a>\n <a href=\"../../packages/bar-1.0.tar.gz\"/>bar-1.0.tar.gz</a>\n <a href=\"http://foo.com/foo\" rel=\"download\"/>foo download link</a>\n <a href=\"http://foo.com/files/foo-0.1.0.zip\" rel=\"download\">0.1.0 download_url</a><br/>\n </body>\n </html>\"\"\")\n\n iter_ = iter_version_links(HTML, \"foo\")\n eq_(next(iter_), (\"foo-1.0.tar.gz\", \"../../packages/foo-1.0.tar.gz\"))\n eq_(next(iter_), \"http://foo.com/foo\")\n eq_(next(iter_), (\"foo-0.1.0.zip\", \"http://foo.com/files/foo-0.1.0.zip\"))\n\n with assert_raises(StopIteration):\n next(iter_)",
"def get_links(corpus, page):\n res = []\n for p in corpus:\n if page in corpus[p]:\n res.append(p)\n return res",
"def find_links(obj):\n if isinstance(obj, dict):\n for key, value in obj.iteritems():\n for url in find_links(value):\n yield url\n elif isinstance(obj, list):\n for item in obj:\n for url in find_links(item):\n yield url\n else:\n try:\n if is_link(str(obj)):\n yield obj\n except Exception:\n pass"
] | [
"0.80651504",
"0.750158",
"0.7370286",
"0.73280376",
"0.69186187",
"0.69141686",
"0.6908116",
"0.6793799",
"0.67863417",
"0.6741017",
"0.66748494",
"0.6670355",
"0.66565365",
"0.6593838",
"0.65873545",
"0.65807104",
"0.6502916",
"0.6478992",
"0.64505965",
"0.64059603",
"0.63940126",
"0.63902026",
"0.636362",
"0.6362722",
"0.63455135",
"0.6332261",
"0.6330052",
"0.63288105",
"0.63275266",
"0.6327345"
] | 0.75957644 | 1 |
Parse document objects from page of text | def parse_docs_from_page(self, page_url: str, page_text: str) -> Iterable[Document]:
# parse html response
url = "https://www.med.navy.mil/directives/Pages/Publications.aspx"
base_url = 'https://www.med.navy.mil'
parsed_docs = []
doc_name_list = []
if (page_url.find("Publications") != -1):
doc_type = "NAVMED"
elif (page_url.find("BUMEDNotes") != -1):
doc_type = "BUMEDNOTE"
elif (page_url.find("BUMEDInstructions") != -1):
doc_type = "BUMEDINST"
cac_required = ['CAC', 'PKI certificate required', 'placeholder', 'FOUO']
page = requests.get(page_url, verify=CERTIFICATE_DIR + '/cat3.pem')
soup = bs4.BeautifulSoup(page.content, 'html.parser')
webpart = soup.find(id="onetidDoclibViewTbl0")
items = webpart.find_all('a')
meta = webpart.find_all(lambda tag: tag.name == 'td' and tag.get('class') == ['ms-vb2'])
meta_list = list(meta)
meta_list = [remove_html_tags(str(t)) for t in meta_list]
meta_list = [str(t).encode('ascii', 'ignore').decode('ascii') for t in meta_list]
meta_list = [x.replace("\r\n", " ") for x in meta_list]
if (doc_type == "NAVMED"):
n = 3
elif (doc_type == "BUMEDINST" or doc_type == "BUMEDNOTE"):
n = 4
meta_ = [meta_list[i:i + n] for i in range(0, len(meta_list), n)]
if (doc_type == "NAVMED"):
subject = webpart.find_all(lambda tag: tag.name == 'td' and tag.get('class') == ['ms-vb-title'])
name_list = list(subject)
name_list = [remove_html_tags(str(t)) for t in name_list]
name_list = [str(t).encode('ascii', 'ignore').decode('ascii') for t in name_list]
subnum = [str(t[1]).split()[:2] for t in meta_]
title_ = [str(t[1]).split()[2:] for t in meta_]
title = [' '.join(t) for t in title_]
title = [str(t).replace(',', '') for t in title]
date = [t[0] for t in meta_]
metadata = zip(subnum, title, date, name_list)
metadata = [list(a) for a in metadata]
elif (doc_type == "BUMEDINST"):
subject = webpart.find_all(lambda tag: tag.name == 'td' and tag.get('class') == ['ms-vb-title'])
name_list = list(subject)
name_list = [remove_html_tags(str(t)) for t in name_list]
name_list = [str(t).encode('ascii', 'ignore').decode('ascii') for t in name_list]
metadata = list(zip(name_list, meta_))
elif (doc_type == "BUMEDNOTE"):
metadata = meta_
item_list = list(items)
pdf_links = [link['href'] for link in item_list if link['href'].endswith(('pdf', 'html'))]
pdf_links = ["https://www.med.navy.mil" + a for a in pdf_links]
pdf_links = [str(a).replace(' ', '%20') for a in pdf_links]
if (doc_type == "BUMEDINST" or doc_type == "BUMEDNOTE"):
metadata = [list(ele) for ele in metadata]
for i in range(0, len(metadata)):
metadata[i].append(pdf_links[i])
for item in metadata:
if (doc_type == "NAVMED"):
pdf_di = DownloadableItem(doc_type='pdf', web_url=item[4])
if (str(item[3])[0].isdigit()):
doc_name = "NAVMED P-" + str(item[3]).split()[0]
doc_num = "P-" + str(item[3]).split()[0]
if (doc_name in doc_name_list):
number_of_times = sum(1 for s in doc_name_list if doc_name in s)
doc_name = doc_type + " " + doc_num + "-" + str(number_of_times)
doc_num = doc_num + "-" + str(number_of_times)
else:
doc_name = "NAVMED " + str(item[0][1]) + " " + ' '.join(str(item[3]).split()[:3])
                doc_num = str(item[0][1]) + " " + ' '.join(str(item[3]).split()[:3])
if (doc_name in doc_name_list):
number_of_times = sum(1 for s in doc_name_list if doc_name in s)
doc_name = doc_type + " " + doc_num + "-" + str(number_of_times)
doc_num = doc_num + "-" + str(number_of_times)
doc_title = str(item[1])
publication_date = str(item[2])
cac_login_required = False
crawler_used = "navy_med_pubs"
source_page_url = page_url
downloadable_items = [pdf_di]
version_hash_fields = {
"item_currency": str(item[3]).split('/')[-1], # version metadata found on pdf links
"pub_date": publication_date.strip(),
"document_title": doc_title.strip(),
"document_number": doc_num.strip()
}
elif (doc_type == "BUMEDINST"):
pdf_di = DownloadableItem(doc_type='pdf', web_url=item[2])
doc_num = str(item[0]).split()[0]
doc_name = doc_type + " " + doc_num
doc_title = str(item[1][1])
publication_date = str(item[1][0])
if (str(item[2]).endswith('html')):
cac_login_required = True
elif (str(item[2]).endswith('pdf')):
cac_login_required = False
if (doc_name in doc_name_list):
number_of_times = sum(1 for s in doc_name_list if doc_name in s)
doc_name = doc_type + " " + doc_num + "-" + str(number_of_times)
doc_num = doc_num + "-" + str(number_of_times)
doc_name_list.append(doc_name)
elif (doc_type == "BUMEDNOTE"):
pdf_di = DownloadableItem(doc_type='pdf', web_url=item[4])
doc_num = str(item[0]).split()[1]
doc_name = doc_type + " " + doc_num
doc_title = str(item[2])
publication_date = str(item[1])
cac_login_required = False
if (doc_name in doc_name_list):
number_of_times = sum(1 for s in doc_name_list if doc_name in s)
doc_name = doc_type + " " + doc_num + "-" + str(number_of_times)
doc_num = doc_num + "-" + str(number_of_times)
doc_name_list.append(doc_name)
version_hash_fields = {
"doc_name": doc_name, # version metadata found on pdf links
"pub_date": publication_date.strip(),
"document_title": doc_title.strip(),
"document_number": doc_num.strip()
}
version_hash_raw_data = version_hash_fields
doc = Document(
doc_name=re.sub(',', '', doc_name.strip()),
doc_title=re.sub('\\"', '', doc_title),
doc_num=re.sub(',', '', doc_num.strip()),
doc_type=re.sub(',', '', doc_type.strip()),
publication_date=publication_date,
cac_login_required=cac_login_required,
crawler_used="navy_med_pubs",
source_page_url=page_url.strip(),
version_hash_raw_data=version_hash_fields,
downloadable_items=[pdf_di]
)
parsed_docs.append(doc)
return parsed_docs | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_docs_from_page(self, page_url: str, page_text: str) -> Iterable[Document]:\n # get the data\n data = requests.get(page_url)\n\n # load data into bs4\n soup = BeautifulSoup(data.text, 'html.parser')\n # links = []\n pdf_dis = []\n dates = []\n table = []\n version_hash_fields = []\n\n for tr in soup.find_all('tr'):\n date_col = soup.find_all('td', attrs={'class': 'fd-col2'})\n hyperlink_col = soup.find_all('td', attrs={'class': 'fd-col1'})\n values = [td.text for td in tr.find_all('td')]\n table.append(values)\n for link in hyperlink_col:\n pdf_url = 'https://www.health.mil/' + link.find('a')['href']\n pdf_di = DownloadableItem(doc_type='pdf',\n web_url=pdf_url)\n pdf_dis.append(pdf_di)\n for date in date_col:\n dates.append(date.text)\n\n doc_nums = []\n doc_titles = []\n doc_names = []\n for row in table[1:]:\n doc_data = row[0].split(':')\n\n if len(doc_data) == 1: # if no colon then no doc number\n if doc_data[0] == \"(DTM)-19-004 -Military Service by Transgender Persons and Persons with Gender Dysphoria (Change 1)\":\n doc_nums.append(\"19-004\")\n doc_names.append(\"DTM\")\n doc_titles.append(doc_data[0][14:])\n version_hash_fields.append({\"doc_name\": 'DTM', \"doc_title\": doc_data[0][14:]})\n else:\n doc_nums.append(\" \")\n doc_titles.append(doc_data[0])\n doc_names.append(doc_data[0])\n version_hash_fields.append({\"doc_name\": doc_data[0], \"doc_title\": doc_data[0]})\n else:\n\n tmptitle = doc_data[1][1:].replace(\"\\u201cClinical\",\"Clinical\").replace(\"System,\\u201d\",\"System\").replace(\"BUILDER\\u2122 \", \"Builder\").replace(\"\\u2013\",\"\")\n\n if \"Volume\" in tmptitle:\n doc_nums.append(doc_data[0][7:]+\" Volume \"+tmptitle.split()[-1])\n else:\n doc_nums.append(doc_data[0][7:])\n doc_titles.append(doc_data[1][1:].replace(\"\\u201cClinical\",\"Clinical\").replace(\"System,\\u201d\",\"System\").replace(\"BUILDER\\u2122 \", \"Builder\").replace(\"\\u2013\",\"\"))\n doc_names.append(doc_data[0][:6])\n\n version_hash_fields.append({\"doc_name\": doc_data[0][:7], \"doc_title\": doc_data[1]})\n\n parsed_docs = []\n page_url = 'https://www.health.mil/About-MHS/OASDHA/Defense-Health-Agency/Resources-and-Management/DHA-Publications'\n num_docs = len(doc_nums)\n for i in range(num_docs):\n # put all the relevant info into dictionaries\n doc = Document(doc_type=doc_names[i].replace(\" \",\"-\"),\n doc_title=doc_titles[i],\n doc_num=doc_nums[i],\n doc_name=doc_names[i].replace(\" \",\"-\")+\" \"+doc_nums[i],\n publication_date=dates[i],\n cac_login_required=False,\n crawler_used='dha_pubs',\n source_page_url=page_url,\n downloadable_items=[pdf_dis[i]],\n version_hash_raw_data=version_hash_fields[i])\n parsed_docs.append(doc)\n\n return parsed_docs",
"def parse_text(self, page):\n text = page.find(self.tag_prefix + self.revision_tag).find(self.tag_prefix + self.text_tag).text\n title = page.find(self.tag_prefix + self.title_tag).text\n categories = []\n #\n text = self.parse_archivo(text)\n text = self.parse_foto(text)\n text = self.parse_by_line(text)\n text = self.parse_link(text)\n text = self.parse_url(text)\n text = self.parse_fecha(text)\n text = self.parse_bracketed_word(text)\n #\n if text:\n categories = re.findall(self.category_finder_regex, text)\n #\n text = self.parse_category(text)\n text = self.parse_other_language(text)\n text = self.parse_table_regex(text)\n text = self.parse_ver_fuente(text)\n text = self.remove_extra_text(text)\n text = self.remove_extra_characters(text)\n\n categorias = []\n for cat in categories:\n categorias.append(cat[6])\n\n if text:\n if 'REDIRECT' in text or 'redirect' in text:\n return None\n\n return Article(title=title, content=text, categories=categorias)",
"def parse_document(self, response):\n document = response.meta['document']\n document['title'] = ' '.join(response.css('p.s32B251D').css(\n 'span.s7D2086B4 ::text').extract())\n paragraphs = []\n for paragraph in response.css('p'):\n spans = [span for span in paragraph.css('span ::text').extract()\n if span != u'\\xa0' and span != '']\n if len(spans):\n paragraphs.append(u' '.join(spans))\n document['sentences'] = paragraphs\n yield document",
"def parse(self, source):\n command = 'pandoc ' + source + ' -t json'\n proc = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n res = proc.communicate()\n if res[1]:\n print('PROCESS FAILED. SEE BELOW:')\n print(str(res[1]))\n return None # sending stderr output to user\n else:\n document = json.loads(res[0])\n self.document_parse(document)\n return self.tables, self.text",
"def parse(text):\n md = markdown.Markdown(['codehilite', 'tables', ])\n\n for iref in re.findall(img_ref_re, text):\n img_id = iref[7]\n try:\n image = FlatPageImage.objects.get(pk=int(img_id))\n md.references[img_id] = (image.image_path.url, '')\n except ObjectDoesNotExist:\n pass\n\n for lref in re.findall(reference_re, text):\n doc_name = lref[7]\n try:\n doc = File.objects.get(name=doc_name)\n md.references[doc_name]= (doc.url, doc.name)\n except ObjectDoesNotExist:\n pass\n\n return md.convert(text)",
"def olive_parser(text: str) -> dict:\n soup = BeautifulSoup(text, \"lxml\")\n root = soup.find(\"xmd-entity\")\n page_no = root['page_no']\n identifier = root['id']\n language = root['language']\n title = soup.meta['name']\n entity_type = root['entity_type']\n issue_date = soup.meta['issue_date']\n\n out = {\n \"meta\": {\n \"language\": None,\n \"type\": {}\n },\n \"r\": [],\n \"stats\": {},\n \"legacy\": {\"continuation_from\": None, \"continuation_to\": None},\n }\n out[\"meta\"][\"title\"] = title\n out[\"meta\"][\"page_no\"] = [int(page_no)]\n out[\"meta\"][\"language\"] = normalize_language(language)\n out[\"meta\"][\"type\"][\"raw\"] = entity_type\n out[\"meta\"][\"issue_date\"] = issue_date\n\n new_region = {\n \"c\": [],\n \"p\": []\n }\n\n new_paragraph = {\n \"l\": []\n }\n\n new_line = {\n \"c\": [],\n \"t\": []\n }\n\n new_token = {\n \"c\": [],\n \"tx\": \"\"\n }\n\n for primitive in soup.find_all(\"primitive\"):\n\n # store coordinate of text areas (boxes) by page\n # 1) page number, 2) coordinate list\n region = copy.deepcopy(new_region)\n region[\"c\"] = [int(i) for i in primitive.get('box').split(\" \")]\n\n para = None\n line = None\n line_counter = 0\n\n for tag in primitive.find_all(recursive=False):\n\n if tag.name == \"l\":\n\n if para is None and line is None:\n para = copy.deepcopy(new_paragraph)\n line = copy.deepcopy(new_line)\n\n if line_counter > 0 and line is not None:\n line = normalize_line(line, out[\"meta\"][\"language\"])\n para[\"l\"].append(line)\n\n if tag.get(\"p\") in [\"S\", \"SA\"] and line_counter > 0:\n region[\"p\"].append(para)\n para = copy.deepcopy(new_paragraph)\n\n line = copy.deepcopy(new_line)\n line[\"c\"] = [\n int(i)\n for i in tag.get('box').split(\" \")\n ]\n line_counter += 1\n\n if tag.name in [\"w\", \"q\"]:\n\n # store coordinates of each token\n # 1) token, 2) page number, 3) coordinate list\n t = copy.deepcopy(new_token)\n t[\"c\"] = [int(i) for i in tag.get('box').split(\" \")]\n t[\"tx\"] = tag.string\n t[\"s\"] = int(tag.get('style_ref'))\n\n if tag.name == \"q\" and tag.get('qid') is not None:\n qid = tag.get('qid')\n normalized_form = soup.find('qw', qid=qid).text\n t[\"nf\"] = normalized_form\n t[\"qid\"] = qid\n\n # append the token to the line\n line[\"t\"].append(t)\n\n # append orphan lines\n if line is not None:\n line = normalize_line(line, out[\"meta\"][\"language\"])\n para[\"l\"].append(line)\n\n region[\"p\"].append(para)\n\n if para is not None:\n out[\"r\"].append(region)\n\n out[\"legacy\"][\"id\"] = identifier\n out[\"legacy\"][\"source\"] = soup.link['source']\n \"\"\"\n # I suspect this could be deleted\n out[\"legacy\"][\"word_count\"] = int(soup.meta['wordcnt'])\n out[\"legacy\"][\"chars_count\"] = int(soup.meta['total_chars_count'])\n suspicious_chars_count = int(soup.meta['suspicious_chars_count'])\n out[\"legacy\"][\"suspicious_chars_count\"] = int(suspicious_chars_count)\n \"\"\"\n out[\"legacy\"][\"first_id\"] = soup.link['first_id']\n out[\"legacy\"][\"last_id\"] = soup.link['last_id']\n out[\"legacy\"][\"next_id\"] = soup.link['next_id']\n out[\"legacy\"][\"prev_id\"] = soup.link['prev_id']\n\n if root.has_attr('continuation_from'):\n out[\"legacy\"][\"continuation_from\"] = root['continuation_from']\n\n if root.has_attr('continuation_to'):\n out[\"legacy\"][\"continuation_to\"] = root['continuation_to']\n\n return out",
"def parse(self, text):\n parse_results = self._parse(text)\n if self.resolve_corefs:\n self._resolve_corefs(parse_results)\n return parse_results",
"def scrape(data):\n result = {}\n xml_str = scraperwiki.pdftoxml(data)\n root = xml.etree.ElementTree.fromstring(xml_str)\n page_id = 0\n for page in root:\n page_id += 1\n for text in page.iter(tag=\"text\"):\n if text.get(\"font\") == \"3\":\n text_id = (page_id, text.get(\"top\"))\n row = result.get(text_id, \"\")\n if row and len(row) < 60:\n row = row + \" \" * (60 - len(row))\n result[text_id] = row + text.text\n return result",
"def parse_text(self, text: str) -> SectionDict:",
"def parse(doc):\n\n i, comment = _parse_comment(0, doc)\n i, guard = _parse_prefix(i, doc)\n i, body = _parse_body(i, doc)\n i = _parse_suffix(i, doc)\n i = parse_blanks(i, doc)\n\n if i < len(doc):\n raise ParseFailure(i, \"Unexpected content after end of document\")\n\n return Document(comment, guard, body)",
"def parse(self, text, showToc=True):\n return self.parser.parse(text, showToc, attributes=ALLOWED_ATTRIBUTES)",
"def document_parse(self, document):\n if type(document) == dict:\n self.list_parse(document['blocks'])\n elif type(document) == list:\n self.list_parse(document[1])\n else:\n print('Incompatible Pandoc version. Process failed.')",
"def read(self, content: str):\n documents = []\n # 1. Split the text in documents using string '-DOCSTART- -X- O O' and loop over it\n content = content.split('-DOCSTART- -X- O O')\n for doc in content:\n if doc != '':\n words = []\n sentences = []\n labels = []\n start = 0\n # 2. Split lines and loop over\n str_sentences = doc.split('\\n\\n')\n # 3. Make vectors of tokens and labels (colunn 4) and at the '\\n\\n' make a sentence\n for sentence in str_sentences:\n if sentence != '':\n tokens = sentence.split('\\n')\n for token in tokens:\n if ' ' in token :\n cols = token.split(' ')\n words.append(cols[0])\n labels.append(cols[1])\n sentences.append(Sentence(doc, start, start+len(tokens)))\n start += len(tokens)\n # 4. Create a Document object\n documents.append(Document.create_from_vectors(words, sentences, labels))\n\n return documents",
"def parse(self, text):\n assert isinstance(text, str)\n if text.strip() == '':\n return [], []\n\n output = self._annotate(text, properties={\n \"annotators\": \"tokenize,ssplit,pos\",\n \"coref.md.type\": \"dep\",\n \"coref.mode\": \"statistical\"\n })\n\n words = []\n postags = []\n\n for sentence in output['sentences']:\n for token in sentence['tokens']:\n word = token['word']\n pos = token['pos']\n word = re.sub(r'\\s', '', word)\n words.append(word)\n postags.append(pos)\n return words, postags",
"def parse(text, showToc=True):\n p = Parser(show_toc=showToc)\n return p.parse(text)",
"def split(text):\n articles = re.split(\"<doc>\", text)\n del articles[0]\n return articles",
"def parseDocumentsForW2ui(response, obj_type):\n records = []\n #create a list of dicts\n for record in response[\"data\"]:\n records.append(record.to_mongo())\n return parseDocObjectsToStrings(records, obj_type)",
"def parse(self, text, start=None):\n return self.parser.parse(text, start=start)",
"def _parse_documents(self, item):\n documents = []\n agenda_url = item.css('a[href*=Agenda]::attr(href)').extract_first()\n if agenda_url:\n documents.append({'url': agenda_url, 'note': 'Agenda'})\n minutes_url = item.css('a[href*=Minutes]::attr(href)').extract_first()\n if minutes_url:\n documents.append({'url': minutes_url, 'note': 'Minutes'})\n video_url = item.css('td[headers~=VideoLink] a::attr(onclick)').extract_first()\n video_url_match = re.search(r'http.*(?=\\',\\'p)', video_url or '')\n if video_url and video_url_match:\n documents.append({'url': video_url_match.group(), 'note': 'Video'})\n return documents",
"def parse(text):\n ret = Docstring()\n if not text:\n return ret\n\n # Clean according to PEP-0257\n text = inspect.cleandoc(text)\n\n # Find first title and split on its position\n match = _titles_re.search(text)\n if match:\n desc_chunk = text[: match.start()]\n meta_chunk = text[match.start():]\n else:\n desc_chunk = text\n meta_chunk = \"\"\n\n # Break description into short and long parts\n parts = desc_chunk.split(\"\\n\", 1)\n ret.short_description = parts[0] or None\n if len(parts) > 1:\n long_desc_chunk = parts[1] or \"\"\n ret.blank_after_short_description = long_desc_chunk.startswith(\"\\n\")\n ret.blank_after_long_description = long_desc_chunk.endswith(\"\\n\\n\")\n ret.long_description = long_desc_chunk.strip() or None\n\n # Split by sections determined by titles\n matches = list(_titles_re.finditer(meta_chunk))\n if not matches:\n return ret\n splits = []\n for j in range(len(matches) - 1):\n splits.append((matches[j].end(), matches[j + 1].start()))\n splits.append((matches[-1].end(), len(meta_chunk)))\n\n chunks = {}\n for j, (start, end) in enumerate(splits):\n title = matches[j].group(1)\n if title not in _valid:\n continue\n chunks[title] = meta_chunk[start:end].strip(\"\\n\")\n if not chunks:\n return ret\n\n # Add elements from each chunk\n for title, chunk in chunks.items():\n # Determine indent\n indent_match = re.search(r\"^\\s+\", chunk)\n if not indent_match:\n raise ParseError('Can\\'t infer indent from \"{}\"'.format(chunk))\n indent = indent_match.group()\n\n # Check for returns/yeilds (only one element)\n if _sections[title] in (\"returns\", \"yields\"):\n part = inspect.cleandoc(chunk)\n ret.meta.append(_build_meta(part, title))\n continue\n\n # Split based on lines which have exactly that indent\n _re = \"^\" + indent + r\"(?=\\S)\"\n c_matches = list(re.finditer(_re, chunk, flags=re.M))\n if not c_matches:\n raise ParseError('No specification for \"{}\": \"{}\"'.format(title, chunk))\n c_splits = []\n for j in range(len(c_matches) - 1):\n c_splits.append((c_matches[j].end(), c_matches[j + 1].start()))\n c_splits.append((c_matches[-1].end(), len(chunk)))\n for j, (start, end) in enumerate(c_splits):\n part = chunk[start:end].strip(\"\\n\")\n ret.meta.append(_build_meta(part, title))\n\n return ret",
"def _parse(self):\n tree = etree.parse(self.filename)\n root_node = tree.getroot()\n\n text_node = root_node.find(\"TEXT\")\n\n # Get text\n self.text = self._extract_text(text_node)\n\n # Get and create Event objects\n self.events = self._get_and_create_event_objects(text_node, root_node)\n\n # Get and create Timex objects\n self.timex = self._get_and_create_timex_objects(text_node, root_node)\n\n # Create Relation objects and link them\n self.relations = self._get_and_create_relation_objects(root_node)\n\n # Build text structure. Must be called last.\n self.text_structure = self._get_and_build_text_structure()",
"def parse(self, doc):\n self.preprocessor.preprocess(doc)\n\n for extractor in self.extractors:\n extractor.extract(doc)\n\n return doc",
"def parseDocument(self, lines):\r\n # Create a ElementTree from the lines\r\n self.root = util.etree.Element(self.markdown.doc_tag)\r\n self.parseChunk(self.root, '\\n'.join(lines))\r\n return util.etree.ElementTree(self.root)",
"def extract_paragraph(file_name, url_text = None, show_property = False, database = None, extract_all_property=False, \n return_documenTM = False, cut_off = True, unit_dict = None, special_unit_dictionary = None):\n if not url_text:\n url_text = file_name\n \n if not database: \n database = {}\n \n if not isinstance(unit_dict, dict):\n unit_dict = unit_dict_default\n \n keyword_dict = make_keyword_dict(unit_dict)\n \n Q = DocumentTM(file_name, **database)\n Q.doc()\n Q.find_strange()\n chemical_type_dict = {}\n database = Q.database()\n \n if special_unit_dictionary:\n Q.set_special_unit(special_unit_dictionary)\n \n \n data_collection = []\n json_list = []\n \n for Para in Q.Para:\n new_split, unit = Q.tokenize_paragraph(Para, lemma = False, Strange = True, cut_off=cut_off)\n \n if not new_split:\n continue\n \n #print (new_split)\n \n before_represent_chem = False\n \n for sent in cut_paragraph(new_split):\n new_sent, unit_dictionary, next_represent_chem = matching_algorithm(sent, database, chemical_type_dict, before_represent_chem)\n\n if extract_all_property:\n #iters = chain.from_iterable(unit_dictionary.values())\n iters = chain.from_iterable([dics.values() for dics in unit_dictionary.values()])\n else:\n iters = unit_dictionary['Character'].values()\n \n \n #print (unit_dictionary['Character'])\n #if unit_dictionary['Character'] or unit_dictionary['Reaction']:\n #data_collection.append([sent, unit_dictionary])\n \n if show_property and (unit_dictionary['Character'] or unit_dictionary['Reaction']):\n \n print (\"\\n\\n------------------------------------\")\n print (file_name)\n print (\" \".join([str(t) for t in new_sent]))\n print (\"\\n\")\n #print (Para)\n #print (\" \".join(new_split))\n print (\"------------------------------------\")\n \n for T in chain.from_iterable(iters):\n #for T in t:\n dictionary_chemical = {'Material':T.target, 'Value':T.value, 'Unit':T.unit, 'Condition':T.condition, 'Property':T.prop,\n 'Reference':str(file_name)}\n \n json_list.append(dictionary_chemical)\n\n if show_property:\n print (\"value:\", T, \"condition:\", T.condition, \"chemical:\", T.target)\n \n if isinstance(next_represent_chem, Chemical) or not next_represent_chem:\n before_represent_chem = next_represent_chem \n \n if return_documenTM:\n return json_list, Q\n \n return json_list",
"def __parse_document(self, results):\n fullname = self.__extract_fullname(results[0])\n if not results[1].startswith(\"-\"):\n raise ValueError(\"Invalid second line of output: '%s'. \"\\\n \"Expected a title underline.\"\n % text[1])\n results = results[2:] # trim off top two lines of header information\n maintests, cleanup = self.__split_on_cleanup(results)\n overall_success = not (maintests[0] == FAILURE_MARKER)\n\n if overall_success:\n testcases = self.__parse_success(fullname, maintests)\n else:\n testcases = self.__parse_failures(fullname, maintests)\n\n return testcases",
"def parse_document(file):\n lines = file.read_text(encoding='utf-8').split('\\n')\n # If the \"#\" character is present, it means the line contains the\n # document original link. So, if the # is not present,\n # we have a normal paragraph to append to the list.\n return [line for line in lines if line != '' and '#' not in line]",
"def __init__(self, page): \n self.page_num = page.id[page.id.rfind(\"_\") + 1:]\n self.offset = page.offset\n text = page.text\n text = text.replace(b'\\r', b'')\n text = text.replace(b'\\n', b'') # Cleaning up the contents of the bytes \n self.text = text.decode(\"ascii\", \"ignore\") # Converting from bytes to string\n self.names = [] \n for nameString in page.names: \n name = Name(nameString) \n self.names.append(name)",
"def generate_paragraphs(self):\n def dig(hr_tag, end_index):\n paragraphs = []\n for tag in hr_tag.children:\n if tag.name == 'hr':\n return paragraphs + dig(tag, end_index)\n text = (str(tag)\n if isinstance(tag, NavigableString)\n else tag.get_text())\n if '$' in text and not tag.find('table'):\n start_index = document_txt.index(text[:search_chars])\n end_index = start_index + len(text)\n paragraphs.append({\n 'text': text,\n 'start': start_index,\n 'end': end_index\n })\n return paragraphs\n\n with open('document.txt', 'rb') as f1:\n document_txt = f1.read().decode()\n search_chars = 20\n paragraphs = dig(self.soup.find('body'), 0)\n paragraphs = sorted(paragraphs, key=lambda x: x['start'])\n with open('paragraphs.txt', 'wb') as f2:\n f2.write(json.dumps(paragraphs, indent=2, sort_keys=True).encode())",
"def HTMLparser(self):\n soup = self.getHTML()\n \n # Sort through all the text in the html:\n for text in soup.find_all('p'):\n try:\n paragraphNo = int(text.parent.p['id'][14:])\n \n # Only grab paragraphs in \"On the Social Contract\"\n if paragraphNo < self.START_PARAGRAPH or paragraphNo > self.END_PARAGRAPH:\n continue\n \n elif text.string:\n \n # Ignore those \"paragraphs\" in the html that simply outline different chapters/books\n if re.search('^(CHAPTER|BOOK)(.*):', text.string):\n continue\n \n else:\n \n # Want to read in the document by sentence (for RousseauBot to use individually later on)\n tempList = re.split('(?<!etc)\\.\\s(?!.*\\\")|\\!', text.string)\n for sentence in tempList:\n \n # When a \"paragraph\" is just a single sentence, re's .split() returns the sentence and a ''\n # Also, remove overly long quotes - Twitter has char limit\n if sentence != '' and len(sentence.strip()) < self.TWITTER_LIMIT:\n self.quotes.append(sentence.strip())\n \n except KeyError:\n \n # BS throws KeyError when <p>'s id field is blank; ignore - all paragraphs I need has an id\n continue",
"def read(cls, text):\n\n\t\treturn cls._parse(cls._tokenize(text))"
] | [
"0.71913856",
"0.69207364",
"0.6807848",
"0.6564104",
"0.6440653",
"0.6434585",
"0.6419085",
"0.6406201",
"0.63876253",
"0.6367041",
"0.6246994",
"0.62138414",
"0.6203199",
"0.6178067",
"0.6176524",
"0.61475307",
"0.6122435",
"0.6108919",
"0.60970265",
"0.6094376",
"0.6085055",
"0.60446584",
"0.6037327",
"0.6010703",
"0.59780526",
"0.59694475",
"0.59677464",
"0.5946251",
"0.5940067",
"0.5930401"
] | 0.6943239 | 1 |
Provide an API call to Pushover for mobile notifications of events in the script. "message" is a string that will display on the Pushover notification. "app_token" is a string for the app token provided by Pushover. | def pushover(message, app_token):
import urllib, httplib
conn = httplib.HTTPSConnection("api.pushover.net:443")
conn.request("POST", "/1/messages.json",
urllib.urlencode({
"token": app_token,
"user": "uU95W9hYqeW3b24uyPaT1skT1SG35N",
"message": message,
}), { "Content-type": "application/x-www-form-urlencoded" })
conn.getresponse() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tell_sophie(message):\n \n d = {'token': cf.get('pushover', 'apikey'),\n 'user': cf.get('pushover', 'userkey'),\n 'message': message }\n requests.post('https://api.pushover.net/1/messages.json', json=d)",
"def send_push(message: Dict[Any, Any]) -> None:\n notification_url = settings.NOTIFICATION_URL\n if notification_url:\n data = {\"message\": message}\n response = requests.post(notification_url, json=data)\n response.raise_for_status()",
"def _send(self, message):\n\n kwargs = message.get()\n kwargs['token'] = self.token\n\n assert 'message' in kwargs\n assert self.token is not None\n\n if not 'user' in kwargs:\n if self.user is not None:\n kwargs['user'] = self.user_token\n if self.user_device is not None:\n kwargs['device'] = self.user_device\n else:\n kwargs['user'] = os.environ['PUSHOVER_USER']\n\n data = urlencode(kwargs)\n conn = HTTPSConnection(Pushover.PUSHOVER_SERVER)\n conn.request(\"POST\", Pushover.PUSHOVER_ENDPOINT, data, Pushover.PUSHOVER_CONTENT_TYPE)\n output = conn.getresponse().read().decode('utf-8')\n data = json.loads(output)\n\n if data['status'] != 1:\n raise PushoverError(output)\n else:\n return True",
"def sendNotification(token, title, message, extraData=None, channelID=None):\n url = 'https://exp.host/--/api/v2/push/send'\n\n headers = {\n \"Content-Type\": \"application/json\"\n }\n\n data = {\n \"to\": token,\n \"title\": title,\n \"body\": message\n }\n\n # Verify we have Additional data to append\n if extraData is not None:\n data[\"data\"] = extraData\n\n # Android Only! Verify if we have a channel ID and append it\n if channelID is not None:\n data[\"channelId\"] = channelID\n\n res = requests.post(url, data=json.dumps(data), headers=headers)\n return res.status_code",
"def on_push(self, payload):\n pass",
"def send_message(token, data):\n\n sender = requests.post(\"https://graph.facebook.com/v2.6/me/messages\", params={\"access_token\": token},\n data=data,\n headers={'Content-type': 'application/json'})\n if sender.status_code != requests.codes.ok:\n print(sender.text)",
"def on_message(data):\n pass",
"def send_notification (event):\n Publisher.sendMessage (event)",
"def handle_app_message(self, message):\n to_print = f'\\nMessage recieved from: {message.get_sender()}...\\n'\n to_print += message.data\n to_print += '\\nStar-node command:'\n print(to_print)\n self._log.write_to_log(\n \"Message\", f'Message received from {message.get_sender()} ')",
"def NotifyPushEvent(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def send_push_nachricht(message: str, pushsafer_code: str, title: str = \"Termin Verfuegbar!\"):\n\n # all\n device = \"a\"\n # Alarm\n icon = 2\n # Buzzer\n sound = 8\n # 3mal\n vibration = 3\n # nicht automatisch loeschen\n ttl = 0\n # Hoechste\n priority = 2\n # nach 60 erneut senden bis gesehen oder expire in sec\n retry = 60\n # stoppen erneutes zustellen in sec\n expire = 60\n # nicht antworten koennen\n answer = 0\n\n url = \"\"\n url_title = \"\"\n image1 = \"\"\n image2 = \"\"\n image3 = \"\"\n\n pushsafer.init(pushsafer_code)\n pushsafer.Client().send_message(message, title, device, icon, sound, vibration, url, url_title, ttl, priority, retry, expire, answer, image1, image2, image3)",
"def send(self, message):\n if type(message) is PushoverMessage:\n return self._send(message)\n else:\n raise PushoverError(\"Wrong type passed to Pushover.send()!\")",
"def test_get_apns_message_with_succes(self, mock_client): \n\n event = {\n 'operation': 'getMessage', \n 'arguments': {\n 'template': 'my-sample-geofence-id',\n }\n }\n\n response = {\n \"PushNotificationTemplateResponse\": {\n 'APNS': {\n 'Action': 'OPEN_APP',\n 'Title': 'Sample Title',\n 'Body': 'This is a sample body'\n }\n }\n }\n\n mock_client().get_push_template.return_value = response\n response = manageMessages.handler(event, None)\n\n self.assertTrue(response)\n self.assertEqual(response['status'], 'MESSAGE_OK')\n self.assertEqual(response['message']['service'], 'APNS')",
"def notify_slack_message(token, channel, message):\n client = WebClient(token=token)\n try:\n response = client.chat_postMessage(\n channel=channel,\n text=message)\n except SlackApiError as e:\n print(f\"Got an error: {e.response['error']}\")",
"def onMessage(self, message):\n raise NotImplementedError",
"def trigger_pushcut(notification: str, data=None) -> int:\n headers = {\"API-Key\": os.environ[\"PUSHCUT_API_TOKEN\"]}\n url = f\"https://api.pushcut.io/v1/notifications/{urllib.parse.quote(notification)}\"\n return requests.post(url, json=data, headers=headers).status_code",
"def on_message(self, ws, message):\n message = json.loads(message)\n if message['type'] == 'error':\n self.on_error(None, message['message'])\n elif message['type'] == 'subscriptions':\n print(\"Subscribed to {}\".format(', '.join([ channel['name'] for channel in message['channels'] ])))\n else:\n if ((message['type']=='ticker' and message['product_id'] in self._ticker) or \n (message['type'] in [\"snapshot\", \"l2update\"] and message['product_id'] in self._level2) or \n (message['type'] in [\"received\",\"open\",\"done\",\"match\",\"change\",\"activate\"] )):\n self.messages.append(message)\n elif message['type']=='heartbeat':\n self.updated_time = time.time()",
"def push_t(bot, message, message_uuid, chat_id):\n\n username = get_user_info(chat_id)['PID'][3:-4].title()\n message = \"Hey {0}!\\n{1}\".format(username, message)\n try:\n response = bot.sendMessage(chat_id=chat_id, text=message, parse_mode='markdown')\n push_message_record = PushNotification(message_uuid=message_uuid, chatID=chat_id, message_id=response.message_id, sent=True)\n list_of_objs.append(push_message_record)\n except Exception as e:\n push_message_record = PushNotification(message_uuid=message_uuid, chatID=chat_id, failure_reason=str(e))\n list_of_objs.append(push_message_record)\n inactive_users.append(chat_id)",
"def msg(self, message):\n\n message = PushoverMessage(message)\n self.messages.append(message)\n return message",
"def get_app_message(self):\n return self.messages[\"app\"].get()",
"def on_event():\n\n event = request.get_json()\n \n token_status, token_text = validate_token()\n\n if token_status != 0:\n return json.jsonify({'text': token_text})\n\n if event['type'] == 'ADDED_TO_SPACE' and event['space']['type'] == 'ROOM':\n text = 'Thanks for adding me to \"%s\"! For help type @bot help' % event['space']['displayName']\n \n elif event['type'] == 'MESSAGE':\n\n room_name = event['space']['name'].split('/')[1]\n commands = ['list', 'add', 'remove', 'help']\n\n try:\n param = event['message']['text'].split()[1:][0]\n except:\n text = _help()\n return json.jsonify({'text': text})\n\n if param in commands:\n\n if param == 'list':\n text = _list(room_name)\n\n elif param == 'add':\n text = _add(event, room_name)\n\n elif param == 'remove':\n text = _remove(event, room_name)\n\n elif param == 'help':\n text = _help()\n return json.jsonify({'text': text})\n \n else:\n text = send_msg(event, room_name)\n\n else:\n return\n \n return json.jsonify({'text': text})",
"def send_slack(self, message):\n self.slack_client.api_call('chat.postMessage', channel=self.slack_channel, text=message, username=self.username, icon_emoji=self.slack_icon_emoji)\n print(\"Slack Notification sent\")",
"async def chat_message(self, event):\n message = event['message']\n await self.send_json({\n 'message': message\n })",
"def send_notification(self, settings, message, image):\n\t\tserver_url = self._get_server_url(settings)\n\t\tif not server_url or not server_url.strip():\n\t\t\t# No APNS server has been defined so do nothing\n\t\t\tself._logger.debug(\"CustomNotifications - No APNS server has been defined so do nothing\")\n\t\t\treturn False\n\n\t\ttokens = settings.get([\"tokens\"])\n\t\tif len(tokens) == 0:\n\t\t\t# No iOS devices were registered so skip notification\n\t\t\tself._logger.debug(\"CustomNotifications - No iOS devices were registered so skip notification\")\n\t\t\treturn False\n\n\t\t# For each registered token we will send a push notification\n\t\t# We do it individually since 'printerID' is included so that\n\t\t# iOS app can properly render local notification with\n\t\t# proper printer name\n\t\tused_tokens = []\n\t\tlast_result = None\n\t\tfor token in tokens:\n\t\t\tapns_token = token[\"apnsToken\"]\n\n\t\t\t# Ignore tokens that already received the notification\n\t\t\t# This is the case when the same OctoPrint instance is added twice\n\t\t\t# on the iOS app. Usually one for local address and one for public address\n\t\t\tif apns_token in used_tokens:\n\t\t\t\tcontinue\n\t\t\t# Keep track of tokens that received a notification\n\t\t\tused_tokens.append(apns_token)\n\n\t\t\tif 'printerName' in token and token[\"printerName\"] is not None:\n\t\t\t\t# We can send non-silent notifications (the new way) so notifications are rendered even if user\n\t\t\t\t# killed the app\n\t\t\t\tprinter_name = token[\"printerName\"]\n\t\t\t\turl = server_url + '/v1/push_printer'\n\n\t\t\t\treturn self._alerts.send_alert(settings, apns_token, url, printer_name, message, None, image) < 300",
"def on_message(self, data):\n req = json.loads(data)\n self.serve(req)",
"def on_message(self, data):\n req = json.loads(data)\n self.serve(req)",
"def send_slack_notification(url: str, title: str, message: str):\n\n content = {\n \"text\": f\"{title}\",\n \"blocks\": [\n {\n \"type\": \"section\",\n \"text\": {\n \"type\": \"mrkdwn\",\n \"text\": f\"{message}\",\n },\n }\n ],\n }\n\n response = requests.post(url, json=content)\n\n # Raise exception if response is not 200\n response.raise_for_status()",
"def async_pubnub_message(\n self, device_id: str, date_time: datetime, message: dict[str, Any]\n ) -> None:\n device = self.get_device_detail(device_id)\n activities = activities_from_pubnub_message(device, date_time, message)\n activity_stream = self.activity_stream\n assert activity_stream is not None\n if activities:\n activity_stream.async_process_newer_device_activities(activities)\n self.async_signal_device_id_update(device.device_id)\n activity_stream.async_schedule_house_id_refresh(device.house_id)",
"def slack(message):\n slack_hook = 'https://hooks.slack.com/services/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'\n headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n requests.post(slack_hook, json.dumps({'text': message}), headers=headers)",
"def push(self, payload, device_tokens=None, aliases=None, tags=None, apids=None):\n if device_tokens:\n payload['device_tokens'] = device_tokens\n if aliases:\n payload['aliases'] = aliases\n if tags:\n payload['tags'] = tags\n if apids:\n payload['apids'] = apids\n body = json.dumps(payload)\n status, response = self._request('POST', body, PUSH_URL,\n 'application/json')\n if not status == 200:\n raise AirshipFailure(status, response)"
] | [
"0.6193292",
"0.6061524",
"0.5765204",
"0.5674728",
"0.54582673",
"0.5439559",
"0.5439116",
"0.5438104",
"0.54364663",
"0.5409798",
"0.5393484",
"0.53734607",
"0.5351594",
"0.5273738",
"0.5259695",
"0.5203758",
"0.51804733",
"0.51779073",
"0.51729697",
"0.51518786",
"0.50869054",
"0.5061803",
"0.50514156",
"0.5045199",
"0.5040529",
"0.5040529",
"0.50337094",
"0.50207925",
"0.5019973",
"0.5014173"
] | 0.79063237 | 0 |
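A hedged Python 3 counterpart to the document above, for comparison: it keeps the same Pushover endpoint (api.pushover.net, /1/messages.json) but uses http.client and urllib.parse, and takes the user key as a parameter instead of hard-coding it. The default user_key value is a placeholder, not a real key.

import http.client
import urllib.parse

def pushover_py3(message, app_token, user_key="YOUR_PUSHOVER_USER_KEY"):
    # POST the notification to Pushover's messages endpoint.
    conn = http.client.HTTPSConnection("api.pushover.net", 443)
    body = urllib.parse.urlencode({
        "token": app_token,   # application token from the Pushover dashboard
        "user": user_key,     # recipient user/group key (placeholder default)
        "message": message,   # text shown in the notification
    })
    conn.request("POST", "/1/messages.json", body,
                 {"Content-type": "application/x-www-form-urlencoded"})
    return conn.getresponse().status  # 200 means the message was accepted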
Install our virtual environment, removing the old one if it exists. | def install_environment(root):
sys.stdout.write('Installing virtualenv into %s \n' % root)
try:
import virtualenv
except ImportError:
sys.stdout.write('Installing virtualenv into global interpreter \n')
subprocess.call([VE_GLOBAL_SCRIPT, PROJECT_ROOT])
import virtualenv
if path.exists(root):
shutil.rmtree(root)
virtualenv.logger = virtualenv.Logger(consumers=[])
virtualenv.create_environment(root, site_packages=False)
ret_code = subprocess.call([VE_SCRIPT, PROJECT_ROOT, root])
sys.exit(ret_code) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def sync_virtualenv(ctx):\n if not path.isfile(\"./pyenv/bin/pip\"):\n ctx.run(\"virtualenv --no-site-packages --python=/usr/bin/python2.7 pyenv\")\n ctx.run(\"PIP_DOWNLOAD_CACHE=/var/tmp/ ./pyenv/bin/pip install -r requirements.txt\")\n print(\"\"\"\n Installation completed. Please check any error messages above.\n\n If you are going to use `openstack` or ansible directly on the command line, run\n\n . ./pyenv/bin/activate\n\n or even add it to your ~/.bashrc\n \"\"\")",
"def install_virtualenv():\n from .project import sudo_project, virtualenv_path\n\n with sudo():\n virtualenv.install()\n\n with sudo_project():\n virtualenv.create(virtualenv_path())",
"def _setup_virtualenv():\n if files.exists(_interpolate(VIRTUALENV_DIR)):\n print _interpolate('virtualenv %(virtualenv)s already exists')\n else:\n with prefix(_virtualenvwrapper_prefix()):\n run(_interpolate('mkvirtualenv --no-site-packages %(virtualenv)s'))\n with hide('commands'):\n print 'virtualenv %s created with python %s\\n' % (env.project['virtualenv'], run(GET_PYTHON_VERSION))",
"def install(name):\n base = '/home/{}/venvs/{}/base.txt'.format(env.user, name)\n prod = '/home/{}/venvs/{}/prod.txt'.format(env.user, name)\n\n # Upload requirements file.\n put(utils.file_path('requirements', 'base.txt'), base)\n put(utils.file_path('requirements', 'prod.txt'), prod)\n\n # Activate the virtual environment.\n with prefix('source /home/{}/venvs/{}/bin/activate'.format(env.user, name)):\n run('pip install -r {}'.format(prod))",
"def install_or_switch_to_virtualenv(options):\n if options.install_ve:\n install_environment(VE_ROOT)\n elif path.exists(VE_ROOT):\n go_to_ve()\n else:\n sys.stdout.write('No virtualenv detected, please run ./launch.py --install_ve \\n')\n sys.exit(1)",
"def venv():\n path = '/srv/addok/venv/'\n if not exists(path):\n with sudo(user='addok'):\n run(f'python3 -m venv {path}')\n pip('install pip -U')",
"def _setup_venv(self):\n python.setup_virtualenv(\n self.venv_path, sudo_user=self.user, python_version=3)\n packages = [\n \"future\", \"lxml\", \"ipaddress\", \"sqlalchemy < 2.0\", \"python-memcached\",\n \"python-dateutil\", \"configparser\"\n ]\n if self.dbengine == \"postgres\":\n packages.append(\"psycopg2-binary\")\n else:\n packages.append(\"mysqlclient\")\n python.install_packages(packages, self.venv_path, sudo_user=self.user)\n target = \"{}/master.zip\".format(self.home_dir)\n if os.path.exists(target):\n os.unlink(target)\n utils.exec_cmd(\n \"wget https://github.com/sys4/automx/archive/master.zip\",\n sudo_user=self.user, cwd=self.home_dir)\n self.repo_dir = \"{}/automx-master\".format(self.home_dir)\n if os.path.exists(self.repo_dir):\n shutil.rmtree(self.repo_dir)\n utils.exec_cmd(\n \"unzip master.zip\", sudo_user=self.user, cwd=self.home_dir)\n utils.exec_cmd(\n \"{} setup.py install\".format(\n python.get_path(\"python\", self.venv_path)),\n cwd=self.repo_dir)",
"def venv(session):\n # Install dependencies.\n session.install(\"--upgrade\", \"pip\", \"setuptools\")\n session.install(\"-r\", \"requirements-dev.txt\")\n session.install(\"-e\", \".\")\n\n # Customize the venv.\n env_dir = Path(session.bin)\n activate = env_dir / 'activate'\n with activate.open('a') as f:\n f.write(f'\\n[ -f {activate.resolve()}/postactivate ] && . {activate.resolve()}/postactivate\\n')\n\n {{ cookiecutter.project_name }}_complete = nox_file / 'contrib/{{ cookiecutter.project_name }}-complete.sh'\n postactivate = env_dir / 'postactivate'\n with postactivate.open('a') as f:\n f.write('export PYTHONBREAKPOINT=bpdb.set_trace\\n')\n f.write(f'source { {{ cookiecutter.project_name }}_complete.resolve() }\\n')\n\n predeactivate = env_dir / 'predeactivate'\n with predeactivate.open('a') as f:\n f.write('unset PYTHONBREAKPOINT\\n')",
"def setup_virtual_env(self):\n\n venv(\"{0}_env\".format(self.app_name), self.install_django_project)",
"def test_in_virtualenv(self):\n new_executor = self.executor.in_virtualenv('/appenv')\n output, _err = new_executor.pip.install('a-local-package').batch()\n self.assertEqual(output, 'a-local-package installed')\n new_executor_one = self.executor.patch_env(PATH='/bin')\n new_executor_two = new_executor_one.in_virtualenv('/appenv')\n output, _err = new_executor_two.pip.install('a-local-package').batch()\n self.assertEqual(output, 'a-local-package installed')",
"def sub_install_virtualenv():\n sudo('pip install virtualenv') # Need sudo b/c installing to system Python",
"def rebuild():\n try:\n cmd = 'rm -rf %s' % VENV\n if VENVWRAPPER:\n cmd = 'rmvirtualenv %s' % VENV\n _do_virtualenvwrapper_command(cmd)\n except Exception as e:\n print(unicode(e))\n\n cmd = 'virtualenv --no-site-packages -p /usr/bin/python{major}.{minor} {v}'\\\n .format(\n major=sys.version_info[0],\n minor=sys.version_info[1],\n v=VENV,\n )\n if VENVWRAPPER:\n cmd = 'mkvirtualenv --no-site-packages -p /usr/bin/python{major}.{minor} {v}'\\\n .format(\n major=sys.version_info[0],\n minor=sys.version_info[1],\n v=VENV,\n )\n _do_virtualenvwrapper_command(cmd)\n\n # Do two things here:\n # - remove all *.pyc that exist in srcdir.\n # - remove all data/templates dirs that exist (mako caches).\n for base, dirs, files in os.walk(os.getcwd()):\n for fname in files:\n if fname.endswith(\".pyc\"):\n os.remove(os.path.sep.join([base, fname]))\n\n if base.endswith('data/templates'):\n shutil.rmtree(base)",
"def create_virtualenv():\n\n require('virtualenv_root', provided_by=env.environments)\n args = '--clear --distribute --no-site-packages'\n sudo('virtualenv %s %s' % (args, env.virtualenv_root),\n user=env.deploy_user)",
"def setup_virtualenv():\n run('virtualenv -p %(python)s --no-site-packages %(env_path)s;' % env)\n run('source %(env_path)s/bin/activate; easy_install -U setuptools; easy_install pip;' % env)",
"def setup():\r\n global venvs\r\n\r\n try:\r\n os.mkdir(basedir)\r\n except OSError, e:\r\n if e.errno != errno.EEXIST:\r\n raise\r\n os.chdir(basedir)\r\n\r\n # Delete virtualenvs and recreate\r\n for venv in glob('venv-*'):\r\n shutil.rmtree(venv)\r\n for py in available_python_versions():\r\n check_call(['virtualenv', '-p', py,\r\n '--system-site-packages', 'venv-%s' % py])\r\n venvs.append((py, 'venv-%s' % py))\r\n\r\n # Check out and update the repository\r\n if not os.path.exists('Theano'):\r\n try:\r\n check_call(['git', 'clone', ipy_repository])\r\n except CalledProcessError:\r\n check_call(['git', 'clone', ipy_http_repository])\r\n os.chdir(repodir)\r\n check_call(['git', 'checkout', 'master'])\r\n try:\r\n check_call(['git', 'pull', ipy_repository, 'master'])\r\n except CalledProcessError:\r\n check_call(['git', 'pull', ipy_http_repository, 'master'])\r\n os.chdir(basedir)",
"def update(self):\n with settings(user=self.serviceUser):\n self.venv.create()\n\n self.venv.install_twisted()\n self.venv.install(\" \".join(\"\"\"\n psycopg2==2.7.5\n pygments==2.2.0\n spambayes==1.1b3\n trac==1.2.2\n trac-github==2.3\n requests_oauthlib==1.0.0\n svn+https://svn.edgewall.org/repos/trac/plugins/1.2/spam-filter@15310\n git+https://github.com/twisted-infra/twisted-trac-plugins.git\n \"\"\".split()))\n\n # This is txacme v2 but is not yet released.\n # Should be replaced on we have txacme v2.\n # See https://github.com/twisted/txacme/pull/158\n self.venv.install(\n \"--index=https://pypi.chevah.com/simple txacme==1.0.0.chevah4\")\n\n run('mkdir -p ' + self.configDir)\n put(os.path.dirname(__file__) + '/*', self.configDir,\n mirror_local_mode=True)",
"def YumInstall(vm):\n _Install(vm)",
"def YumInstall(vm):\n _Install(vm)",
"def create_venv(venv_directory):\n if os.path.exists(venv_directory) and not confirm(\n f\"{venv_directory} existiert bereits. Überschreiben? [J/n] \"\n ):\n return\n print(\"Erstelle virtuelle Umgebung...\", end=\"\", flush=True)\n _run(f\"python3 -m venv {venv_directory}\")\n _run(f\"{venv_directory}/bin/pip install -qU pip\")\n print(\"OK.\")",
"def setup():\n global venvs\n \n try:\n os.mkdir(basedir)\n except OSError as e:\n if e.errno != errno.EEXIST:\n raise\n os.chdir(basedir)\n \n # Delete virtualenvs and recreate\n for venv in glob('venv-*'):\n shutil.rmtree(venv)\n for py in available_python_versions():\n check_call(['virtualenv', '-p', py, '--system-site-packages', 'venv-%s' % py])\n venvs.append((py, 'venv-%s' % py))\n \n # Check out and update the repository\n if not os.path.exists('ipython'):\n try :\n check_call(['git', 'clone', ipy_repository])\n except CalledProcessError :\n check_call(['git', 'clone', ipy_http_repository])\n os.chdir(repodir)\n check_call(['git', 'checkout', 'master'])\n try :\n check_call(['git', 'pull', ipy_repository, 'master'])\n except CalledProcessError :\n check_call(['git', 'pull', ipy_http_repository, 'master'])\n os.chdir(basedir)",
"def install_deps():\n pipenv_dev = run('pipenv install --dev'.split(), check=True)\n print('Installed dependencies and virtual environment. Type `pipenv shell` to activate later.')",
"def install():\n deploy()\n configure()",
"def setUp(self):\n self.tempdir = tempfile.TemporaryDirectory()\n self.tempdir_path = pathlib.Path(self.tempdir.name)\n self.python = self.tempdir_path / \"bin\" / \"python\"\n venv.create(\n env_dir=self.tempdir_path, system_site_packages=False, with_pip=True\n )\n\n # Ensure the virtual environment has a recent version of pip which\n # has support for PEP 517.\n checked_subprocess_run(f\"{self.python} -m pip install --upgrade pip\")",
"def pip_install():\n _require_environment()\n remote(PIP_INSTALL_PREFIX)",
"def process_develop_setup():\n if 'develop' in sys.argv and os.path.exists('build'):\n # Remove `build` directory created by a regular installation\n shutil.rmtree('build')\n elif 'develop' not in sys.argv and os.path.exists('gfootball_engine'):\n # If `pip install .` is called after development mode,\n # remove the 'fonts' directory copied by a `develop` setup\n copied_fonts = 'third_party/gfootball_engine/fonts'\n if os.path.exists(copied_fonts):\n shutil.rmtree(copied_fonts)\n # Remove .so files (.pyd on Windows)\n for empty_lib in glob.glob(\"brainball_cpp_engine*\"):\n os.remove(empty_lib)\n # Finally, remove symlink to the gfootball_engine directory\n if not os.path.exists('gfootball_engine'):\n return\n if os.path.islink('gfootball_engine'):\n if platform.system() == 'Windows':\n os.remove('gfootball_engine')\n else:\n os.unlink('gfootball_engine')\n else:\n shutil.rmtree('gfootball_engine')",
"def update_dependencies():\n pip = env.virtualenv.child('bin', 'pip')\n reqs = env.code_dir.child('deploy-requirements.txt')\n sudo('%s -q install -U pip' % pip)\n sudo('%s -q install -r %s' % (pip, reqs))",
"def install():\n remote_egg_path = os.path.join(remote_egg_dir, get_egg_name())\n sudo('easy_install -U %s' % remote_egg_path)\n sudo('rm %s' % remote_egg_path)",
"def install_requirements():\r\n if env.hosts:\r\n run ('cd %(path)s %(command_join)s env/bin/pip install -r current-release/requirements.txt' % env)\r\n else:\r\n local('%spip install -r requirements.txt' % virtualenv_bin, capture=False)",
"def fail_local_install(environment):\n environment.add_cleanup(\n environment.cfy.local.execute,\n args=['uninstall'],\n )\n result = environment.cfy.local.execute('install')\n assert result['returncode'] != 0, (\n 'Install workflow succeeded, but should have failed!'\n )\n\n environment.install_result = result",
"def create_virtualenv(virtualenv_path):\n\n run('virtualenv %s --no-site-packages' % virtualenv_path)"
] | [
"0.70328605",
"0.66779727",
"0.66118217",
"0.656353",
"0.65248436",
"0.65089977",
"0.6502636",
"0.6473916",
"0.6418527",
"0.6363957",
"0.63546306",
"0.63459617",
"0.63441616",
"0.62826",
"0.62428427",
"0.6232576",
"0.6230184",
"0.6230184",
"0.62095016",
"0.62049896",
"0.6196062",
"0.6151372",
"0.61335176",
"0.6126647",
"0.61067104",
"0.61001617",
"0.6086509",
"0.60763055",
"0.60689014",
"0.60451174"
] | 0.692725 | 1 |
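The document above drives the legacy virtualenv module API (virtualenv.Logger, virtualenv.create_environment), which recent virtualenv releases no longer expose. A minimal sketch of the same remove-and-recreate flow with the standard-library venv module, assuming a POSIX layout and a requirements.txt file (both assumptions, not part of the source):

import shutil
import subprocess
import venv
from pathlib import Path

def recreate_environment(root):
    root = Path(root)
    if root.exists():
        shutil.rmtree(root)            # drop the stale environment first
    venv.create(root, with_pip=True)   # create a fresh, isolated environment
    # Install project requirements with the new interpreter
    # (bin/ is the POSIX layout; Windows uses Scripts/ instead).
    subprocess.check_call([str(root / "bin" / "python"), "-m", "pip",
                           "install", "-r", "requirements.txt"])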
Decorator that validates whether the app parameter is registered in the process_context. | def valid_process_name(function):
def _wrapper(options, *args, **kwargs):
from synergy.conf.process_context import ProcessContext
if options.app not in ProcessContext.CONTEXT:
msg = 'Aborting: application <%r> defined by --app option is unknown. \n' % options.app
sys.stdout.write(msg)
raise ValueError(msg)
return function(options, *args, **kwargs)
return _wrapper | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def require_context(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n if not is_admin_context(args[0]) and not is_user_context(args[0]):\n raise exception.NotAuthorized()\n return f(*args, **kwargs)\n return wrapper",
"def __call__(self, app: App, **kwargs: Any) -> bool:\n ...",
"def require_admin_context(f):\n @wraps(f)\n def wrapper(*args, **kwargs):\n if not is_admin_context(args[0]):\n raise exception.AdminRequired()\n return f(*args, **kwargs)\n return wrapper",
"def require_context(f):\n\n def wrapper(*args, **kwargs):\n if not is_admin_context(args[0]) and not is_user_context(args[0]):\n raise exception.NotAuthorized()\n return f(*args, **kwargs)\n return wrapper",
"def require_context(f):\n\n def wrapper(*args, **kwargs):\n if not is_admin_context(args[0]) and not is_user_context(args[0]):\n raise exception.NotAuthorized()\n return f(*args, **kwargs)\n return wrapper",
"def require_context(f):\n\n def wrapper(*args, **kwargs):\n if not _is_admin_context(args[0]) and not _is_user_context(args[0]):\n raise exceptions.NotAuthorized()\n return f(*args, **kwargs)\n return wrapper",
"def require_context(f):\n\n def wrapper(*args, **kwargs):\n if not _is_admin_context(args[0]) and not _is_user_context(args[0]):\n raise exceptions.NotAuthorized()\n return f(*args, **kwargs)\n return wrapper",
"def test_process_invalid1(self):\n self.skill.logic = {}\n self.skill.valid.app_id = '12345'\n @self.skill.launch\n def sample_func():\n \"\"\"Decorated function.\"\"\"\n pass\n self.skill.logic['LaunchRequest']()\n self.assertFalse(self.skill.process(data.SAMPLE_LAUNCH_REQUEST))",
"def require_admin_context(f):\n\n def wrapper(*args, **kwargs):\n if not is_admin_context(args[0]):\n raise exception.AdminRequired()\n return f(*args, **kwargs)\n return wrapper",
"def require_admin_context(f):\n\n def wrapper(*args, **kwargs):\n if not is_admin_context(args[0]):\n raise exception.AdminRequired()\n return f(*args, **kwargs)\n return wrapper",
"def wrapped_function(self, *args, **kwargs):\n if self.program is None:\n raise self.api_error(\n status_code=status.HTTP_404_NOT_FOUND,\n developer_message='no program exists with given key',\n error_code='program_does_not_exist'\n )\n return view_func(self, *args, **kwargs)",
"def require_admin_context(f):\n\n def wrapper(*args, **kwargs):\n if not _is_admin_context(args[0]):\n raise exceptions.AdminRequired()\n return f(*args, **kwargs)\n return wrapper",
"def require_admin_context(f):\n\n def wrapper(*args, **kwargs):\n if not _is_admin_context(args[0]):\n raise exceptions.AdminRequired()\n return f(*args, **kwargs)\n return wrapper",
"def _check_study_app_request(context):\n # NOTE: This assumes 'scopes' was overwritten by get_context_data.\n scopes = [x[0] for x in context['scopes']]\n\n try:\n scopes.remove('read')\n scopes.remove('write')\n except ValueError:\n return False\n\n if len(scopes) != 1:\n return False\n\n app_label = re.sub('-', '_', scopes[0])\n app = apps.get_app_config(app_label)\n\n if app and app.verbose_name == context['application'].name:\n return app_label\n\n return False",
"def with_valid_game(f):\n @functools.wraps(f)\n def work(*args, **kwargs):\n try:\n game = current_app.game\n except RuntimeError:\n abort(403)\n else:\n return f(*args, **kwargs, game=game)\n return work",
"def __call__(self, *args, **kwargs):\n with app.app_context(): # pragma: no cover\n return self.run(*args, **kwargs)",
"def should_register(self, app: FlaskUnchained) -> bool:\n if self.only_if in {None, _missing}:\n return True\n elif callable(self.only_if):\n return self.only_if(app)\n return bool(self.only_if)",
"def has_request_context():\n from .application import Nereid\n\n return base_has_request_context() and \\\n isinstance(current_app._get_current_object(), Nereid)",
"async def validate(self, ctx: Context, argument: str) -> bool:\n return True",
"def require_context(fn):\n @functools.wraps(fn)\n def _require_cuda_context(*args, **kws):\n with _runtime.ensure_context():\n return fn(*args, **kws)\n\n return _require_cuda_context",
"def check(fn):\n @wraps(fn)\n def wrapper(*args, **kwargs):\n verify_jwt_in_request()\n token = get_jwt_identity()\n if argument.lower() == 'machine':\n if token['is_user_token'] is False:\n from api.services.data_source_token import \\\n DataSourceTokenService\n _token_usage_counter_add(token['data_source_token']['id'])\n if DataSourceTokenService.check_if_token_is_active(\n DataSourceTokenService,\n token['data_source_token']['id']) is False:\n return ErrorObject.create_response(\n ErrorObject, HTTPStatus.FORBIDDEN,\n 'Token has been revoked')\n else:\n return fn(*args, **kwargs)\n else:\n return ErrorObject.create_response(\n ErrorObject, HTTPStatus.FORBIDDEN,\n 'Unable to access this resource with provided token')\n elif argument.lower() == 'user':\n if token['is_user_token'] is False:\n _token_usage_counter_add(token['data_source_token']['id'])\n return ErrorObject.create_response(\n ErrorObject, HTTPStatus.FORBIDDEN,\n 'Unable to access this resource with provided token')\n else:\n return fn(*args, **kwargs)\n else:\n raise ValueError('Unsupported argument provided')\n\n return wrapper",
"def register(app, fn):\n\n @functools.wraps(fn)\n def config_route(**kwargs):\n \"\"\"\n :param kwargs: str, id of existing entry\n :return: dict or exception\n \"\"\"\n\n return fn(app.config, **kwargs)\n\n app.route(*fn.route_args, **fn.route_kwargs)(config_route)",
"def test_apply_validation(self, has_validation):\n called_with = None\n def validator(*args, **kwargs):\n nonlocal called_with\n called_with = CallArguments(*args, **kwargs)\n\n ctx, name, value = object(), 'myparam', object()\n\n fparam = FParameter(\n POSITIONAL_ONLY,\n name=name,\n validator=validator if has_validation else None,\n )\n fparam.apply_validation(ctx, value)\n if has_validation:\n assert called_with.args == (ctx, name, value)\n else:\n assert called_with is None",
"def apply(self, app, route):\r\n @wraps(app, assigned=())\r\n def wrapped_app(*args, **kwargs):\r\n with environment_as(KRB5_KTNAME=self._keytab):\r\n return self.authorize(request, app, args, kwargs)\r\n return wrapped_app",
"def __call__(self, app: App, **kwargs: Any) -> None:\n ...",
"def validate_request(f):\n\n @wraps(f)\n def wrap(self, **kwargs):\n\n data = {}\n is_error, errmsg, req = DomainConstraintView._get_req_data(kwargs)\n if is_error:\n return errmsg\n\n try:\n for key in req:\n if key == 'convalidated':\n data[key] = True if (req[key] == 'true' or req[key] is\n True) else False\n else:\n data[key] = req[key]\n\n except Exception as e:\n return internal_server_error(errormsg=str(e))\n\n self.request = data\n return f(self, **kwargs)\n\n return wrap",
"def api_request_globals(f):\n @wraps(f)\n def inner(*args, **kwargs):\n request.is_api_request = True\n return f(*args, **kwargs)\n return inner",
"def http_var_required(parameter_name):\n def wrap(func):\n def decorator(request, *args, **kwargs):\n if not (parameter_name in request.POST or parameter_name in request.GET):\n return HttpResponseBadRequest('Please define GET or POST parameter '+parameter_name)\n return func(request, *args, **kwargs)\n return decorator\n return wrap",
"def _is_arg_exempt_from_validation(\n autologging_integration,\n function_name,\n argument,\n argument_index=None,\n argument_name=None,\n):\n return any(\n exemption.matches(\n autologging_integration,\n function_name,\n argument,\n argument_index,\n argument_name,\n )\n for exemption in _VALIDATION_EXEMPT_ARGUMENTS\n )",
"def _require_context(self):\n if not self._context:\n raise ContextRequired()"
] | [
"0.60219806",
"0.5993461",
"0.58701164",
"0.5856209",
"0.5856209",
"0.58344436",
"0.58344436",
"0.5770378",
"0.568177",
"0.568177",
"0.5681647",
"0.56343865",
"0.56343865",
"0.5310002",
"0.5292798",
"0.52796423",
"0.5245052",
"0.52111816",
"0.520173",
"0.5142669",
"0.512641",
"0.5124783",
"0.5075415",
"0.50570536",
"0.5048953",
"0.50477463",
"0.5004257",
"0.49858412",
"0.4958034",
"0.49355513"
] | 0.647955 | 0 |
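A self-contained sketch of the same validation pattern without the Synergy dependency; the KNOWN_APPS set below is an illustrative stand-in for ProcessContext.CONTEXT, not part of the source.

import functools
import sys

KNOWN_APPS = {"alert_daily", "site_hourly"}   # illustrative registry contents

def valid_process_name(function):
    @functools.wraps(function)   # preserve the wrapped function's metadata
    def _wrapper(options, *args, **kwargs):
        if options.app not in KNOWN_APPS:
            msg = "Aborting: application %r defined by --app option is unknown.\n" % options.app
            sys.stdout.write(msg)
            raise ValueError(msg)
        return function(options, *args, **kwargs)
    return _wrapper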
Convert an adjacency graph in SciPy sparse matrix format into an igraph graph. | def convert_sparse_to_igraph(indices, matrix):
# sources, targets = matrix.nonzero()
# weights = matrix[sources, targets]
# weights = np.array(weights)[0]
# print(dir(louvain))
# ig = igraph.Graph(zip(sources, targets), directed=True,
# edge_attrs={'weight': weights})
# return ig
g = igraph.Graph.Adjacency((matrix > 0).tolist())
g.es['weight'] = matrix[matrix.nonzero()]
# g.vs['label'] = node_names # or a.index/a.columns
return g | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def from_nxgraph(G):\n return nx.to_scipy_sparse_matrix(G).astype('float32')",
"def create_adjacency_matrix(graph):\n index_1 = [edge[0] for edge in graph.edges()]\n index_2 = [edge[1] for edge in graph.edges()]\n values = [1 for edge in graph.edges()]\n node_count = max(max(index_1)+1,max(index_2)+1)\n A = sparse.coo_matrix((values, (index_1,index_2)),shape=(node_count,node_count),dtype=np.float32)\n return A",
"def adjacency( graph : SpatialGraph, \n normalize : bool = True,\n sparse : bool = False\n ) -> np.ndarray :\n if graph.directed:\n raise NotImplementedError(\"Directed graphs are currently not supported.\")\n dtype = np.float if normalize else np.int\n\n adj = np.zeros((graph.num_nodes, graph.num_nodes), dtype=dtype)\n if sparse:\n adj = sp.coo_matrix(adj)\n for node in graph.nodes.values():\n for adj_node in node.neighbours.values():\n adj[node.id, adj_node.id] = 1\n return normalize_adj(adj, sparse) if normalize else adj",
"def getSparseAdjacencyMatrix( graph, attribute=None, transposed=False ):\n if (attribute is not None) and (attribute not in graph.es.attribute_names()):\n raise ValueError( \"Attribute does not exists.\" )\n \n row = []\n col = []\n data = []\n \n if attribute is None:\n if transposed:\n for edge in graph.es():\n s,t = edge.tuple\n row.append(t)\n col.append(s)\n else:\n for edge in graph.es():\n s,t = edge.tuple\n row.append(s)\n col.append(t)\n data = np.ones(len(graph.es()))\n else:\n if transposed:\n for edge in graph.es():\n s,t = edge.tuple\n row.append(t)\n col.append(s)\n else:\n for edge in graph.es():\n s,t = edge.tuple\n row.append(s)\n col.append(t)\n data = np.array(graph.es()[attribute])\n\n return sparse.coo_matrix((data, (row, col)) , shape=(len(graph.vs), len(graph.vs))).tocsr()",
"def adjacency(G, nodelist=None, weight=\"weight\"):\n\n if nodelist is None:\n nodelist = G.nodes()\n\n A = nx.to_scipy_sparse_matrix(G, nodelist=nodelist, weight=weight, format=\"csr\")\n\n return A",
"def to_nxgraph(G, directed=None):\n if directed is None:\n directed = is_directed(G)\n if directed:\n create_using = nx.DiGraph\n else:\n create_using = nx.Graph\n return nx.from_scipy_sparse_matrix(G, create_using=create_using)",
"def get_graph(adj):\n # remove all zeros rows and columns\n adj = adj[~np.all(adj == 0, axis=1)]\n adj = adj[:, ~np.all(adj == 0, axis=0)]\n adj = np.asmatrix(adj)\n G = nx.from_numpy_matrix(adj)\n return G",
"def read_graph(Amatrix):\n\tG = nx.from_numpy_matrix(Amatrix)\n\tG = G.to_undirected()\n\treturn G",
"def parse_subgraph(subgraph_nodes, graph, ass_matrix):\n\n sg = graph.subgraph(subgraph_nodes)\n sg_adj = nx.to_scipy_sparse_matrix(sg, format='csc') # New adjacency matrix.\n sg_ass = ass_matrix[list(sg.nodes)]\n\n return sg_adj, sg_ass",
"def build_graph_from_sparse_matrix(gdf, matrix, graph=None):\n\n n, m = matrix.shape\n assert(n == m)\n\n if graph is None:\n logger.info(\"Generating new graph from dataframe.\")\n\n graph = nx.DiGraph()\n for i in range(n):\n name = gdf.loc[i]['name']\n graph_add_node(graph, name)\n\n I, J, V = sparse.find(matrix)\n N = I.size\n\n for k in range(N):\n i = I[k]\n j = J[k]\n v = V[k]\n name_i = gdf.loc[i]['name']\n name_j = gdf.loc[j]['name']\n graph_increment_edge(graph, name_i, name_j, v)\n\n return graph",
"def as_igraph(self):\n if not self.igraph_representation:\n A = self.connectivity_matrix.values\n g = igraph.Graph.Adjacency((A>0).tolist())\n g.es['weight'] = A[A.nonzero()]\n g.vs['name'] = self.connectivity_matrix.columns\n self.igraph_representation = g\n return self.igraph_representation",
"def to_sparse(self):\n from divisi2.sparse import SparseMatrix\n return SparseMatrix(self, self.row_labels, self.col_labels)",
"def dense_to_sparse(adj):\n assert adj.dim() >= 2 and adj.dim() <= 3\n assert adj.size(-1) == adj.size(-2)\n\n index = adj.nonzero(as_tuple=True)\n #print(index)\n edge_attr = adj[index]\n\n if len(index) == 3:\n batch = index[0] * adj.size(-1)\n index = (batch + index[1], batch + index[2])\n\n return torch.stack(index, dim=0), edge_attr",
"def preprocess_adj(adj):\n adj = nx.adjacency_matrix(nx.from_dict_of_lists(adj)) # return a adjacency matrix of adj ( type is numpy)\n adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0])) #\n # return sparse_to_tuple(adj_normalized)\n return adj_normalized.todense()",
"def formAdjacencyMatrix(self):\n self.adjacencyMatrix = dict()\n for i in self.node:\n self.adjacencyMatrix[i] = dict()\n for j in self.node:\n self.adjacencyMatrix[i][j] = 0\n \n for ij in self.link:\n self.adjacencyMatrix[self.link[ij].tail][self.link[ij].head] = 1",
"def compute_adjacency_matrix(G):\n\n iG = nx.convert_node_labels_to_integers(G)\n adj_list = iG.adjacency_list()\n n_nodes = len(iG.nodes())\n\n adj_mat = np.zeros((n_nodes, n_nodes))\n for x in xrange(n_nodes):\n adj_mat[x, adj_list[x]] = 1\n\n return adj_mat",
"def adj_matrix(G,nodelist=None,weight='weight'):\n return nx.to_numpy_matrix(G,nodelist=nodelist,weight=weight)",
"def adjacency_matrix(g):\n nodes = sorted(g.keys())\n adj = []\n for row_node in nodes:\n row = []\n for column_node in nodes:\n if column_node in g[row_node]:\n row.append(1)\n else:\n row.append(0)\n adj.append(row)\n \n return adj",
"def convert_to_dense_graph(self) -> cjg.Dense:\n N = len(self.indices)\n ising_int = self.ising_interactions()\n\n # cxxjij.graph.dense\n cxx_dense_ising = cjg.Dense(N)\n for i in range(N):\n if ising_int[i,i] != 0.0:\n cxx_dense_ising[i,i] = ising_int[i,i]\n for j in range(i+1, N):\n if ising_int[i,j] != 0.0:\n cxx_dense_ising[i,j] = ising_int[i,j]\n \n return cxx_dense_ising",
"def get_graph_blogcatalog():\n from scipy.io import loadmat\n\n def sparse2graph(x):\n from collections import defaultdict\n from six import iteritems\n\n G = defaultdict(lambda: set())\n cx = x.tocoo()\n for i, j, v in zip(cx.row, cx.col, cx.data):\n G[i].add(j)\n return {str(k): [str(x) for x in v] for k, v in iteritems(G)}\n\n mat = loadmat('./samples/blogcatalog.mat')\n A = mat['network']\n data = sparse2graph(A)\n\n G = eg.Graph()\n for u in data:\n for v in data[u]:\n G.add_edge(u, v)\n\n return G",
"def makeGeneralizedAdjacencyMatrix( adjacencyMatrix, sigma = 1/2 ):\n n = adjacencyMatrix.shape[0]\n D = np.sum( adjacencyMatrix, axis=0 )\n\n \n D1 = sp.sparse.lil_matrix( ( n, n ) ) #Will correspond to D^{-sigma}\n D1_vector = ( np.power( abs( D ), - float( sigma ) ) )\n for i in range(n):\n D1[i,i] = D1_vector[i]\n D1 = sp.sparse.dia_matrix( D1 )\n \n D2 = sp.sparse.lil_matrix( ( n, n ) ) #will correspond to D^{sigma-1}\n D2_vector = ( np.power( abs( D ), float( sigma - 1 ) ) ) \n for i in range(n):\n D2[i,i] = D2_vector[i]\n D2 = sp.sparse.dia_matrix( D2 )\n\n return D1 @ sp.sparse.csr_matrix( adjacencyMatrix ) @ D2",
"def to_sparse(self):\n if self.rep.fmt == 'sparse':\n return self\n\n return self.from_rep(self.rep.to_sdm())",
"def _from_dict_to_sparse(self, adj_dict):\n indices = list(adj_dict.keys())\n values = [1] * len(indices)\n\n edge_index = torch.LongTensor(indices).T.to(self.device)\n edge_attr = torch.FloatTensor(values).to(self.device)\n\n edge_index, edge_attr = utils.to_symmetric(edge_index, edge_attr, self.n)\n\n return SparseTensor.from_edge_index(edge_index=edge_index,\n edge_attr=edge_attr,\n sparse_sizes=torch.Size([self.n, self.n]))",
"def preprocess_adj(adj):\n adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))\n return sparse_to_tuple(adj_normalized)",
"def preprocess_adj(adj):\n adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))\n return sparse_to_tuple(adj_normalized)",
"def preprocess_adj(adj):\n adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))\n return sparse_to_tuple(adj_normalized)",
"def preprocess_adj(adj):\n adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))\n return sparse_to_tuple(adj_normalized)",
"def preprocess_adj(adj):\n adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))\n return sparse_to_tuple(adj_normalized)",
"def edges_to_adjacency_matrix(mesh):\n adja = graph.edges_to_coo(mesh.edges,\n data=np.ones(len(mesh.edges),\n dtype=np.int8))\n\n return sparse.triu(adja) + sparse.tril(adja).transpose()",
"def preprocess_adj(adj):\n adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0])) # A_~\n return sparse_to_tensor(adj_normalized)"
] | [
"0.7428788",
"0.698894",
"0.6937286",
"0.6890723",
"0.6840507",
"0.6761767",
"0.6671198",
"0.6508733",
"0.64971626",
"0.6486387",
"0.64698505",
"0.6466938",
"0.6412773",
"0.6366936",
"0.6302056",
"0.61876756",
"0.615492",
"0.6137109",
"0.61366796",
"0.60860234",
"0.6081462",
"0.6042604",
"0.6040038",
"0.60226834",
"0.60226834",
"0.60226834",
"0.60226834",
"0.60226834",
"0.60082036",
"0.600217"
] | 0.79616356 | 0 |
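Note that the active lines of the document above appear to assume a dense matrix (e.g. (matrix > 0).tolist()); for input that really is a SciPy sparse matrix, one sketch is to walk the COO triplets so the matrix never has to be densified (the function name here is an assumption):

import igraph
import scipy.sparse as sp

def sparse_to_igraph(matrix):
    coo = sp.coo_matrix(matrix)    # canonical (row, col, data) triplets
    g = igraph.Graph(n=coo.shape[0],
                     edges=list(zip(coo.row.tolist(), coo.col.tolist())),
                     directed=True)
    g.es["weight"] = coo.data.tolist()   # one weight per edge, in the same order
    return g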
Returns a value K which scales logarithmically with the number of cells in a sample. | def get_k(df):
return int(np.log(len(df.columns))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _compute_kl(self, lvl):\n kl = [] # kernal length\n for n in range(lvl):\n fct = self.scaling**n # up-sampling factor\n kl.append(fct*(self.nfreq-1)+1)\n kl.append(kl[-1]) # repeat the value of the coarsest scale for the approximation coefficient\n return kl[::-1]",
"def idcg(k):\n res = sum([1.0 / math.log(i + 2, 2) for i in range(k)])\n if not res:\n return 1.0\n else:\n return res",
"def MaxHks(N): \n return np.log2(N-1)/2",
"def get_level(k):\r\n return int(log2(k))",
"def log_marg_k(self, k):\n k_N = self.prior.k_0 + self.counts[k]\n v_N = self.prior.v_0 + self.counts[k]\n m_N = self.m_N_numerators[k]/k_N\n S_N = self.S_N_partials[k] - k_N*np.square(m_N)\n return (\n - self.counts[k]*self.D/2.*self._cached_log_pi\n + self.D/2.*math.log(self.prior.k_0) - self.D/2.*math.log(k_N)\n + self.prior.v_0/2.*np.log(self.prior.S_0).sum()\n - v_N/2.*np.log(S_N).sum()\n + self.D*(self._cached_gammaln_by_2[v_N] - self._cached_gammaln_by_2[self.prior.v_0])\n )",
"def check_k(k):\n MAX_LOGK = 200 * numpy.log(2)\n\n if k is None:\n return k\n try:\n k = numpy.float64(k)\n except ValueError:\n raise NddError('%r is not a valid cardinality' % k)\n if k.ndim:\n # if k is a sequence, set k = prod(k)\n if k.ndim > 1:\n raise NddError('k must be a scalar or 1D array')\n logk = numpy.sum(numpy.log(x) for x in k)\n if logk > MAX_LOGK:\n # too large a number; backoff to n_bins?\n # TODO: log warning\n raise NddError('k is too large (%e).'\n 'Must be < 2^200 ' % numpy.exp(logk))\n k = numpy.prod(k)\n else:\n # if a scalar check size\n if k <= 0:\n raise NddError('k must be > 0 (%r)' % k)\n if numpy.log(k) > MAX_LOGK:\n raise NddError('k is too large (%e).' 'Must be < 2^200 ' % k)\n if not k.is_integer():\n raise NddError('k must be a whole number (got %r).' % k)\n\n return k",
"def log_factorial(k):\n\tif k ==0:\n\t\treturn 0\n\telse:\n\t\treturn 0.5*np.log(2*TMath.Pi()*k) + k*np.log(k) - k + np.log(1+1./(12*k) + 1/(288.*k**2) -139./(51840*k**3)-571./(2488320*k**4) + 163879./(209018880*k**5))",
"def dcg_at_k(cls, r, k):\n assert k >= 1\n r = np.asfarray(r)[:k]\n if r.size:\n return np.sum(r / np.log2(np.arange(2, r.size + 2)))\n return 0.",
"def build_kbins(minlogk=-2.25, maxlogk=-0.65, bins=17):\n kbins = 10**np.linspace(minlogk, maxlogk, num=bins)\n kcenters = np.exp(0.5 * (np.roll(np.log(kbins), -1) + np.log(kbins)))[:-1]\n return kbins, kcenters",
"def kx(self, k: int) -> float:\n result = self._read_inline(f\"kx({k})\")\n return result",
"def KL(P, Q):\n assert P.size() == Q.size()\n # To prevent divide by zero\n Q = Q + 1e-15\n return torch.sum(P * torch.log(P / Q))",
"def KL(p, q):\n return np.sum(p * np.log(p / q))",
"def kld(mu, log_var):\n return (mu + log_var).sum() # TODO Your code goes here.",
"def _compute_sampling_threshold(global_step, k):\n return k / (k + math.exp(global_step / k))",
"def fit_s_k(self, s_k, max_k=50):\n r_k = dict()\n max_val = float('-inf')\n\n for k in range(1, max_k + 1):\n r_k[k] = 1.0 / s_k[k]\n\n if k > 1:\n d = (r_k[k] - r_k[k-1]) / math.log(k)\n if d > max_val:\n max_val = d\n self.K = k\n self.s_k = s_k\n return self",
"def get_k(self, n, m):\n k = m/n * log(2)\n return int(k)",
"def _K(s):\n p = 0\n for k in range(-10, 10, 1):\n p += (-1)**k * np.exp(-2 * k**2 * s**2)\n return p",
"def kl(p, q):\n return np.sum(np.where(p != 0, p * np.log(p / q), 0))",
"def log_det_K(self, Ks=None):\n log_det = 0.\n for K in self.Ks:\n rank_d = self.n / K.shape[0]\n det = np.linalg.slogdet(K)[1]\n log_det += rank_d * det\n return log_det",
"def Tk(self, x, k):\n self._check(x, k)\n x = float(x)\n log_x = log(x)\n val = float(0)\n rho = self.rho[k]\n for n in range(1, self.N + 1):\n rho_k_over_n = rho[n]\n mu_n = self.mu[n]\n if mu_n != 0:\n z = Ei(rho_k_over_n * log_x)\n val += (mu_n / float(n)) * (2 * z).real()\n return -val",
"def kl(p, q):\n p = np.asarray(p, dtype=np.float)\n q = np.asarray(q, dtype=np.float)\n\n return np.sum(np.where(p != 0, p * np.log(p / q), 0))",
"def maxlevel(self, N, c=1):\n return int(np.floor(np.log(N/self.nfreq/c)/np.log(self.scaling))) + 1",
"def kl(p, q):\n p = np.asarray(p, dtype=float)\n q = np.asarray(q, dtype=float)\n\n return np.where(p != 0, p * np.log(p / q), 0).sum()",
"def k_of_x(x):\n dx = x[1] - x[0]\n N = x.size\n dk = 2.*np.pi/(N*dx)\n inull = N//2\n k = dk*(np.linspace(1, N, N)-inull)\n\n return k",
"def gauss_ker(k, sig):\n\tx = np.linspace(-(k//2), (k//2), k)\n\tgx, gy = np.meshgrid(x, x)\n\tkernel = np.exp(-1*(gx**2 + gy**2)/(2*(sig**2)))\n\treturn kernel",
"def K(self, X, Xstar):\n r = l2norm_(X, Xstar)\n num = - 2 * np.sin(np.pi * r / self.period)\n return np.exp(num / self.l) ** 2 + 1e-4",
"def weight_log(val):\n return val * math.log(val)",
"def KLDiv(sample_E, sample_T):\n x = np.unique(sample_E.append(sample_T))\n x = x.reshape((x.size, 1))\n \n P = sample_E.to_numpy().reshape((sample_E.size, 1))\n Q = sample_T.to_numpy().reshape((sample_T.size, 1))\n \n model = KernelDensity(bandwidth=2)\n model.fit(P)\n prob_P = np.exp(model.score_samples(x))\n model.fit(Q)\n prob_Q = np.exp(model.score_samples(x))\n \n return entropy(prob_P, prob_Q)",
"def compute_K(alpha: torch.FloatTensor,\n beta: torch.FloatTensor) -> torch.FloatTensor:\n return torch.exp(compute_log_K(alpha, beta))",
"def determine_k(dataset, range_k, n_seed=30):\r\n range_Ks = np.arange(0,range_k,1,dtype=int) #range of delays to study\r\n h_K=np.zeros((10,range_k))\r\n \r\n for i in range(10):\r\n for k, K in enumerate(range_Ks):\r\n traj_matrix= embed.trajectory_matrix(dataset, K=K)\r\n labels= cl.kmeans_knn_partition(traj_matrix, n_seed)\r\n h= op_calc.get_entropy(labels)\r\n h_K[i,k]=h\r\n \r\n return(h_K)"
] | [
"0.62024623",
"0.6159338",
"0.6130816",
"0.60664135",
"0.6045936",
"0.6011657",
"0.5971239",
"0.59182364",
"0.58902717",
"0.5853251",
"0.5848989",
"0.58119035",
"0.58073115",
"0.5798724",
"0.5790387",
"0.5779547",
"0.5767433",
"0.5755783",
"0.575504",
"0.57255423",
"0.5699927",
"0.56840396",
"0.567454",
"0.5627566",
"0.56048924",
"0.5595604",
"0.5590354",
"0.5587835",
"0.55795705",
"0.5558928"
] | 0.70775306 | 0 |
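A quick check of how the heuristic above scales — k = int(ln(number of cells)) grows very slowly with sample size:

import numpy as np

for n_cells in (100, 1_000, 10_000, 100_000):
    print(n_cells, int(np.log(n_cells)))   # prints 4, 6, 9, 11 respectively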
Authorize with the Spotify API and fetch a bearer token. | def authorize(self):
try:
auth_url = 'https://accounts.spotify.com/api/token'
headers={}
data={}
data_string = f"{self.client_id}:{self.client_secret}"
data_bytes = data_string.encode("ascii")
base_bytes = base64.b64encode(data_bytes)
base_message = base_bytes.decode("ascii")
headers['Authorization'] = f"Basic {base_message}"
data = parse.urlencode({"grant_type": "client_credentials"})
data = data.encode('ascii')
req = request.Request(auth_url,data=data, headers=headers)
logging.info("Successfully called Spotify token API!")
except:
logging.error("Failed to create authorization request!")
return False
if req is not None:
try:
response = request.urlopen(req).read().decode()
except error.URLError as e:
response = e.read().decode("utf8", 'ignore')
logging.error(response)
return False
try:
_json = json.loads(response)
self.token = _json["access_token"]
logging.info("Successfully received token from Spotify!")
except:
logging.error("Could not fetch token from response!")
return False
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def authorize():\n encoded_auth = base64.b64encode(\n (os.environ[\"SPOTIFY_CLIENT_ID\"] + ':' + os.environ[\"SPOTIFY_CLIENT_SECRET\"]).encode())\n headers = {\n 'Authorization': 'Basic {}'.format(encoded_auth.decode(\"utf-8\"))\n }\n\n response = requests.post(os.environ['SPOTIFY_AUTH_URL'], data={'grant_type': 'client_credentials'},\n headers=headers).text\n return json.loads(response)",
"def get_token():\n\theaders = {\n\t\t'Authorization': 'Basic ' + (base64.b64encode((client_id + ':' + client_secret).encode(\"utf-8\"))).decode(\"utf-8\")}\n\toptions = {\n\t\t'grant_type': 'client_credentials',\n\t\t'json': True,\n\t}\n\n\tresponse = requests.post(\n\t\t'https://accounts.spotify.com/api/token',\n\t\theaders=headers,\n\t\tdata=options\n\t)\n\tif response.status_code == 200:\n\t\tcontent = json.loads(response.content.decode('utf-8'))\n\t\taccess_token = content.get('access_token', None)\n\t\treturn access_token\n\telse:\n\t\treturn None",
"def authenticate_spotify_api(SPOTIPY_CLIENT_ID, SPOTIPY_CLIENT_SECRET):\r\n auth_manager = SpotifyClientCredentials(client_id = SPOTIPY_CLIENT_ID, \r\n client_secret=SPOTIPY_CLIENT_SECRET)\r\n \r\n return spotipy.Spotify(auth_manager=auth_manager)",
"def authenticate(redirect_uri, client_cred_manager, username, scope,client_id,client_secret):\r\n\r\n sp = spotipy.Spotify(client_credentials_manager = client_cred_manager)\r\n token = util.prompt_for_user_token(username, scope, client_id, client_secret, redirect_uri)\r\n if token:\r\n sp = spotipy.Spotify(auth=token)\r\n else:\r\n print(\"Can't get token for\", username)\r\n return sp",
"def auth(self):\n token = spotipy.util.prompt_for_user_token(self.username,\n self.scope,\n client_id = self.client_id,\n client_secret = self.client_secret,\n redirect_uri= self.redirect_uri)\n if token:\n self.spotify = spotipy.Spotify(auth=token)\n else:\n print(colored.stylize(\"\"\"\\n[*] \"\"\", colored.fg(\"light_red\")) + 'Cant get token for: %s\\n' % (self.username))\n exit()",
"def authorize():\n scopes = 'playlist-modify-public playlist-modify-private playlist-read-private playlist-read-collaborative user-read-email user-read-private'\n\n spotify_authorize_url = 'https://accounts.spotify.com/authorize?'\n params = {\n 'response_type': 'code', \n 'client_id': SPOTIFY_CLIENT_ID,\n 'redirect_uri': 'http://0.0.0.0:5000/callback',\n 'scope': scopes, \n 'show_dialog': True\n }\n\n query_params = urllib.parse.urlencode(params)\n response = make_response(redirect(spotify_authorize_url + query_params))\n return response",
"def get_token():\n\n def token_helper():\n token = util.prompt_for_user_token(username=\"robbo1992\",\n scope='user-library-read playlist-modify-private playlist-modify',\n client_id=config[\"spotify\"][\"client_id\"],\n client_secret=config[\"spotify\"][\"secret_id\"],\n redirect_uri='http://localhost:8080', cache_path=spotify_cache)\n return token\n\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n if motley.internet:\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n log.error(\"Authentication error in create_token method.\")\n raise Exception",
"def authorize(self) -> None:\n\n if not self.login_secret:\n #TODO trigger error\n self.login()\n \n\n sObj = Splitwise(self.consumer_key, self.consumer_secret)\n self.access_token = sObj.getAccessToken(\n self.oauth_token,\n self.login_secret,\n self.oauth_verifier\n )",
"def create_token():\n def token_helper():\n token = util.prompt_for_user_token(username=\"robbo1992\", scope='user-library-read playlist-modify-private playlist-modify',\n client_id=config[\"spotify\"][\"client_id\"], client_secret=config[\"spotify\"][\"secret_id\"],\n redirect_uri='http://localhost:8080', cache_path=spotify_cache)\n return token\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n if motley.internet:\n if token_helper():\n log.debug(\"Succesfully generated a spotify token for authentication\")\n return spotipy.Spotify(auth=token_helper())\n else:\n log.error(\"Authentication error in create_token method.\")",
"def token_auth(self):\n self.client = APIClient()\n self.user = User.objects.create_user(username='testuser', email='[email protected]', password='testpassword')\n self.token = Token.objects.create(user=self.user)\n self.client.credentials(HTTP_AUTHORIZATION='Token ' + self.token.key)",
"def Connect(self,scope):\n\n \"\"\"\n Calling util.prompt_for_user_token will open Spotify’s application authorization\n page in your browser (and require you to log in if you are not already logged in\n to spotify.com), unless a locally cached access token exist from a previous authorization/authentication.\n \"\"\"\n try:\n token = util.prompt_for_user_token(\n self.username,\n scope,\n self.client_id,\n self.secret_id,\n self.redirect_uri)\n except ImportError:\n self._isConnected = False\n print(\" onnecting to Spotify failed\") \n\n\n if token:\n sp = spotipy.Spotify(auth=token)\n self._isConnected = True\n return sp\n else:\n print(\"Can't get token for\", self.username)\n self._isConnected = False",
"def _authorize(self):\n auth = tweepy.OAuthHandler(self.keys[\"consumer_key\"], self.keys[\"consumer_secret\"])\n auth.set_access_token(self.keys[\"access_token\"], self.keys[\"access_token_secret\"])\n return tweepy.API(auth)",
"def fetch_token():\n bucket = os.environ[\"SPOTIFY_BUCKET_NAME\"]\n path = os.getenv(\"SPOTIFY_BUCKET_PATH\", \"\")\n logger.info(\"Reading Spotify OAuth token from s3://%s/%s/token.json.\" %\n (bucket, path))\n s3 = boto3.client('s3')\n content_object = s3.get_object(Bucket=bucket, Key=\"%s/token.json\" % path)\n file_content = content_object['Body'].read().decode('utf-8')\n token = json.loads(file_content)\n return token",
"def authenticate(self):\n try:\n self._token = self._lookup_token()\n except:\n raise HTTPError(\n \"Unable to get short-lived access token for cyberark storage\"\n )",
"def auth(self):\n return self.api(self.token)",
"def authorize(self, oauth2_token):\r\n storage = file.Storage(oauth2_token)\r\n credentials = storage.get()\r\n http = credentials.authorize(httplib2.Http())\r\n self.service = discovery.build('youtube', 'v3', http=http)",
"def _authenticate(self):\n url = self.endpoint + \"/tokens\"\n h = httplib2.Http()\n response, rawcontent = h.request(\n url, \n method=\"POST\",\n headers={ \"Content-Type\":\"application/json\" },\n body=json.dumps(self.credentials()))\n content = json.loads(rawcontent)\n self.token = content['access']['token']['id']\n #TODO: this needs to convert the ISO8601 string to a timestamp\n self.expiration = content['access']['token']['expires']\n self.catalog = content['access']['serviceCatalog']",
"def get_spotify_token(self):\n scope = \"playlist-modify-public playlist-modify-private user-read-email user-library-modify playlist-read-private\"\n token = spotipy.util.prompt_for_user_token(\n username=self.username,\n scope=scope,\n client_id=secrets.client_id,\n client_secret=secrets.client_secret,\n redirect_uri=secrets.redirect_uri\n )\n sp = spotipy.Spotify(auth=token)\n return sp",
"def get_spotify_authtoken(client_id, client_secret, scope, refresh_token=None,\n redirect_uri=\"https://example.com/callback\"):\n\n # If refresh token has been passed in, try to use it to generate a\n # new auth_token.\n\n if refresh_token:\n # Setup Base64 Client Secret to Send\n secret = f\"{client_id}:{client_secret}\"\n b64_secret = base64.b64encode(bytes(secret, \"utf-8\")).decode(\"utf-8\")\n\n body = {\"grant_type\": \"refresh_token\",\n \"refresh_token\": refresh_token}\n auth_url = \"https://accounts.spotify.com/api/token\"\n auth_header = {\"Authorization\": f\"Basic {b64_secret}\"}\n\n res = requests.post(auth_url, data=body, headers=auth_header)\n\n auth_token = res.json()[\"access_token\"]\n try:\n refresh_token = res.json()[\"refresh_token\"]\n except Exception:\n refresh_token = None\n\n # If no refresh token is available, generate a new auth_token by\n # prompting the user to login and authorise the application.\n\n else:\n auth_url = f\"https://accounts.spotify.com/authorize?client_id={client_id}&response_type=code&redirect_uri={redirect_uri}&scope={scope}\"\n\n # Setup Browser\n opts = Options()\n opts.add_argument('--no-sandbox')\n browser = Chrome(\"./chromedriver/chromedriver\", options=opts)\n\n # Go to auth page, sign-in and wait for code to be returned\n browser.get(auth_url)\n WebDriverWait(browser, 60).until(EC.url_contains(redirect_uri))\n\n # Pull auth code from redirect_uri & close browser\n code = browser.current_url.split(\"code=\")[1].split(\"#\")[0]\n browser.close()\n\n # Step 2: Auth Token\n\n body = {\"grant_type\": \"authorization_code\",\n \"code\": code,\n \"redirect_uri\": redirect_uri,\n \"client_id\": client_id,\n \"client_secret\": client_secret}\n auth_url = \"https://accounts.spotify.com/api/token\"\n res = requests.post(auth_url, data=body)\n auth_token = res.json()[\"access_token\"]\n try:\n refresh_token = res.json()[\"refresh_token\"]\n except Exception:\n refresh_token = None\n\n return (auth_token, refresh_token)",
"def update_access_token(self):\n self.token = util.prompt_for_user_token(self._username, scope,\n client_id=const.CLIENT_ID,\n client_secret=const.CLIENT_SECRET,\n redirect_uri=const.REDIRECT_URL)\n self._client = spotipy.Spotify(auth=self.token)",
"def authenticate(self):\n # Check if we already have access token and secret\n if not os.path.exists(self.sTOKEN_FILE):\n # 1) Obtain Request token\n oauth = OAuth1(self.apiKey, client_secret=self.apiKeySecret, callback_uri='oob')\n r = requests.post(url=self.sREQUEST_TOKEN_URL, auth=oauth)\n credentials = parse_qs(r.content)\n resource_owner_key = credentials.get('oauth_token')[0]\n resource_owner_secret = credentials.get('oauth_token_secret')[0]\n\n # 2) Obtain authorization for the user to access resources\n # Redirect the user to /authorize and get the callback\n authorize_url = self.sAUTHORIZE_URL + '?oauth_token=' + resource_owner_key + \\\n '&oauth_consumer_key=' + self.apiKey + \\\n '&Access=Full&Permissions=Modify'\n\n print 'Please go here and authorize,', authorize_url\n verifier = raw_input('Please enter the six-digit PIN code: ')\n\n # 3) Obtain final access token\n oauth = OAuth1(self.apiKey, client_secret = self.apiKeySecret,\n resource_owner_key = resource_owner_key,\n resource_owner_secret = resource_owner_secret,\n verifier=verifier)\n r = requests.post(url=self.sACCESS_TOKEN_URL, auth=oauth)\n\n credentials = parse_qs(r.content)\n access_token = credentials.get('oauth_token')[0]\n access_token_secret = credentials.get('oauth_token_secret')[0]\n\n # Store access token so we can use it later\n with open(self.sTOKEN_FILE, 'w') as f:\n json.dump({'access_token': access_token,\n 'access_token_secret': access_token_secret}, f)\n\n else:\n with open(self.sTOKEN_FILE, 'r') as f:\n tokens = json.load(f)\n access_token = tokens.get('access_token')\n access_token_secret = tokens.get('access_token_secret')\n\n # store the file access token details for use in other methods\n self.accessToken = access_token\n self.accessTokenSecret = access_token_secret",
"def bearer_authentication(self, token: str) -> None:\n self.api_session.headers.update({'Authorization': f'Bearer {token}'})",
"def get_token(): \n \n # Token url\n token_endpoint = \"https://api.signicat.io/oauth/connect/token\"\n # Setting the grant type to client_credentials\n data = {'grant_type':'client_credentials', 'scope':'identify'}\n # Posting to token url with HTTP basic authentication\n token = requests.post(token_endpoint, data=data,allow_redirects=True, auth=(config.CLIENT_ID, config.CLIENT_SECRET))\n # Converting json string to json\n token_json = json.loads(token.text)\n \n # Returning the access_token\n return token_json['access_token']",
"def _authorize(self, token=None, store_token=False, reenter_token=False): # pragma: no cover\n\n if token is None and \"MAST_API_TOKEN\" in os.environ:\n token = os.environ[\"MAST_API_TOKEN\"]\n\n if token is None:\n token = keyring.get_password(\"astroquery:mast.stsci.edu.token\", \"masttoken\")\n\n if token is None or reenter_token:\n auth_server = conf.server.replace(\"mast\", \"auth.mast\")\n auth_link = auth_server + \"/token?suggested_name=Astroquery&suggested_scope=mast:exclusive_access\"\n info_msg = \"If you do not have an API token already, visit the following link to create one: \"\n log.info(info_msg + auth_link)\n token = getpass(\"Enter MAST API Token: \")\n\n # store password if desired\n if store_token:\n keyring.set_password(\"astroquery:mast.stsci.edu.token\", \"masttoken\", token)\n\n self._session.headers[\"Accept\"] = \"application/json\"\n self._session.cookies[\"mast_token\"] = token\n info = self.session_info(silent=True)\n\n if not info[\"anon\"]:\n log.info(\"MAST API token accepted, welcome %s\" % info[\"attrib\"].get(\"display_name\"))\n else:\n log.warn(\"MAST API token invalid!\")\n\n return not info[\"anon\"]",
"def get_token():\n\turl = SPOTIFY_ACCOUNT_HOST + 'token'\n\tcurrent_refresh_token = config.get('spotify_credentials', 'refresh_token')\n\tbody = {'grant_type': 'refresh_token', 'refresh_token': current_refresh_token}\n\tauth_header = 'Basic ' + b64encode('{0}:{1}'.format(SPOTIFY_CLIENT_ID, \n\t\tSPOTIFY_CLIENT_SECRET))\n\theaders = {'Authorization': auth_header}\n\n\tresponse = requests.post(url, headers=headers, data=body).json()\n\tif response.has_key('refresh_token'):\n\t\tlogging.debug('Received new refresh token')\n\t\tconfig.set('spotify_credentials', 'refresh_token', \n\t\t\tresponse['refresh_token'])\n\treturn response['access_token']",
"def authorize(self):\n login_data = {\n 'username': self.username,\n 'password': self.password,\n }\n r = requests.post(f'{self.api_host}/auth', json=login_data)\n\n if r.status_code == 200:\n CentralStorageClient.token = r.json()['access_token']\n\n return True\n\n return False",
"def _request_token(self):\n params = {\n 'grant_type': 'client_credentials',\n 'client_id': self.client_id,\n 'client_secret': self.client_secret\n }\n\n response = self._http_request(\n method='POST',\n headers={'Content-Type': 'application/x-www-form-urlencoded'},\n full_url=self.auth_url,\n data=params\n )\n access_token = response.get('access_token')\n auth_header = {'Authorization': f'Bearer {access_token}'}\n return auth_header",
"def auth():\n\tcode = request.query.code\n\tauth = 'https://foursquare.com/oauth2/access_token'\n\tparams = dict(\n\t\tclient_id=CLIENT_ID,\n\t\tclient_secret=CLIENT_SECRET,\n\t\tgrant_type='authorization_code',\n\t\tredirect_uri=REDIRECT_URI,\n\t\tcode=code\n\t)\n\tauth_says = fetch('%s?%s'%(auth, urlencode(params)))\n\tauth_response = json.loads(auth_says.content)\n\tif 'access_token' in auth_response:\n\t\toauth_token=auth_response['access_token']\n\t\tresponse.set_cookie('user', oauth_token, secret=CLIENT_SECRET)\n\t\tlogging.info('new oauth_token:%s'%oauth_token)\n\t\tredirect('/')\n\telse:\n\t\tlogging.error(auth_response)\n\t\tabort()",
"def bearer_auth():\n authorization = request.headers.get(\"Authorization\")\n if not (authorization and authorization.startswith(\"Bearer \")):\n response = app.make_response(\"\")\n response.headers[\"WWW-Authenticate\"] = \"Bearer\"\n response.status_code = 401\n return response\n slice_start = len(\"Bearer \")\n token = authorization[slice_start:]\n\n return jsonify(authenticated=True, token=token)",
"def __call__(self, context, callback):\r\n\r\n callback((('authorization', 'Bearer ' + self.token_hash ),), None)"
] | [
"0.7426291",
"0.71426195",
"0.71205246",
"0.70915276",
"0.7082394",
"0.69950104",
"0.6989322",
"0.69705796",
"0.67263836",
"0.67155063",
"0.6676822",
"0.6665931",
"0.66071135",
"0.656264",
"0.65159553",
"0.65093845",
"0.6490198",
"0.6468989",
"0.64642835",
"0.6463644",
"0.64543784",
"0.63799185",
"0.6377546",
"0.6376187",
"0.6371332",
"0.63686883",
"0.63655657",
"0.63611656",
"0.63561124",
"0.63238025"
] | 0.81158495 | 0 |
Convert milliseconds to seconds | def millisec_to_sec(self, millisec):
return millisec / 1000 | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def MillisToSec(self):\n self.Millis = [item / 1000 for item in self.Millis]\n return self.Millis",
"def _ms_to_time(self, milliseconds):\n \n ms = milliseconds\n \n # Get the last 3 digits of the milliseconds\n trunc_ms = ms % 1000\n seconds = (ms / 1000)\n minutes = (seconds / 60)\n hours = minutes / 60\n \n # hours can go above 24, so don't modulus\n return '%02d:%02d:%02d,%03d' % (hours, minutes % 60, seconds % 60, trunc_ms)",
"def as_seconds(*, seconds=0, minutes=0, hours=0, days=0, weeks=0, milliseconds=0, as_type=None):\n delta = datetime.timedelta(seconds=seconds, minutes=minutes, hours=hours,\n days=days, weeks=weeks, milliseconds=milliseconds)\n seconds = delta.total_seconds()\n frac, intpart = math.modf(seconds)\n if frac == 0.0:\n seconds = int(intpart)\n if as_type is not None:\n seconds = as_type(seconds)\n return seconds",
"def ms_to_time(ms):\n milliseconds = int(ms % 1000)\n seconds = int((ms / 1000) % 60)\n minutes = int(((ms / 1000 - seconds) / 60) % 60)\n\n return (minutes, seconds, milliseconds)",
"def ms_from_timedelta(td):\n return (td.seconds * 1000) + (td.microseconds / 1000.0)",
"def get_millis(seconds):\n return seconds * 10 ** 3",
"def convert_timeval(seconds_since_epoch):\n frac, whole = math.modf(seconds_since_epoch)\n microseconds = math.floor(frac * 1000000)\n seconds = math.floor(whole)\n return seconds, microseconds",
"def to_seconds(hours, minutes, seconds):\n return hours*60**2 + minutes*60 + seconds",
"def minutes_to_seconds(minutes):\n return minutes * 60",
"def sec2hms(seconds):\n hours, seconds = divmod(seconds, 60**2)\n minutes, seconds = divmod(seconds, 60)\n return int(hours), int(minutes), seconds",
"def t_sec(self):\n return self.t/self.parameters['time_conversion']",
"def convert_to_seconds(unit, value):\n seconds = 1\n minutes = 60\n hours = 3600\n days = 86400\n return value*eval(unit)",
"def _time_ms(dt):\n epoch = datetime.datetime.utcfromtimestamp(0)\n diff = dt - epoch\n return diff.total_seconds() * 1000",
"def to_seconds(time):\n return 3600 * time",
"def timestamp2sec(timestamp):\n return (int(timestamp.seconds) + 60 * int(timestamp.minutes) + 3600 * int(timestamp.hours) + float(int(timestamp.hours) / 1000))",
"def to_seconds(hours, minutes, seconds):\n return hours*3600+minutes*60+seconds",
"def _to_seconds(value, unit):\n if (not isinstance(value, int)) or (not isinstance(unit, int)):\n raise InstrumentProtocolException(\"Invalid second arguments!\")\n \n if unit == 1:\n return value * 60\n elif unit == 0:\n return value\n else:\n raise InstrumentProtocolException(\"Invalid Units!\")",
"def time_to_int(self):\n minutes = self.hour * 60 + self.minute\n seconds = minutes * 60 + self.second\n return seconds",
"def to_seconds(hours,minutes,seconds):\n return hours*3600+minutes*60+seconds",
"def to_seconds(self):\r\n return self.hours * 3600 + self.minutes * 60 + self.seconds",
"def minutes_to_seconds(minutes) -> int:\n return int(minutes) * 60",
"def convert_time(t):\n minutes = int(t/60)\n seconds = int(t-60*minutes)\n return minutes, seconds",
"def minutes_to_seconds( minutes: str ) -> int:\r\n return int(minutes)*60",
"def parse_time_ms(time_string):\n try:\n return int(1000 * parse_duration(time_string))\n except:\n logging.exception('Unable to extract seconds from {}'.format(time_string))\n logging.info('Defaulting time to 1 second.')\n return 1000",
"def to_seconds(self):\n return self.hours * 3600 + self.minutes * 60 + self.seconds",
"def _convert_time(self, duration):\n in_sec = int(int(duration) / 1000)\n in_time = int(in_sec / 60) + (0.01 * (in_sec % 60))\n return in_time",
"def _time_ms(self, dt):\n if dt.tzinfo is None:\n dt = dt.replace(tzinfo=pytz.utc)\n return int((dt - self._EPOCH).total_seconds() * 1000)",
"def _get_milleseconds(self):\n return int(round(time.time() * 1000))",
"def time_to_int(self):\n minutes = self.hour * 60 + self.minute\n secconds = self.minute * 60 + self.second\n return secconds",
"def SECOND(time):\n\n return _make_datetime(time).second"
] | [
"0.76566106",
"0.7349378",
"0.7331618",
"0.7264953",
"0.7230861",
"0.7129717",
"0.7029283",
"0.6987922",
"0.69484216",
"0.68863964",
"0.68600786",
"0.6857339",
"0.68361413",
"0.6827519",
"0.68191534",
"0.6809082",
"0.67981195",
"0.6795517",
"0.6790688",
"0.6744001",
"0.67242783",
"0.6710832",
"0.6709009",
"0.6687226",
"0.66861326",
"0.6654757",
"0.6637209",
"0.66231394",
"0.66088563",
"0.6604409"
] | 0.7594132 | 1 |
Wait for the polling interval, then do the real message check. | def __check_for_messages(self):
# Wait for at least poll_interval sec
polling_interval = self.conf.messaging_server.polling_interval
time.sleep(polling_interval)
if self.conf.messaging_server.debug:
LOG.debug("Topic {}: Checking for new messages".format(
self.target.topic))
self._do()
return True | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def poll(self):\n self.poll_function(self.connection)",
"def poll(self):\n while self.running and reactor._started and not reactor._stopped:\n self.check_response_queue()\n sleep(0.5)",
"async def check():\r\n while True:\r\n if rss.check_new():\r\n item = rss.most_recent()\r\n queue = format_message.format_notes(item)\r\n for message in queue:\r\n await client.send_message(client.get_channel(\"350634825516056577\"), message)\r\n await asyncio.sleep(28800) # Check every 8 hours\r",
"async def _poll(self):\n while True:\n await asyncio.sleep(self._poll_period)\n weight = self.weight\n message = Message(payload=cbor2.dumps(weight), code=CONTENT, content_format=60)\n self.updated_state(message)",
"def should_poll(self):\n return True",
"def should_poll(self):\n return True",
"def should_poll(self):\n return True",
"def should_poll(self):\n return True",
"def should_poll(self):\n return True",
"def should_poll(self):\n return True",
"def should_poll(self):\n return True",
"def should_poll(self):\n return True",
"def should_poll(self):\n return True",
"def should_poll(self):\n return True",
"def should_poll(self):\n return True",
"def should_poll(self):\n return True",
"def should_poll(self):\n return True",
"def should_poll(self):\n return True",
"def should_poll(self):\n return True",
"def setup_poll(self):\n while True:\n try:\n self.do_polling()\n time.sleep(0.01)\n except KeyboardInterrupt:\n print(self.get_stream())\n exit()",
"def should_poll(self):\r\n return False",
"def is_polling_done(self):\n if self.message_request_more:\n return False\n \n if self.message_cache:\n return False\n \n return True",
"def watch_for_heartbeat_messages(self):\n while True:\n message = self.socket_manager.get_heartbeat_message()\n self.ensure_sender_is_known(message)\n if message.direction == \"0\":\n self.respond_to_heartbeat_message(message)\n elif message.direction == \"1\":\n self.handle_heartbeat_response(message)",
"def should_poll(self):\n return False",
"def should_poll(self):\n return False",
"def should_poll(self):\n return False",
"def should_poll(self):\n return False",
"def should_poll(self):\n return False",
"def should_poll(self):\n return False",
"def should_poll(self):\n return False"
] | [
"0.6918109",
"0.6909268",
"0.66404",
"0.6634429",
"0.6591415",
"0.6591415",
"0.6591415",
"0.6591415",
"0.6591415",
"0.6591415",
"0.6591415",
"0.6591415",
"0.6591415",
"0.6591415",
"0.6591415",
"0.6591415",
"0.6591415",
"0.6591415",
"0.6591415",
"0.6586251",
"0.650955",
"0.647684",
"0.64711446",
"0.64369136",
"0.64369136",
"0.64369136",
"0.64369136",
"0.64369136",
"0.64369136",
"0.64369136"
] | 0.74402225 | 0 |
Gracefully stop working on things | def _gracefully_stop(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def stop_check(self):\n pass",
"def aborting(self):\n \n pass",
"def stopclean(self):\n raise Exception(\"Not implemented\")",
"def stop() -> None:",
"def force_stop(self):\n #cancel any current request:\n self._cancel_current_request()",
"def _prepare_to_stop(self):\n pass",
"def _stop(self):",
"def stop(self) -> None:",
"def stop(self) -> None:",
"def _stop(self):\n return True",
"def abort(self):\n try:\n self.acqRunning = False\n except:\n print('Cannot abort properly')",
"def stop(self):\n self._should_run = False",
"def stop(self):\r\n pass",
"def stop(self, force=False):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):\n pass",
"def stop(self):",
"def stop(self):",
"def stop(self):\n\t\tpass",
"def stop():",
"def stop():"
] | [
"0.74943763",
"0.7360043",
"0.7337609",
"0.72221744",
"0.7203809",
"0.7203615",
"0.7172144",
"0.7161631",
"0.7161631",
"0.7149348",
"0.7134612",
"0.713363",
"0.70896405",
"0.7084329",
"0.7068823",
"0.7068823",
"0.7068823",
"0.7068823",
"0.7068823",
"0.7068823",
"0.7068823",
"0.7068823",
"0.7068823",
"0.7068823",
"0.7068823",
"0.7057791",
"0.7057791",
"0.7036937",
"0.70254385",
"0.70254385"
] | 0.81977755 | 0 |
Prepare to restart the RPC Server | def _restart(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _RestartServer( self ):\n with self._gocode_lock:\n self._StopServer()\n self._StartServer()",
"def restart(self):",
"def restart(self) -> None:",
"def restart(self):\n pass",
"def request_shutdown(self, restart=False):",
"def restart(self):\r\n pass",
"def restart(self):\n self.client.post(self.path+'/action', { 'restart': {} })\n return True",
"def repl_restart(restart: bool = True) -> None:",
"def restart():\n stop()\n start()",
"def node_restart(ctx):\n ctx.obj['node'].attempt_restart()",
"async def module_command_restart(self, ctx, parsed):\n if parsed.invoker != ctx.owner:\n return\n reason = \" \".join(parsed.args[\"msg\"] or []) or \"Restarting\"\n self.quit(reason)\n self._restarting = True",
"def restart(self):\n global shouldRestart\n shouldRestart = True\n logging.info(\"Restarting bot\")\n self.die()",
"def restart():\n log.info('restart')\n samuraix.restarting = True\n samuraix.app.stop()",
"async def do_force_restart(self):\n if self.config[\"allow_restart_requests\"]:\n os._exit(42)\n else:\n return self._rpc_failure(\"Restart disallowed by configuration\")",
"async def restart_server(self):\n await self.stop_server()\n self._start()\n await self.send_tag('control', emoji.TRIGGERS['control'], 'Server restarted!')",
"def restart(self):\n self.__init__()\n return",
"def restart(config):\n shutdown(config)\n startup(config)\n return",
"def lz_restart_pondering(self):\n self.lz_wrapper.restart_ponder()",
"def restart(self):\r\n self._safe_close()\r\n self._stopped.clear()\r\n self.reconnect()",
"def restart(self):\n print \"Restarting \" + executable + \" \" + str(argv) \n execl(executable, *([executable]+argv))",
"def restart(self):\n\t\treturn self.reset().start()",
"def restart():\n run('kill -HUP $(cat /tmp/pyar_web.pid)')",
"def restart(self):\n self.stop()\n self.start(init=False)",
"def finished_restarting():\n flags.restarting = False\n group_spawn(qtile.current_group)\n qtile.cmd_spawn(\"nitrogen --restore\")",
"def attempt_restart(self):\n self.controller.publish(self, 'restart')",
"def webserver_restart():\n try:\n run(\"kill -HUP $(cat %s)\" % GUNICORN_PIDFILE)\n except:\n webserver_start()",
"def restartserver(self, port=None):\n if port is not None:\n if port < 0: #code to try a random port\n self.parameters['port'] = random.randint(2223,50000)\n else:\n self.parameters['port'] = port\n return self.startserver()",
"def Restart(self):\n handler = self.get_command_object(\"Restart\")\n handler()",
"def restart(self):\n self.logger.info(\"Received graceful restart request\")\n self._restart = True\n self.stop()",
"def IntrumentFailHook(self):\n #Restart iserver\n #If failed to restart\n #\treturn fail\n pass"
] | [
"0.7340876",
"0.7222521",
"0.7215268",
"0.7143786",
"0.7116937",
"0.70833296",
"0.6916681",
"0.6875465",
"0.67941314",
"0.6769875",
"0.67550653",
"0.6754594",
"0.67220056",
"0.6678545",
"0.6672805",
"0.66473347",
"0.66432434",
"0.66391295",
"0.66269875",
"0.6593943",
"0.6593009",
"0.6584128",
"0.6557689",
"0.6534619",
"0.65342224",
"0.65216756",
"0.6516044",
"0.64925",
"0.64679444",
"0.64428353"
] | 0.77630264 | 0 |
Sets environment variables for a nox session object. | def set_environment_variables(env_dict, session):
for key, value in env_dict.items():
session.env[key] = value | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_env_var(self):\n\n list_env_vars = self.config.items('environment_variables')\n for env_var in list_env_vars:\n os.environ[env_var[0].upper()] = env_var[1]",
"def set_envvars(self):\n # self.logger.trace(\"update os.environ with %s\", self.environ)\n for key in os.environ:\n current = self.environ.get(key)\n if current is None:\n del os.environ[key]\n for key, value in self.environ.items():\n if value is not None:\n os.environ[key] = str(value)",
"def SetEnvironment(env):\n os.environ.clear()\n os.environ.update(env)",
"def environment_vars_set():\n os.environ[\"YESSSSMS_LOGIN\"] = \"03211234567\"\n os.environ[\"YESSSSMS_PASSWD\"] = \"MySecr3t\"\n os.environ[\"YESSSSMS_PROVIDER\"] = \"goood\"\n os.environ[\"YESSSSMS_RECIPIENT\"] = \"066356789789\"",
"def set_environ(self, environ):\n self.environ = environ",
"def set_environ(self, environ):\n self.environ = environ",
"def set(self, shell=None):\n\n # iterate over the env variable objects and set them in the env\n for var in self._vars.itervalues():\n var.set(shell=shell)",
"def env_vars(self, env_vars):\n\n self._env_vars = env_vars",
"def SetEnvironmentVars(self):\n for name, value, section in self._marchConfig():\n fetch_name = self._get_param_name(name, section)\n self._set_env_prop(fetch_name, value)",
"def envs(self, envs):\n self._instructions_setter('ENV', envs)",
"def _setEnv(self):\n try:\n global_env_prfix = \"/GlobalEnv/\"\n if self.etcd_key_prefix is not None:\n global_env_prfix = self.etcd_key_prefix + \"/GlobalEnv/\"\n value = self.etcd.get(global_env_prfix)\n if value[0] is not None:\n jsonConfig = json.loads(value[0].decode('utf-8'))\n for key in jsonConfig.keys():\n os.environ[key] = jsonConfig[key]\n else:\n raise TypeError(\"config manager key {} must be set as \\\n a prerequisite ...\".format(global_env_prfix))\n except Exception as e:\n self.logger.error(\"Exception raised in _setEnv\\\n with error:{}\".format(e))\n raise e",
"def env_init(self, environ) -> None:\n environ.update(self._env)",
"def test_setenv(self, env: yaenv.Env):\n from os import environ\n assert 'EMAIL' not in environ\n env.setenv()\n assert 'EMAIL' in environ",
"def set_env(**kwargs):\n _environ = dict(os.environ)\n os.environ.update(kwargs)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(_environ)",
"def set_env_var(varnames, varvalues):\n try:\n for i in range(len(varnames)):\n os.environ[varnames[i]] = str(varvalues[i]).strip()\n except Exception as e:\n raise j.exceptions.RuntimeError(e)",
"def setenv(name, value):\n os.environ[name] = value",
"def env(self, env):\n\n self._env = env",
"def env(self, env):\n\n self._env = env",
"def set_env(self, env: NoneStr):\n self.env = env or ENV",
"def setenv(self, key, value):\n self._env[key] = value",
"def set_env(self, env):\n\n self.env = env\n self.sim_env = copy.deepcopy(self.env)\n self.sim_env.reset_at_episode_end = False # Avoids expensive re-sampling of jets every time we parse a path\n self.init_episode()",
"def setenv(self, var, value):\n self._log_command([\"export\", \"{}={}\".format(var, value)])\n if not self.dryrun:\n os.environ[var] = value",
"def _set_ci_environment_variables(parent_shell):\n variables_to_set = {\n \"JOBSTAMPS_ALWAYS_USE_HASHES\": \"1\",\n \"CLINT_FORCE_COLOR\": \"1\",\n \"PYTHONDONTWRITEBYTECODE\": \"1\"\n }\n\n for key, value in variables_to_set.items():\n os.environ[key] = value\n parent_shell.overwrite_environment_variable(key, value)",
"def _setenv(self):\n tokens = {}\n tokens[\"CT_TIMESTAMP\"] = self._timestamp\n tokens[\"CT_SUBMITTER\"] = self._node.name()\n # tokens[\"CT_HIPBASE\"] = self._file[\"hipbase\"]\n tokens[\"CT_SCENE\"] = self._scene\n tokens[\"CT_PROJECT\"] = self.project_name\n\n for token in tokens:\n hou.putenv(token, tokens[token])\n\n return tokens",
"def _setup_environment_vars(self, opts):\n # Check that these directories actually exist\n assert os.path.isdir(opts.movie_advisor_home)\n\n #if not 'install-bento' in self.actions: assert os.path.isdir(opts.bento_home)\n\n self.movie_advisor_home = opts.movie_advisor_home\n self.bento_home = opts.bento_home\n self.bento_tgz = opts.bento_tgz\n self.kiji_uri = \"kiji://.env/tutorial\"\n\n # \"express job\" takes a jar file as an argument\n assert os.path.isfile(os.path.join(self.movie_advisor_home, self.express_jar))\n\n # Set the classpath for all of the commands that we'll run\n jarsFullPaths = [os.path.join(self.movie_advisor_home, j) for j in self.jars]\n for jar in jarsFullPaths: assert os.path.isfile(jar)\n\n classpath = \":\".join(jarsFullPaths)\n os.environ['KIJI_CLASSPATH'] = classpath\n\n if opts.show_classpath:\n print(\"export KIJI_CLASSPATH=%s\" % classpath)\n sys.exit(0)",
"def set_env(**environ):\n old_environ = dict(os.environ)\n os.environ.update(environ)\n try:\n yield\n finally:\n os.environ.clear()\n os.environ.update(old_environ)",
"def set_env(self, propagated_env_vars={}):\n os.environ['BUILD_ROOT'] = self.build_root\n # This is how we tell run-test.sh what set of C++ binaries to use for mini-clusters in Java\n # tests.\n for env_var_name, env_var_value in propagated_env_vars.iteritems():\n os.environ[env_var_name] = env_var_value",
"def set_envs(self):\n # pylint:disable=protected-access\n # Need to call sys.__getframe() to get the filename and method/func\n # for logging information.\n\n # Useful for logging\n # Logging output: TIME UTC |TYPE (DEBUG, INFO, WARNING, etc.) |\n # [File : function]| Message\n cur_filename = sys._getframe().f_code.co_filename\n cur_function = sys._getframe().f_code.co_name\n\n self.logger.info('Setting env variables from config file...')\n # Set all the environment variables that are needed by the\n # MET config file.\n\n tmp_amodel = self.c_dict['AMODEL']\n if tmp_amodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_amodel_str = str(tmp_amodel).replace(\"\\'\", \"\\\"\")\n tmp_amodel = ''.join(tmp_amodel_str.split())\n self.add_env_var('AMODEL', tmp_amodel)\n else:\n self.add_env_var('AMODEL', \"[]\")\n\n tmp_bmodel = self.c_dict['BMODEL']\n if tmp_bmodel:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_bmodel_str = str(tmp_bmodel).replace(\"\\'\", \"\\\"\")\n tmp_bmodel = ''.join(tmp_bmodel_str.split())\n self.add_env_var('BMODEL', tmp_bmodel)\n else:\n self.add_env_var('BMODEL', \"[]\")\n\n tmp_desc = self.c_dict['DESC']\n if tmp_desc:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_desc_str = str(tmp_desc).replace(\"\\'\", \"\\\"\")\n tmp_desc = ''.join(tmp_desc_str.split())\n self.add_env_var('DESC', tmp_desc)\n else:\n self.add_env_var('DESC', \"[]\")\n\n tmp_storm_id = self.c_dict['STORM_ID']\n if tmp_storm_id:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_id_str = str(tmp_storm_id).replace(\"\\'\", \"\\\"\")\n tmp_storm_id = ''.join(tmp_storm_id_str.split())\n self.add_env_var('STORM_ID', tmp_storm_id)\n else:\n self.add_env_var('STORM_ID', \"[]\")\n\n tmp_basin = self.c_dict['BASIN']\n if tmp_basin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_basin_str = str(tmp_basin).replace(\"\\'\", \"\\\"\")\n tmp_basin = ''.join(tmp_basin_str.split())\n self.add_env_var('BASIN', tmp_basin)\n else:\n self.add_env_var('BASIN', \"[]\")\n\n tmp_cyclone = self.c_dict['CYCLONE']\n if tmp_cyclone:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_cyclone_str = str(tmp_cyclone).replace(\"\\'\", \"\\\"\")\n tmp_cyclone = ''.join(tmp_cyclone_str.strip())\n self.add_env_var('CYCLONE', tmp_cyclone)\n else:\n self.add_env_var('CYCLONE', \"[]\")\n\n tmp_storm_name = self.c_dict['STORM_NAME']\n if tmp_storm_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_storm_name_str = str(tmp_storm_name).replace(\"\\'\", \"\\\"\")\n tmp_storm_name = ''.join(tmp_storm_name_str.strip())\n self.add_env_var('STORM_NAME', tmp_storm_name)\n else:\n self.add_env_var('STORM_NAME', \"[]\")\n\n if self.c_dict['INIT_BEG']:\n self.add_env_var('INIT_BEG', self.c_dict['INIT_BEG'])\n else:\n self.add_env_var('INIT_BEG', \"\")\n\n if self.c_dict['INIT_END']:\n self.add_env_var('INIT_END', self.c_dict['INIT_END'])\n else:\n self.add_env_var('INIT_END', \"\")\n\n tmp_init_include = self.c_dict['INIT_INCLUDE']\n if tmp_init_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_include_str = str(tmp_init_include).replace(\"\\'\", \"\\\"\")\n tmp_init_include = ''.join(tmp_init_include_str.strip())\n self.add_env_var('INIT_INCLUDE', tmp_init_include)\n else:\n self.add_env_var('INIT_INCLUDE', \"[]\")\n\n tmp_init_exclude = 
self.c_dict['INIT_EXCLUDE']\n if tmp_init_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_exclude_str = str(tmp_init_exclude).replace(\"\\'\", \"\\\"\")\n tmp_init_exclude = ''.join(tmp_init_exclude_str.strip())\n self.add_env_var('INIT_EXCLUDE', tmp_init_exclude)\n else:\n self.add_env_var('INIT_EXCLUDE', \"[]\")\n\n tmp_init_hour = self.c_dict['INIT_HOUR']\n if tmp_init_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_hour_str = str(tmp_init_hour).replace(\"\\'\", \"\\\"\")\n tmp_init_hour = ''.join(tmp_init_hour_str.split())\n self.add_env_var('INIT_HOUR', tmp_init_hour)\n else:\n self.add_env_var('INIT_HOUR', \"[]\")\n\n tmp_valid_begin = self.c_dict['VALID_BEG']\n if tmp_valid_begin:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_begin_str = str(tmp_valid_begin).replace(\"\\'\", \"\\\"\")\n tmp_valid_begin = ''.join(tmp_valid_begin_str.strip())\n self.add_env_var('VALID_BEG', tmp_valid_begin)\n else:\n self.add_env_var('VALID_BEG', '')\n\n tmp_valid_end = self.c_dict['VALID_END']\n if tmp_valid_end:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_end_str = str(tmp_valid_end).replace(\"\\'\", \"\\\"\")\n tmp_valid_end = ''.join(tmp_valid_end_str.strip())\n self.add_env_var('VALID_END', tmp_valid_end)\n else:\n self.add_env_var('VALID_END', \"\")\n\n tmp_valid_include = self.c_dict['VALID_INCLUDE']\n if tmp_valid_include:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_include_str = str(tmp_valid_include).replace(\"\\'\", \"\\\"\")\n tmp_valid_include = ''.join(tmp_valid_include_str.strip())\n self.add_env_var('VALID_INCLUDE', tmp_valid_include)\n else:\n self.add_env_var('VALID_INCLUDE', \"[]\")\n\n tmp_valid_exclude = self.c_dict['VALID_EXCLUDE']\n if tmp_valid_exclude:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_exclude_str = str(tmp_valid_exclude).replace(\"\\'\", \"\\\"\")\n tmp_valid_exclude = ''.join(tmp_valid_exclude_str.strip())\n self.add_env_var('VALID_EXCLUDE', tmp_valid_exclude)\n else:\n self.add_env_var('VALID_EXCLUDE', \"[]\")\n\n tmp_valid_hour = self.c_dict['VALID_HOUR']\n if tmp_valid_hour:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_hour_str = str(tmp_valid_hour).replace(\"\\'\", \"\\\"\")\n tmp_valid_hour = ''.join(tmp_valid_hour_str.strip())\n self.add_env_var('VALID_HOUR', tmp_valid_hour)\n else:\n self.add_env_var('VALID_HOUR', \"[]\")\n\n tmp_lead_req = self.c_dict['LEAD_REQ']\n if tmp_lead_req:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_req_str = str(tmp_lead_req).replace(\"\\'\", \"\\\"\")\n tmp_lead_req = ''.join(tmp_lead_req_str.strip())\n self.add_env_var('LEAD_REQ', tmp_lead_req)\n else:\n self.add_env_var('LEAD_REQ', \"[]\")\n\n tmp_lead = self.c_dict['LEAD']\n if tmp_lead:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_lead_str = str(tmp_lead).replace(\"\\'\", \"\\\"\")\n tmp_lead = ''.join(tmp_lead_str.strip())\n self.add_env_var('LEAD', tmp_lead)\n else:\n self.add_env_var('LEAD', \"[]\")\n\n tmp_init_mask = self.c_dict['INIT_MASK']\n if tmp_init_mask:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_mask_str = str(tmp_init_mask).replace(\"\\'\", \"\\\"\")\n tmp_init_mask = ''.join(tmp_init_mask_str.strip())\n 
self.add_env_var('INIT_MASK', tmp_init_mask)\n else:\n self.add_env_var('INIT_MASK', \"[]\")\n\n tmp_valid_mask = self.c_dict['VALID_MASK']\n if tmp_valid_mask:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_valid_mask_str = str(tmp_valid_mask).replace(\"\\'\", \"\\\"\")\n tmp_valid_mask = ''.join(tmp_valid_mask_str.strip())\n self.add_env_var('VALID_MASK', tmp_valid_mask)\n else:\n self.add_env_var('VALID_MASK', \"[]\")\n\n tmp_track_watch_warn = self.c_dict['TRACK_WATCH_WARN']\n if tmp_track_watch_warn:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_track_watch_warn_str = str(tmp_track_watch_warn).replace(\"\\'\",\n \"\\\"\")\n tmp_track_watch_warn = ''.join(tmp_track_watch_warn_str.strip())\n self.add_env_var('TRACK_WATCH_WARN', tmp_track_watch_warn)\n else:\n self.add_env_var('TRACK_WATCH_WARN', \"[]\")\n\n tmp_column_thresh_name = self.c_dict['COLUMN_THRESH_NAME']\n if tmp_column_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_name_str = str(tmp_column_thresh_name).replace(\n \"\\'\", \"\\\"\")\n tmp_column_thresh_name = ''.join(tmp_column_thresh_name_str.strip())\n self.add_env_var('COLUMN_THRESH_NAME', tmp_column_thresh_name)\n else:\n self.add_env_var('COLUMN_THRESH_NAME', \"[]\")\n\n tmp_column_thresh_val = self.c_dict['COLUMN_THRESH_VAL']\n if tmp_column_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_thresh_val_str = str(tmp_column_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_column_thresh_val = ''.join(tmp_column_thresh_val_str.strip())\n self.add_env_var('COLUMN_THRESH_VAL', tmp_column_thresh_val)\n else:\n self.add_env_var('COLUMN_THRESH_VAL', \"[]\")\n\n tmp_column_str_name = self.c_dict['COLUMN_STR_NAME']\n if tmp_column_str_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_name = str(tmp_column_str_name).replace(\"\\'\",\n \"\\\"\")\n tmp_column_str_name = ''.join(tmp_column_str_name.strip())\n self.add_env_var('COLUMN_STR_NAME', tmp_column_str_name)\n else:\n self.add_env_var('COLUMN_STR_NAME', \"[]\")\n\n tmp_column_str_val = self.c_dict['COLUMN_STR_VAL']\n if tmp_column_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_column_str_val_str = str(tmp_column_str_val).replace(\"\\'\", \"\\\"\")\n tmp_column_str_val = ''.join(tmp_column_str_val_str.strip())\n self.add_env_var('COLUMN_STR_VAL', tmp_column_str_val)\n else:\n self.add_env_var('COLUMN_STR_VAL', \"[]\")\n\n tmp_init_thresh_name = self.c_dict['INIT_THRESH_NAME']\n if tmp_init_thresh_name:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_name_str = str(tmp_init_thresh_name).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_name = ''.join(tmp_init_thresh_name_str.strip())\n\n self.add_env_var('INIT_THRESH_NAME', tmp_init_thresh_name)\n\n else:\n self.add_env_var('INIT_THRESH_NAME', \"[]\")\n\n tmp_init_thresh_val = self.c_dict['INIT_THRESH_VAL']\n if tmp_init_thresh_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_thresh_val_str = str(tmp_init_thresh_val).replace(\"\\'\",\n \"\\\"\")\n tmp_init_thresh_val = ''.join(tmp_init_thresh_val_str.strip())\n self.add_env_var('INIT_THRESH_VAL', tmp_init_thresh_val)\n else:\n self.add_env_var('INIT_THRESH_VAL', \"[]\")\n\n tmp_init_str_name = self.c_dict['INIT_STR_NAME']\n if tmp_init_str_name:\n # 
Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_name_str = str(tmp_init_str_name).replace(\"\\'\", \"\\\"\")\n tmp_init_str_name = ''.join(tmp_init_str_name_str.strip())\n self.add_env_var('INIT_STR_NAME', tmp_init_str_name)\n else:\n self.add_env_var('INIT_STR_NAME', \"[]\")\n\n tmp_init_str_val = self.c_dict['INIT_STR_VAL']\n if tmp_init_str_val:\n # Replace any single quotes with double quotes and remove any\n # whitespace\n tmp_init_str_val_str = str(tmp_init_str_val).replace(\"\\'\", \"\\\"\")\n tmp_init_str_val = ''.join(tmp_init_str_val_str.strip())\n self.add_env_var('INIT_STR_VAL', tmp_init_str_val)\n else:\n self.add_env_var('INIT_STR_VAL', \"[]\")\n\n # boolean values for WATER_ONLY\n if self.c_dict['WATER_ONLY']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('WATER_ONLY', flag)\n\n # boolean value for LANDFALL\n if self.c_dict['LANDFALL']:\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('LANDFALL', flag)\n\n if self.c_dict['LANDFALL_BEG']:\n self.add_env_var('LANDFALL_BEG',\n self.c_dict['LANDFALL_BEG'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_BEG', '-24')\n\n if self.c_dict['LANDFALL_END']:\n self.add_env_var('LANDFALL_END',\n self.c_dict['LANDFALL_END'])\n else:\n # Set to default\n self.add_env_var('LANDFALL_END', '00')\n\n # boolean value for MATCH_POINTS\n if self.c_dict['MATCH_POINTS'] == 'true':\n flag = \"TRUE\"\n else:\n flag = \"FALSE\"\n self.add_env_var('MATCH_POINTS', flag)\n\n if self.c_dict['CONFIG_FILE']:\n self.add_env_var('CONFIG_FILE',\n self.c_dict['CONFIG_FILE'])\n else:\n self.log_error(\n cur_filename + '|' + cur_function +\n ': no MET TC-Stat config file found. Exiting')\n sys.exit(1)\n\n jobs_list_tmp = self.c_dict['JOBS_LIST']\n if jobs_list_tmp:\n # MET is expecting a string\n jobs_list_str = '\"' + jobs_list_tmp + '\"'\n self.add_env_var('JOBS', jobs_list_str)\n else:\n self.log_error('No jobs list defined. Please check your METplus'\n 'config file. Exiting...')\n sys.exit(1)\n return 0",
"def setEnv(self, name, value=None):\n if value is None:\n try:\n value = os.environ[name]\n except KeyError:\n raise RuntimeError(\"%s does not exist in current environment\", name)\n self.environment[name] = value",
"def set_evar(var, val):\n os.environ[var] = val\n u.verbose(0, \"Setting %s to: %s\" % (var, val))"
] | [
"0.7283772",
"0.7195755",
"0.7000887",
"0.6998755",
"0.6786736",
"0.6786736",
"0.6722491",
"0.6713368",
"0.67089427",
"0.66470826",
"0.65927714",
"0.6552898",
"0.6529187",
"0.6479471",
"0.64337885",
"0.6416746",
"0.64166296",
"0.64166296",
"0.6415311",
"0.64110196",
"0.6353236",
"0.63220435",
"0.62528175",
"0.62454516",
"0.6235168",
"0.622499",
"0.6193817",
"0.617022",
"0.61514443",
"0.6122878"
] | 0.7486853 | 0 |
Check all files against the defined precommit hooks. | def lint(session):
session.install("pre-commit")
session.run("pre-commit", "run", "--all-files") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def precommit(exit=True):\n tmpdir = tempfile.mkdtemp()\n\n try:\n copy_index(tmpdir)\n\n modified = check_output(['git', 'diff', '--cached', '--name-only',\n '--diff-filter=ACMRT'])\n modified = [name.strip() for name in modified.splitlines()]\n path = os.environ['PATH']\n with pushd(tmpdir) as prevdir:\n conf = load_conf()\n # Activate the virtualenv before running checks\n if 'env' in conf:\n binpath = os.path.abspath(os.path.join(prevdir,\n conf['env']['path'],\n 'bin'))\n if binpath not in path.split(os.pathsep):\n path = binpath + os.pathsep + path\n retcode = run_checks(conf.get('hooks_all', []),\n conf.get('hooks_modified', []), modified,\n path)\n\n if exit:\n sys.exit(retcode)\n else:\n return retcode\n finally:\n shutil.rmtree(tmpdir)",
"def _run_hooks(config, repo_hooks, args, environ):\n skips = _get_skips(environ)\n cols = _compute_cols([hook for _, hook in repo_hooks], args.verbose)\n filenames = _all_filenames(args)\n filenames = _filter_by_include_exclude(filenames, '', config['exclude'])\n retval = 0\n for repo, hook in repo_hooks:\n retval |= _run_single_hook(filenames, hook, repo, args, skips, cols)\n if retval and config['fail_fast']:\n break\n if (\n retval and\n args.show_diff_on_failure and\n subprocess.call(('git', 'diff', '--quiet', '--no-ext-diff')) != 0\n ):\n print('All changes made by hooks:')\n subprocess.call(('git', 'diff', '--no-ext-diff'))\n return retval",
"def run_checks(hooks_all, hooks_modified, modified, path):\n retcode = 0\n for command in hooks_all:\n if not isinstance(command, list):\n command = shlex.split(command)\n retcode |= subprocess.call(command, env={'PATH': path})\n\n for pattern, command in hooks_modified:\n if not isinstance(command, list):\n command = shlex.split(command)\n for filename in modified:\n if not fnmatch.fnmatch(filename, pattern):\n continue\n printed_filename = False\n proc = subprocess.Popen(command + [filename],\n env={'PATH': path},\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n output = proc.communicate()[0]\n if proc.returncode != 0:\n if not printed_filename:\n print(filename)\n print('=' * len(filename))\n printed_filename = True\n print(command[0])\n print('-' * len(command[0]))\n print(output)\n retcode |= proc.returncode\n\n return retcode",
"def RunChecks(self):\n results = []\n\n affected_files = self.input_api.AffectedFiles(\n file_filter=self.file_filter, include_deletes=False)\n affected_js_files = filter(\n lambda f: f.LocalPath().endswith('.js'), affected_files)\n\n if affected_js_files:\n self.input_api.logging.info(\n 'Running appengine eslint on %d JS file(s)', len(affected_js_files))\n results += self.RunESLintChecks(affected_js_files)\n\n\n if results:\n results.append(self.output_api.PresubmitNotifyResult(\n 'See the JavaScript style guide at https://goo.gl/Ld1CqR.'))\n\n return results",
"def main(self, *directories):\n if not self.git and len(directories) == 0:\n print (\"ERROR: At least one directory must be provided (or the \"\n \"--git-precommit flag must be passed.\\n\")\n self.help()\n return\n\n if len(directories) > 0:\n find = local['find']\n files = []\n for directory in directories:\n real = os.path.expanduser(directory)\n if not os.path.exists(real):\n raise ValueError(\"{0} does not exist\".format(directory))\n files.extend(find(real, '-not', '-name', '._*', '-name', '*.py').strip().split('\\n'))\n else:\n status = local['git']('status', '--porcelain', '-uno')\n root = local['git']('rev-parse', '--show-toplevel').strip()\n\n # get all modified or added python files\n modified = re.findall(r\"^\\s[AM]\\s+(\\S+\\.py)$\", status, re.MULTILINE)\n\n # now just get the path part, which all should be relative to the\n # root\n files = [os.path.join(root, line.split(' ', 1)[-1].strip())\n for line in modified]\n\n if len(files) > 0:\n print \"Linting {0} python files.\\n\".format(len(files))\n lint(files)\n else:\n print \"No python files found to lint.\\n\"",
"def main(\n files_or_directories: Sequence[Path],\n check: bool,\n stdin: bool,\n commit: bool,\n git_hooks: bool,\n verbose: bool,\n) -> None:\n\n if git_hooks:\n from esss_fix_format.hook_utils import install_pre_commit_hook\n\n install_pre_commit_hook() # uses the current directory by default.\n return\n\n sys.exit(_main(files_or_directories, check=check, stdin=stdin, commit=commit, verbose=verbose))",
"def __gitCheckPatches(self):\n self.vcs.gitApplyCheckPatches(self.project.getProjectPath(),\n check=True)",
"def run(cls, directory: Path) -> None:\n\n if directory.is_dir() is False:\n raise Failure(f\"{directory} is not a valid directory\")\n\n logger.info(\"Running pre-commit hooks on all current files\")\n\n os.chdir(directory)\n run([cls.command, \"run\", \"--all-files\"])",
"def install_git_hooks():\n if os.path.isdir('.git'):\n src = os.path.join(\n ROOT_DIR, \"scripts\", \"internal\", \"git_pre_commit.py\")\n dst = os.path.realpath(\n os.path.join(ROOT_DIR, \".git\", \"hooks\", \"pre-commit\"))\n with open(src, \"rt\") as s:\n with open(dst, \"wt\") as d:\n d.write(s.read())",
"def pre_backup_check(repos):\n for repo in 'local', 'remote':\n repos[repo].check()\n\n # TODO: Check the ordering of this is deterministic\n most_recent_archive = repos[repo].list_archives()[-1]\n repos[repo].check_archive(most_recent_archive)",
"def __runChecks(self):\n runnedChecks = []\n runnedChecks.append(Checks.checksFilesInstalled(self.__targetPath, verbose=True))\n return all(runnedChecks)",
"def add_hooks(pre_commit: str, pre_push: str):\n\n # Detect virtualenv the hooks should use\n\n # Detect virtualenv\n virtual_env = conf.get_env('VIRTUAL_ENV')\n if virtual_env is None:\n log.err(\"You are not inside a virtualenv\")\n confirm_msg = (\n \"Are you sure you want to use global python installation \"\n \"to run your git hooks? [y/N] \"\n )\n click.prompt(confirm_msg, default='')\n if not click.confirm(confirm_msg):\n log.info(\"Cancelling\")\n return\n\n load_venv = ''\n else:\n load_venv = 'source \"{}/bin/activate\"'.format(virtual_env)\n\n commit_hook = conf.proj_path('.git/hooks/pre-commit')\n push_hook = conf.proj_path('.git/hooks/pre-push')\n\n # Write pre-commit hook\n log.info(\"Adding pre-commit hook <33>{}\", commit_hook)\n fs.write_file(commit_hook, util.remove_indent('''\n #!/bin/bash\n PATH=\"/opt/local/libexec/gnubin:$PATH\"\n \n {load_venv}\n \n {command}\n \n '''.format(load_venv=load_venv, command=pre_commit)))\n\n # Write pre-push hook\n log.info(\"Adding pre-push hook: <33>{}\", push_hook)\n fs.write_file(push_hook, util.remove_indent('''\n #!/bin/bash\n PATH=\"/opt/local/libexec/gnubin:$PATH\"\n \n {load_venv}\n \n peltak test --allow-empty\n \n {command}\n \n '''.format(load_venv=load_venv, command=pre_push)))\n\n log.info(\"Making hooks executable\")\n if not context.get('pretend', False):\n os.chmod(conf.proj_path('.git/hooks/pre-commit'), 0o755)\n os.chmod(conf.proj_path('.git/hooks/pre-push'), 0o755)",
"def test_pre_post_hooks(self):\n os.makedirs('/tmp/localhost/pacha_pre')\n os.makedirs('/tmp/localhost/pacha_post')\n pre_script = open('/tmp/localhost/pacha_pre/foo.sh', 'w')\n pre_script.write('''touch /tmp/localhost/pre_got_executed.txt''')\n pre_script.close()\n post_script = open('/tmp/localhost/pacha_post/bar.sh', 'w')\n post_script.write('''touch /tmp/localhost/post_got_executed.txt''')\n post_script.close()\n run = rebuild.Rebuild(hostname='localhost') \n run.pre_hooks()\n run.post_hooks()\n self.assertTrue(os.path.isfile('/tmp/localhost/post_got_executed.txt'))\n self.assertTrue(os.path.isfile('/tmp/localhost/pre_got_executed.txt'))",
"def test_missing_hooks_in_repo(tmp_path):\n ProjectMock(tmp_path).style(\n \"\"\"\n [[\".pre-commit-config.yaml\".repos]]\n repo = \"whatever\"\n \"\"\"\n ).pre_commit(\n \"\"\"\n repos:\n - repo: whatever\n \"\"\"\n ).api_check_then_fix(\n Fuss(False, PRE_COMMIT_CONFIG_YAML, 334, \": missing 'hooks' in repo 'whatever'\")\n )",
"def check_all():\n for name, module in sorted(sys.modules.items()): # module files\n filepath = getattr(module, '__file__', None)\n if filepath is None:\n # we land here when a module is an attribute of another module\n # i.e., it exists twice in the sys.modules table, once as its\n # canonical representation, and again having been imported\n # within another module\n continue\n filepath = filepath.endswith(\".pyc\") and filepath[:-1] or filepath\n check_one(filepath)\n\n for filepath in extras: # additional files\n check_one(filepath)",
"def _RunHooks(self, command, file_list, is_using_git):\n # Hooks only run for these command types.\n if not command in ('update', 'revert', 'runhooks'):\n return\n\n # Hooks only run when --nohooks is not specified\n if self._options.nohooks:\n return\n\n # Get any hooks from the .gclient file.\n hooks = self.GetVar(\"hooks\", [])\n # Add any hooks found in DEPS files.\n hooks.extend(self._deps_hooks)\n\n # If \"--force\" was specified, run all hooks regardless of what files have\n # changed. If the user is using git, then we don't know what files have\n # changed so we always run all hooks.\n if self._options.force or is_using_git:\n for hook_dict in hooks:\n self._RunHookAction(hook_dict, [])\n return\n\n # Run hooks on the basis of whether the files from the gclient operation\n # match each hook's pattern.\n for hook_dict in hooks:\n pattern = re.compile(hook_dict['pattern'])\n matching_file_list = [f for f in file_list if pattern.search(f)]\n if matching_file_list:\n self._RunHookAction(hook_dict, matching_file_list)",
"def test_pre_hooks(self):\n os.makedirs('/tmp/localhost/pacha_pre')\n touch_script = open('/tmp/localhost/pacha_pre/foo.sh', 'w')\n touch_script.write('''touch /tmp/localhost/pre_got_executed.txt''')\n touch_script.close()\n run = rebuild.Rebuild(hostname='localhost') \n run.pre_hooks()\n self.assertTrue(os.path.isfile('/tmp/localhost/pre_got_executed.txt'))",
"def validate_hooks(config):\n _validate_hook(config.pre_hook, \"pre\")\n _validate_hook(config.post_hook, \"post\")\n _validate_hook(config.renew_hook, \"renew\")",
"def process_hooks(self, hooks):\n try:\n enabled_hooks = self.project.HOOKS\n except AttributeError:\n return hooks",
"def test_check():\n for f in cfg.required_files:\n assert os.path.isfile(f)",
"def check_patch_files(version, cfg):\n\n logger = logging.getLogger('check_patch_files')\n\n # Patches should live in /srv/patches/[version]\n patch_path = cfg['patch_path']\n if patch_path is None:\n return\n\n version_base = os.path.join(patch_path, version)\n\n ext_dir = os.path.join(version_base, 'extensions')\n _, extensions, _ = os.walk(ext_dir).next()\n\n patches = utils.get_patches(['core'], version_base)\n patches.update(utils.get_patches(extensions, ext_dir))\n\n git_patch_check = ['/usr/bin/git', 'apply', '--check', '--reverse']\n version_dir = 'php-{}'.format(version)\n apply_dir = os.path.join(cfg['stage_dir'], version_dir)\n\n for extension, diffs in patches.iteritems():\n diff = '\\n'.join(diffs)\n\n if extension != 'core':\n apply_dir = os.path.join(apply_dir, 'extensions', extension)\n\n with utils.cd(apply_dir):\n p = subprocess.Popen(\n git_patch_check, stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n\n p.communicate(diff)\n\n if p.returncode > 0:\n logger.warn(\n 'Patch(s) for {} have not been applied.'.format(apply_dir))",
"def cppcheck_on_files(files, commit):\n cppcheck_cmd = local[\"cppcheck\"][\n \"--quiet\",\n \"-j %d\" % (multiprocessing.cpu_count() * 2),\n \"--template={file}###{line}###{severity}###{message}\"]\n\n # Each line in the output is an issue\n review = {}\n rc, out, err = cppcheck_cmd.run(filter_files(files, CPP_SOURCE_FILES),\n retcode=None)\n if len(err) > 0:\n review[\"message\"] = \"[CPPCHECK] Some issues need to be fixed.\"\n\n review[\"comments\"] = defaultdict(list)\n for c in err.split(\"\\n\"):\n if len(c.strip()) == 0: continue\n\n parts = c.split(\"###\")\n\n # Only add a comment if code was changed in the modified region\n if not line_part_of_commit(parts[0], parts[1], commit): continue\n\n review[\"comments\"][parts[0]].append({\n \"path\": parts[0],\n \"line\": parts[1],\n \"message\": \"[{0}] {1}\".format(parts[2], parts[3])\n })\n\n if len(review[\"comments\"]):\n review[\"labels\"] = {\"Code-Review\": -1}\n return json.dumps(review)\n\n # Check the return code only just now as cppcheck might still have returned\n # some valid comments.\n if rc != 0:\n review[\"message\"] = \"[CPPCHECK] Did not complete successfully: \" + out\n return json.dumps(review)\n\n # Add a review comment that no issues have been found\n review[\"message\"] = \"[CPPCHECK] No issues found. OK\"\n return json.dumps(review)",
"def test_style_missing_hooks_in_repo(tmp_path):\n ProjectMock(tmp_path).style(\n \"\"\"\n [[\".pre-commit-config.yaml\".repos]]\n repo = \"another\"\n \"\"\"\n ).pre_commit(\n \"\"\"\n repos:\n - repo: another\n hooks:\n - id: isort\n \"\"\"\n ).api_check_then_fix(\n Fuss(False, PRE_COMMIT_CONFIG_YAML, 335, \": style file is missing 'hooks' in repo 'another'\")\n )",
"def checkGit(directory):",
"def files_unchanged(self):\n\n passed = []\n failed = []\n ignored = []\n fixed = []\n could_fix = False\n\n # Check that we have the minimum required config\n required_pipeline_config = {\"manifest.name\", \"manifest.description\", \"manifest.author\"}\n missing_pipeline_config = required_pipeline_config.difference(self.nf_config)\n if missing_pipeline_config:\n return {\"ignored\": [f\"Required pipeline config not found - {missing_pipeline_config}\"]}\n try:\n prefix, short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\").split(\"/\")\n except ValueError:\n log.warning(\n \"Expected manifest.name to be in the format '<repo>/<pipeline>'. Will assume it is <pipeline> and default to repo 'nf-core'\"\n )\n short_name = self.nf_config[\"manifest.name\"].strip(\"\\\"'\")\n prefix = \"nf-core\"\n\n # NB: Should all be files, not directories\n # List of lists. Passes if any of the files in the sublist are found.\n files_exact = [\n [\".gitattributes\"],\n [\".prettierrc.yml\"],\n [\"CODE_OF_CONDUCT.md\"],\n [\"LICENSE\", \"LICENSE.md\", \"LICENCE\", \"LICENCE.md\"], # NB: British / American spelling\n [os.path.join(\".github\", \".dockstore.yml\")],\n [os.path.join(\".github\", \"CONTRIBUTING.md\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"bug_report.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"config.yml\")],\n [os.path.join(\".github\", \"ISSUE_TEMPLATE\", \"feature_request.yml\")],\n [os.path.join(\".github\", \"PULL_REQUEST_TEMPLATE.md\")],\n [os.path.join(\".github\", \"workflows\", \"branch.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting_comment.yml\")],\n [os.path.join(\".github\", \"workflows\", \"linting.yml\")],\n [os.path.join(\"assets\", \"email_template.html\")],\n [os.path.join(\"assets\", \"email_template.txt\")],\n [os.path.join(\"assets\", \"sendmail_template.txt\")],\n [os.path.join(\"assets\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_light.png\")],\n [os.path.join(\"docs\", \"images\", f\"nf-core-{short_name}_logo_dark.png\")],\n [os.path.join(\"docs\", \"README.md\")],\n [os.path.join(\"lib\", \"nfcore_external_java_deps.jar\")],\n [os.path.join(\"lib\", \"NfcoreTemplate.groovy\")],\n ]\n files_partial = [\n [\".gitignore\", \".prettierignore\", \"pyproject.toml\"],\n ]\n\n # Only show error messages from pipeline creation\n logging.getLogger(\"nf_core.create\").setLevel(logging.ERROR)\n\n # Generate a new pipeline with nf-core create that we can compare to\n tmp_dir = tempfile.mkdtemp()\n\n # Create a template.yaml file for the pipeline creation\n template_yaml = {\n \"name\": short_name,\n \"description\": self.nf_config[\"manifest.description\"].strip(\"\\\"'\"),\n \"author\": self.nf_config[\"manifest.author\"].strip(\"\\\"'\"),\n \"prefix\": prefix,\n }\n\n template_yaml_path = os.path.join(tmp_dir, \"template.yaml\")\n with open(template_yaml_path, \"w\") as fh:\n yaml.dump(template_yaml, fh, default_flow_style=False)\n\n test_pipeline_dir = os.path.join(tmp_dir, f\"{prefix}-{short_name}\")\n create_obj = nf_core.create.PipelineCreate(\n None, None, None, no_git=True, outdir=test_pipeline_dir, template_yaml_path=template_yaml_path\n )\n create_obj.init_pipeline()\n\n # Helper functions for file paths\n def _pf(file_path):\n \"\"\"Helper function - get file path for pipeline file\"\"\"\n return os.path.join(self.wf_path, file_path)\n\n def _tf(file_path):\n \"\"\"Helper function - get file path for template file\"\"\"\n return 
os.path.join(test_pipeline_dir, file_path)\n\n # Files that must be completely unchanged from template\n for files in files_exact:\n # Ignore if file specified in linting config\n ignore_files = self.lint_config.get(\"files_unchanged\", [])\n if any([f in ignore_files for f in files]):\n ignored.append(f\"File ignored due to lint config: {self._wrap_quotes(files)}\")\n\n # Ignore if we can't find the file\n elif not any([os.path.isfile(_pf(f)) for f in files]):\n ignored.append(f\"File does not exist: {self._wrap_quotes(files)}\")\n\n # Check that the file has an identical match\n else:\n for f in files:\n try:\n if filecmp.cmp(_pf(f), _tf(f), shallow=True):\n passed.append(f\"`{f}` matches the template\")\n else:\n if \"files_unchanged\" in self.fix:\n # Try to fix the problem by overwriting the pipeline file\n shutil.copy(_tf(f), _pf(f))\n passed.append(f\"`{f}` matches the template\")\n fixed.append(f\"`{f}` overwritten with template file\")\n else:\n failed.append(f\"`{f}` does not match the template\")\n could_fix = True\n except FileNotFoundError:\n pass\n\n # Files that can be added to, but that must contain the template contents\n for files in files_partial:\n # Ignore if file specified in linting config\n ignore_files = self.lint_config.get(\"files_unchanged\", [])\n if any([f in ignore_files for f in files]):\n ignored.append(f\"File ignored due to lint config: {self._wrap_quotes(files)}\")\n\n # Ignore if we can't find the file\n elif not any([os.path.isfile(_pf(f)) for f in files]):\n ignored.append(f\"File does not exist: {self._wrap_quotes(files)}\")\n\n # Check that the file contains the template file contents\n else:\n for f in files:\n try:\n with open(_pf(f), \"r\") as fh:\n pipeline_file = fh.read()\n with open(_tf(f), \"r\") as fh:\n template_file = fh.read()\n if template_file in pipeline_file:\n passed.append(f\"`{f}` matches the template\")\n else:\n if \"files_unchanged\" in self.fix:\n # Try to fix the problem by overwriting the pipeline file\n with open(_tf(f), \"r\") as fh:\n template_file = fh.read()\n with open(_pf(f), \"w\") as fh:\n fh.write(template_file)\n passed.append(f\"`{f}` matches the template\")\n fixed.append(f\"`{f}` overwritten with template file\")\n else:\n failed.append(f\"`{f}` does not match the template\")\n could_fix = True\n except FileNotFoundError:\n pass\n\n # cleaning up temporary dir\n shutil.rmtree(tmp_dir)\n\n return {\"passed\": passed, \"failed\": failed, \"ignored\": ignored, \"fixed\": fixed, \"could_fix\": could_fix}",
"def flake8_on_files(files, commit):\n style = get_style_guide(config_file=None, quiet=False)\n\n # We need to redirect stdout while generating the JSON to avoid spilling\n # messages to the user.\n old_stdout = sys.stdout\n sys.stdout = open(\"/dev/null\", \"w\")\n review = {}\n for file in filter_files(files, (\".py\", )):\n report = style.check_files((file, ))\n if report.total_errors:\n if not \"comments\" in review:\n review[\"comments\"] = defaultdict(list)\n for line_number, offset, code, text, doc in report._deferred_print:\n if not line_part_of_commit(file, line_number, commit): continue\n review[\"comments\"][file].append({\n \"path\": file,\n \"line\": line_number,\n \"message\": \"[{0}] {1}\".format(code, text)\n })\n if \"comments\" in review and len(review[\"comments\"]):\n review[\"message\"] = \"[FLAKE8] Some issues found.\"\n else:\n review[\"message\"] = \"[FLAKE8] No issues found. OK\"\n sys.stdout = old_stdout\n return json.dumps(review)",
"def test_pylint(self):\n files_list = []\n\n for root, dirnames, filenames in os.walk(PROJECT_DIR):\n if ignore(root):\n continue\n\n for filename in fnmatch.filter(filenames, '*.py'):\n files_list.append(os.path.join(root, filename))\n\n for file in files_list:\n # (pylint_stdout, pylint_stderr) = epylint.py_run(\n # command_options=\"{} --errors-only\".format(file),\n # return_std=True)\n\n # print(pylint_stdout.getvalue())\n # print(pylint_stderr.getvalue())\n\n call([\n 'pylint',\n '--errors-only',\n file])",
"def test_pre_commit_referenced_in_style(tmp_path):\n ProjectMock(tmp_path).style(\n \"\"\"\n [\".pre-commit-config.yaml\"]\n fail_fast = true\n \"\"\"\n ).pre_commit(\"\").api_check_then_fix(Fuss(False, PRE_COMMIT_CONFIG_YAML, 331, \" doesn't have the 'repos' root key\"))",
"def test_provider_system_hook_file_shred(change_dir, clean_files):\n files = ['stuff', 'thing', 'foo']\n for f in files:\n file = open(f, \"w\")\n file.write(f)\n file.close()\n\n tackle('.', no_input=True, context_file='shred.yaml')\n\n for f in files:\n assert not os.path.isfile(f)",
"def test_provider_system_hook_file(change_dir, clean_files):\n tackle(no_input=True)\n assert 'thing.yaml' in os.listdir()\n assert 'stuff' in os.listdir()\n # If the file has been moved properly there should be only one file\n assert len(os.listdir('stuff')) == 3"
] | [
"0.6876963",
"0.68466324",
"0.67995423",
"0.6654954",
"0.6572371",
"0.6481038",
"0.64283687",
"0.63959146",
"0.6318683",
"0.63177663",
"0.6232557",
"0.6230077",
"0.6152871",
"0.6043679",
"0.60293245",
"0.6020104",
"0.6003532",
"0.59991527",
"0.5969695",
"0.59456915",
"0.581716",
"0.5817041",
"0.5797657",
"0.5751834",
"0.5745181",
"0.5737029",
"0.5723159",
"0.57103693",
"0.57101333",
"0.5708681"
] | 0.70069766 | 0 |
Fetch the large training and test data set. | def _fetch_large():
# Large training data:
resource(
target=data_path("eeg", "SMNI_CMI_TRAIN.tar.gz"),
url="https://kdd.ics.uci.edu/databases/eeg/SMNI_CMI_TRAIN.tar.gz",
)
dependency(
target=data_path("eeg", "train"),
source=data_path("eeg", "SMNI_CMI_TRAIN.tar.gz"),
commands=[
"tar xzf SMNI_CMI_TRAIN.tar.gz",
"mv SMNI_CMI_TRAIN train",
"find train | grep gz$ | xargs gunzip",
],
)
# Large test data:
resource(
target=data_path("eeg", "SMNI_CMI_TEST.tar.gz"),
url="https://kdd.ics.uci.edu/databases/eeg/SMNI_CMI_TEST.tar.gz",
)
dependency(
target=data_path("eeg", "test"),
source=data_path("eeg", "SMNI_CMI_TEST.tar.gz"),
commands=[
"tar xzf SMNI_CMI_TEST.tar.gz",
"mv SMNI_CMI_TEST test",
"find test | grep gz$ | xargs gunzip",
],
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _load_training_and_test_sets(normalize):\n class_labels = []\n test_labels = []\n norm = None\n if normalize == True:\n norm = loading.get_normalize_vector()\n\n for i in range(0, 10):\n [training, test] = loading.load_number_set(i, 0.7, norm_vector=norm)\n labels = [str(i)] * training.shape[0]\n tlabels = [str(i)] * test.shape[0]\n if i == 0:\n train_points = training\n test_points = test\n else:\n train_points = np.concatenate((train_points, training), axis = 0)\n test_points = np.concatenate((test_points, test), axis = 0)\n class_labels.extend(labels)\n test_labels.extend(tlabels)\n\n return train_points, test_points, class_labels, test_labels",
"def load_occupancy_dataset(trainsize=500, testsize=1000):\n filename = 'datasets/numericsequence.csv'\n dataset = loadcsv(filename)\n trainset, testset = splitdataset(dataset, trainsize, testsize)\n return trainset, testset",
"def load_data():\n data = gzip.open(\"mnist.pkl.gz\", \"rb\")\n train_set, valid_set, test_set = cPickle.load(data)\n data.close()\n\n # Combine validation and train folds to recreate the master 60k set.\n new_images = numpy.concatenate((train_set[0], valid_set[0]))\n new_labels = numpy.concatenate((train_set[1], valid_set[1]))\n\n train_set = (new_images, new_labels)\n \n return (train_set, test_set)",
"def load_training_data(config):\n # Load data\n LOGGER.info(\"Loading training data.\")\n train_x = load_data(config['data_source'], config['train_x_filename'])\n train_y = load_data(config['data_source'], config['train_y_filename'])\n val_x = load_data(config['data_source'], config['val_x_filename'])\n val_y = load_data(config['data_source'], config['val_y_filename'])\n LOGGER.info(\"Training data size: %d\", len(train_x))\n LOGGER.info(\"Validation data size: %d\", len(val_x))\n\n # Build datasets and create iterators\n LOGGER.info(\"Building dataset.\")\n train_dataset = get_dataset(\n train_x, train_y, config['batch_size'], config['data_shape'],\n config['n_classes'], True)\n val_dataset = get_dataset(\n val_x, val_y, config['batch_size'], config['data_shape'],\n config['n_classes'])\n\n return train_dataset, val_dataset, len(val_x)",
"def data_set_maker():\n\n # crate a folder in your code directory and name it: \"files\". put the .npy files iside that folder\n\n x_all = np.load(path + '/files/tinyX.npy', 'r') # reads the input file\n y_all = np.load(path + '/files/tinyY.npy', 'r') # reads the input file\n\n # split the data into 10% validation-set and 90% training set\n raw_train, raw_valid, y_train, y_valid = train_test_split(x_all, y_all, test_size=0.2, random_state=43)\n return raw_train, raw_valid, y_train, y_valid",
"def get_dldata(filepath, dlTrainCorpusPath, dlTestCorpusPath, seed=2018, batch_size=16):\r\n\tf = open(\"record/synthetic and academic datasets/testcases_train.pkl\",'rb') #get the testcase ids of train sets and test sets\r\n\ttestcases += pickle.load(f) \r\n\tf.close()\r\n\r\n\tf = open(\"record/synthetic and academic datasets/testcases_test.pkl\",'rb')\r\n\ttestcases += pickle.load(f)\r\n\tf.close()\r\n\t\r\n print(\"produce train dataset...\") \r\n N = 6\r\n num = list(range(N))\r\n for i in num:\r\n train_set = [[], [], [], [], [], []]\r\n for folder_train in folders_train[int(i*len(folders_train)/N) : int((i+1)*len(folders_train)/N)]:\r\n if not folder_train in os.listdir(filepath):\r\n continue\r\n print(\"\\r\"+str(folder_train), end='')\r\n for filename in os.listdir(os.path.join(filepath, folder_train)):\r\n f = open(filepath + folder_train + '/' + filename, 'rb')\r\n data = pickle.load(f)\r\n f.close()\r\n if len(data[0][0]) > MAXLEN:\r\n data[2] = [x for x in data[2] if x <= MAXLEN]\r\n data[0] = cutdata(data[0][0])\r\n if data[0] == None:\r\n continue \r\n for n in range(len(data)):\r\n train_set[n].append(data[n])\r\n train_set[-1].append(folder_train+\"/\"+filename)\r\n f_train = open(dlTrainCorpusPath + \"train_\" + str(i)+ \"_0818.pkl\", 'wb')\r\n pickle.dump(train_set, f_train)\r\n f_train.close()\r\n\r\n del train_set \r\n gc.collect() \r\n\r\n print(\"\\nproduce test dataset...\")\r\n N = 6\r\n num = list(range(N))\r\n for i in num:\r\n test_set = [[], [], [], [], [], []]\r\n for folder_test in folders_test[int(i*len(folders_test)/N) : int((i+1)*len(folders_test)/N)]:\r\n if not folder_test in os.listdir(filepath):\r\n continue\r\n print(\"\\r\"+str(folder_test), end='')\r\n for filename in os.listdir(os.path.join(filepath, folder_test)):\r\n f = open(filepath + folder_test + '/' + filename, 'rb')\r\n data = pickle.load(f)\r\n f.close()\r\n if len(data[0][0]) > MAXLEN:\r\n data[2] = [x for x in data[2] if x <= MAXLEN]\r\n data[0] = cutdata(data[0][0])\r\n if data[0] == None:\r\n continue \r\n for n in range(len(data)):\r\n test_set[n].append(data[n])\r\n test_set[-1].append(folder_test+\"/\"+filename)\r\n \r\n f_test = open(dlTestCorpusPath + \"test_\" + str(i)+ \"_0124.pkl\", 'wb')\r\n pickle.dump(test_set, f_test)\r\n f_test.close()\r\n\r\n del test_set\r\n gc.collect()\r\n return",
"def get_data(dataset, max_train_size=None, max_test_size=None, do_preprocess=True, train_start=0,\n test_start=0, prefix=\"processed\", x_dims=None):\n if max_train_size is None:\n train_end = None\n else:\n train_end = train_start + max_train_size\n if max_test_size is None:\n test_end = None\n else:\n test_end = test_start + max_test_size\n print('load data of:', dataset)\n print(\"train: \", train_start, train_end)\n print(\"test: \", test_start, test_end)\n if x_dims is None:\n x_dim = get_data_dim(dataset)\n else:\n x_dim = x_dims\n f = open(os.path.join(prefix, dataset + '_train.pkl'), \"rb\")\n train_data = pickle.load(f).reshape((-1, x_dim))[train_start:train_end, :]\n f.close()\n try:\n f = open(os.path.join(prefix, dataset + '_test.pkl'), \"rb\")\n test_data = pickle.load(f).reshape((-1, x_dim))[test_start:test_end, :]\n f.close()\n except (KeyError, FileNotFoundError):\n test_data = None\n try:\n f = open(os.path.join(prefix, dataset + \"_test_label.pkl\"), \"rb\")\n test_label = pickle.load(f).reshape((-1))[test_start:test_end]\n f.close()\n except (KeyError, FileNotFoundError):\n test_label = None\n if do_preprocess:\n train_data, test_data = preprocess(train_data, test_data)\n print(\"train set shape: \", train_data.shape)\n print(\"test set shape: \", test_data.shape)\n if test_label is not None:\n print(\"test label shape: \", test_label.shape)\n print()\n return (train_data, None), (test_data, test_label)",
"def load_train_test_transactions(train_size=0.7):\n X, y = features_target_split()\n X_train, X_test, y_train, y_test = train_test_split(X,y,train_size=train_size, random_state=7)\n print('\\nTraining and testing data creation successful\\n')\n return X_train, X_test, y_train,y_test",
"def _load_training_data(self):\n self._save_training_data()",
"def download_train_test_set(save_folder):\n df = extract_notes(os.environ[\"DB_CONFIG\"])\n train_df, test_df = split_df(df)\n\n # if save folder does not exist, create it\n if not os.path.exists(save_folder):\n os.makedirs(save_folder)\n\n # save train_df\n save_data(train_df, save_folder, \"training_mimic.jsonl\")\n\n # save test_df\n save_data(test_df, save_folder, \"testing_mimic.jsonl\")",
"def load_datasets():\n from .dataset import num_classes, image_size\n\n train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)\n test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)\n\n train_folders = maybe_extract(train_filename)\n test_folders = maybe_extract(test_filename)\n if not (len(train_folders) == len(test_folders) == num_classes):\n raise Exception('Expected %d folders, one per class. Found %d and %d instead.' % (\n num_classes, len(train_folders), len(test_folders)))\n print(\"Dataset folders: %s, %s\" % (train_folders, test_folders))\n\n # load datasets\n train_datasets = maybe_pickle(train_folders, 45000, image_size)\n test_datasets = maybe_pickle(test_folders, 1800, image_size)\n\n return train_datasets, test_datasets",
"def Train_data():\n print (\"loading train data ...\")\n time_start = time.time()\n data_root = '/media/keziwen/86AA9651AA963E1D'\n with h5py.File(join(data_root, './data/train_real2.h5')) as f:\n data_real = f['train_real'][:]\n num, nt, ny, nx = data_real.shape\n data_real = np.transpose(data_real, (0, 1, 3, 2))\n with h5py.File(join(data_root, './data/train_imag2.h5')) as f:\n data_imag = f['train_imag'][:]\n num, nt, ny, nx = data_imag.shape\n data_imag = np.transpose(data_imag, (0, 1, 3, 2))\n data = data_real+1j*data_imag\n num_train = 15000\n num_validate = 2000\n train_data = data[0:num_train]\n validate_data = data[num_train:num_train+num_validate]\n\n train_data = np.random.permutation(train_data)\n\n time_end = time.time()\n print ('dataset has been created using {}s'.format(time_end-time_start))\n return train_data, validate_data",
"def load_data(self):\n with open('data/fordTrain.csv') as f:\n data = csv.reader(f, delimiter=',')\n train = [x for i, x in enumerate(data) if i > 0] \n # Extract features and target variable separately\n trainx = [x[3:] for x in train]\n trainy = [x[2] for x in train]\n\n with open('data/fordTest.csv') as f:\n data = csv.reader(f, delimiter=',')\n testx = [x[3:] for i, x in enumerate(data) if i > 0] \n\n with open('data/Solution.csv') as f:\n data = csv.reader(f, delimiter=',')\n testy = [x[2] for i, x in enumerate(data) if i > 0] \n\n # Extract features and target variable, convert to numpy array\n trainx = np.asarray(trainx, dtype=np.float32)\n trainy = np.asarray(trainy, dtype=np.int8)\n testx = np.asarray(testx, dtype=np.float32)\n testy = np.asarray(testy, dtype=np.int8)\n\n # Return training and test sets\n trainSet = Dataset(trainx, trainy)\n testSet = Dataset(testx, testy)\n return trainSet, testSet",
"def fetch_test_batch(self):\n data = self.data\n # size of train dataset\n num_train = data['train'].shape[0]\n image_size = self.image_size\n # index of test image that is being classified in this batch\n batch_index = self.test_batch_index\n\n # create batch array\n X = np.zeros([2 * num_train, image_size[0], image_size[1]], dtype='uint8')\n # first half are all training images\n X[:num_train, ...] = data['train']\n # second half is copy of a batch_index-th test image to be classified\n X[num_train:, ...] = data['test'][batch_index, ...]\n # true label is extracted from array of indexes where particular class start\n test_label = np.argmax(self.starts['test']>batch_index) - 1\n\n # rescale intensities and center\n X = X / 255.0\n X = X - self.mean_train\n\n X = X[:, np.newaxis]\n X = X.astype(\"float32\")\n\n self.test_batch_index += 1\n\n X = Variable(torch.from_numpy(X)).view(2 * num_train, self.image_size[0], self.image_size[1])\n\n # stack batch by second axis to [batch size, 2 (pair to be compared), image height, image width]\n X1 = X[:num_train] # (B, h, w)\n X2 = X[num_train:] # (B, h, w)\n\n X = torch.stack([X1, X2], dim=1) # (B, 2, h, w)\n\n if use_cuda:\n X = X.cuda()\n # using test dataset size and current index for controlling test loop in test_model.py\n return X, test_label, data['test'].shape[0], self.test_batch_index",
"def run_train_test_split():\n # Load all documents\n conn = sq.connect(config.DB_FILE)\n documents = pd.read_sql_query('select pubmed_id, review_id, included, title, abstract from article ', conn)\n\n # Identify unique review IDs\n review_ids = documents['review_id'].unique()\n\n # Set seed for random sampling\n np.random.seed(2)\n\n # List of Reviews in the partial data set and full data set\n partial_set = list(np.random.choice(review_ids, 10, replace=False))\n full_set = list(review_ids.copy())\n\n # Load array (X) and labels (Y) of all documents\n with (open(config.DOC_TERM_MATRIX, \"rb\")) as openfile:\n X = pickle.load(openfile)\n\n y = documents['included']\n\n # Train-test split of the partial dataset\n train_test_split(X, y, partial_set, 'min_max', 'partial', review_ids)\n train_test_split(X, y, partial_set, 'tf_idf', 'partial', review_ids)\n\n # Train-test split of the full dataset\n train_test_split(X, y, full_set, 'min_max', 'full', review_ids)\n train_test_split(X, y, full_set, 'tf_idf', 'full', review_ids)",
"def load_data():\n t = time()\n print 'loading tweets, please wait...'\n trained_tweets = load_tweets('training_dataset')\n eval_tweets = load_tweets('evaluation_dataset')\n print 'Time taken {}'.format(time() - t)\n t = time()\n print 'loading w2v model, please wait...'\n model = w2v_load_model('GoogleNews-vectors-negative300.bin')\n print 'Time taken {}'.format(time() - t)\n return trained_tweets, eval_tweets, model",
"def load_training_set():\n global training_set\n f = gzip.open('mnist.pkl.gz', 'rb')\n train, valid, test = cPickle.load(f)\n [training_set, training_labels] = train\n [validation_set, validation_labels] = valid\n [testing_set, testing_labels] = test\n training_set = np.concatenate((training_set, validation_set))\n f.close()\n np.random.shuffle(training_set)",
"def get_data(train_path,\n test_path,\n tokenize='spacy',\n max_vocab_size=25000,\n train_valid_split=0.8,\n toy=False):\n train_data = pd.read_csv(train_path)\n test_data = pd.read_csv(test_path)\n\n if toy:\n train_data = train_data.head(100)\n test_data = test_data.head(100)\n\n train_data, test_data = pre_process_df(train_data, test_data)\n\n train_data_path = \"train_processed.csv\"\n test_data_path = \"test_processed.csv\"\n\n train_data.to_csv(train_data_path, header=False, index=False)\n test_data.to_csv(test_data_path, header=False, index=False)\n\n if tokenize == 'spacy':\n TEXT = data.Field(tokenize=tokenize)\n else:\n TEXT = data.Field()\n\n LABEL = data.LabelField(dtype=torch.float)\n train = data.TabularDataset(path=train_data_path,\n format=\"csv\",\n fields=[('text', TEXT),\n ('label', LABEL)])\n test = data.TabularDataset(path=test_data_path,\n format=\"csv\",\n fields=[('text', TEXT),\n ('label', LABEL)])\n\n os.remove(train_data_path)\n os.remove(test_data_path)\n\n train, valid = train.split(train_valid_split)\n\n TEXT.build_vocab(train, max_size=max_vocab_size)\n LABEL.build_vocab(train)\n\n return TEXT, LABEL, train, valid, test",
"def test_cached_dataloader(self):\n\n v = [\"data\", \"target\", \"model_out_sqnet\"]\n\n for data, target in self.train_loader:\n b, c, h, w = data[v[0]].shape\n assert data[v[1]].shape == (b, )\n assert data[v[2]].shape == (b, 100)\n assert data[v[1]].shape == target.shape",
"def test_query_train_jobs_with_large_offset(self, client):\n params = dict(offset=10000, limit=10)\n url = get_url(BASE_URL, params)\n response = client.get(url)\n result = response.get_json()\n assert result.get('total') == SUMMARY_DIR_NUM\n assert len(result.get('train_jobs')) == min(\n max(0, SUMMARY_DIR_NUM - 1000), 10)",
"def load_dataset():\n temp = gzip.open('mnist.pkl.gz')\n train, val , test = pickle.load(temp,encoding='latin1')\n temp.close()\n train_inp = [np.reshape(x, (784,1)) for x in train[0]]\n train_outp = [one_hot(y) for y in train[1]]\n training_data = zip(train_inp, train_outp)\n validation_inp = [np.reshape(x, (784, 1)) for x in val[0]]\n validation_data = zip(validation_inp, val[1])\n test_inp = [np.reshape(x, (784, 1)) for x in test[0]]\n test_data = zip(test_inp, test[1])\n return (training_data,validation_data,test_data)",
"def get_training_and_testing_sets(data, Y):\r\n data = pd.concat([data, Y], axis=1)\r\n x,y=data.shape\r\n train_X_sub1=data[0:x//6]\r\n dev_X_sub1 = data[x//6:x//6 + x//12]\r\n test_X_sub1 = data[x//6 + x//12:x//3]\r\n\r\n train_X_sub2 = data[x//3:x//3+x//6]\r\n dev_X_sub2 = data[x//6 + x//3:x//3 + x//6 + x//12]\r\n test_X_sub2 = data[x//3 + x//6 + x//12:2*x//3]\r\n\r\n train_X_sub3 = data[2*x//3:(2*x//3) +x//6]\r\n dev_X_sub3 = data[x//6 + 2*x//3: (2*x//3) + x//6 + x//12]\r\n test_X_sub3 = data[2*x//3 + x//6 + x//12:x]\r\n\r\n train_X=train_X_sub1.append(train_X_sub2,ignore_index = True)\r\n train_X =train_X.append(train_X_sub3,ignore_index = True)\r\n dev_X= dev_X_sub1.append(dev_X_sub2,ignore_index = True)\r\n dev_X = dev_X.append(dev_X_sub3,ignore_index = True)\r\n test_X = test_X_sub1.append(test_X_sub2,ignore_index = True)\r\n test_X = test_X.append(test_X_sub3,ignore_index = True)\r\n\r\n\r\n train_X = util.shuffle(train_X)\r\n train_X = train_X.reset_index(drop=True)\r\n\r\n dev_X = util.shuffle(dev_X)\r\n dev_X = dev_X.reset_index(drop=True)\r\n\r\n test_X = util.shuffle(test_X)\r\n test_X = test_X.reset_index(drop=True)\r\n\r\n train_X_final=train_X\r\n dev_X_final = dev_X\r\n test_X_final = test_X\r\n x, y = train_X_final.shape\r\n train_X = train_X_final.iloc[:, 0:y - 1]\r\n train_Y = train_X_final.iloc[:, y - 1]\r\n\r\n x, y = test_X_final.shape\r\n test_X = test_X_final.iloc[:, 0:y - 1]\r\n test_Y = test_X_final.iloc[:, y - 1]\r\n\r\n x, y = dev_X_final.shape\r\n dev_X = dev_X_final.iloc[:, 0:y - 1]\r\n dev_Y = dev_X_final.iloc[:, y - 1]\r\n\r\n return train_X, train_Y, dev_X,dev_Y,test_X, test_Y",
"def train(self, force=False):\n return self._fetch_base_data(force)",
"def load_all(): \n training_data = dict() \n for i in range(7):\n training_data[i+1] = load_data(i+1) \n\n return training_data",
"def load_data(self):\n\n self._load_train_data()\n self._load_test_data()",
"def build_data_set(self):\n if not self.assert_data_correct():\n self.download_all_data()\n self.unpack_rename_data()\n self.split_data_characters()\n self.clean_data_fragments()\n self.create_font_data()\n if not self.assert_train_augmented():\n self.augment_train_data()\n if not self.assert_style_data_correct():\n self.download_style_data()\n self.unpack_rename_data()",
"def load_all(test_num=100):\n\ttrain_data = pd.read_csv(\n\t\tconfig.train_rating, \n\t\tsep='\\t', header=None, names=['user', 'item'], \n\t\tusecols=[0, 1], dtype={0: np.int32, 1: np.int32})\n\n\tuser_num = train_data['user'].max() + 1\n\titem_num = train_data['item'].max() + 1\n\n\ttrain_data = train_data.values.tolist()\n\n\t# load ratings as a dok matrix\n\ttrain_mat = sp.dok_matrix((user_num, item_num), dtype=np.float32)\n\tfor x in train_data:\n\t\ttrain_mat[x[0], x[1]] = 1.0\n\n\ttest_data = []\n\twith open(config.test_negative, 'r') as fd:\n\t\tline = fd.readline()\n\t\twhile line != None and line != '':\n\t\t\tarr = line.split('\\t')\n\t\t\tu = eval(arr[0])[0]\n\t\t\ttest_data.append([u, eval(arr[0])[1]])\n\t\t\tfor i in arr[1:]:\n\t\t\t\ttest_data.append([u, int(i)])\n\t\t\tline = fd.readline()\n\treturn train_data, test_data, user_num, item_num, train_mat",
"def read_data(train_data_dir, test_data_dir):\r\n train_clients, train_data = read_dir(train_data_dir)\r\n test_clients, test_data = read_dir(test_data_dir)\r\n # 可能clients读入的顺序不一样\r\n assert train_clients.sort() == test_clients.sort()\r\n\r\n return train_clients, train_data, test_data",
"def load_datasets(self):\n if self.processed_extension == '.npz':\n logger.info(f'Loading sets from npz:')\n \n logger.info(f'train: {self.train_path}')\n self.train_data = sparse.load_npz(self.train_path)\n\n logger.info(f'val: {self.val_path}')\n self.val_data = sparse.load_npz(self.val_path)\n\n logger.info(f'test: {self.test_path}')\n self.test_data = sparse.load_npz(self.test_path)\n \n # Split x and y\n self.train_data = [sparse.lil_matrix(sparse.csr_matrix(self.train_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.train_data)[:,-1])]\n \n self.val_data = [sparse.lil_matrix(sparse.csr_matrix(self.val_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.val_data)[:,-1])]\n \n self.test_data = [sparse.lil_matrix(sparse.csr_matrix(self.test_data)[:,:-1]),\n sparse.lil_matrix(sparse.csr_matrix(self.test_data)[:,-1])]\n \n elif self.processed_extension == '.csv':\n logger.info(f'Loading sets from csv:')\n \n logger.info(f'train: {self.train_path}')\n self.train_data = pd.read_csv(self.train_path)\n train_cols = self.train_data.columns\n self.train_data = [self.train_data[train_cols.difference(['TARGET'])],\n self.train_data['TARGET']]\n \n logger.info(f'val: {self.val_path}')\n self.val_data = pd.read_csv(self.val_path)\n self.val_data = [self.val_data[train_cols.difference(['TARGET'])],\n self.val_data['TARGET']]\n \n logger.info(f'test: {self.test_path}')\n self.test_data = pd.read_csv(self.test_path)\n self.test_data = [self.test_data[train_cols.difference(['TARGET'])],\n self.test_data['TARGET']]\n else:\n raise AttributeError(f'Wrong extension: {self.processed_extension}')\n self.n_train = self.train_data[0].shape[0]\n self.n_val = self.val_data[0].shape[0]\n self.n_test = self.test_data[0].shape[0]\n self.input_size = self.train_data[0].shape[1]\n self.n_examples = self.n_train + self.n_val + self.n_test\n \n logger.info(f'Set sizes:')\n logger.info(f'train: {self.n_train}')\n logger.info(f'val: {self.n_val}')\n logger.info(f'test: {self.n_test}')",
"def load_bottleneck_data(training_file, validation_file):\n print(\"Training file\", training_file)\n print(\"Validation file\", validation_file)\n\n with open(training_file, 'rb') as f:\n train_data = pickle.load(f)\n with open(validation_file, 'rb') as f:\n validation_data = pickle.load(f)\n\n X_train = train_data['features']\n y_train = train_data['labels']\n X_val = validation_data['features']\n y_val = validation_data['labels']\n\n return X_train, y_train, X_val, y_val"
] | [
"0.6598382",
"0.6573951",
"0.6541831",
"0.6520022",
"0.64941293",
"0.6416177",
"0.6404021",
"0.64026076",
"0.63473725",
"0.6340111",
"0.6321149",
"0.6319699",
"0.63178426",
"0.6309703",
"0.6307903",
"0.6301534",
"0.6286118",
"0.62821674",
"0.6274003",
"0.6271432",
"0.62667745",
"0.62584513",
"0.62481356",
"0.62475663",
"0.6244308",
"0.6243212",
"0.6228894",
"0.6226913",
"0.6226353",
"0.6195496"
] | 0.7176988 | 0 |
This function is used to create the container in Phantom using finding data. | def _create_container(self, finding):
container_dict = {}
container_dict['name'] = finding['Title']
container_dict['source_data_identifier'] = finding['Id']
container_dict['description'] = finding['Description']
container_creation_status, container_creation_msg, container_id = self.save_container(container=container_dict)
if phantom.is_fail(container_creation_status):
self.debug_print(container_creation_msg)
self.save_progress('Error while creating container for finding {finding_id}. '
'{error_message}'.format(finding_id=finding['Id'],
error_message=container_creation_msg))
return None
return container_id | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_create_container(self):\n pass",
"def createContainer(tag, data={}): #@NoSelf",
"def container_factory(self, name):",
"def container_factory(self, name):",
"def container_factory(self, name):",
"def container_factory(self, name):",
"def container_factory(self, name):",
"def __create__container(self):\n self.__used_containers.append(contenedor.Arena(self.__blocks_size))",
"def factory(container, name, factory):",
"def test_create(self):\n\n cont_num = len(CLIENT.containers_list)\n\n message = {\"method\": \"create\",\n \"params\": {\"elem\": self.tag_image}}\n response = yield self._get_response(message)\n\n self.assertIsInstance(response, dict)\n self.assertEqual(response[\"method\"], \"create\")\n self.assertIsInstance(response[\"result\"], list)\n self.assertEqual(len(response[\"result\"]), cont_num + 1)",
"def _create(self, variables):\n required_vars = ['container']\n variables_dict = self._get_vars(variables, required=required_vars)\n\n container_name = variables_dict.pop('container')\n container_data = self._create_container(container_name=container_name)\n\n if not container_data:\n container_data = self.swift.head_container(container_name)\n\n return self._facts(facts=[container_data])",
"def test_get_container(self):\n pass",
"def _newcontainer(self, siginfo):\n pass",
"def __init__(self, container):\r\n self.container = container",
"def __init__(self, container):\r\n self.container = container",
"def _process_createContainer(self, data):\r\n try:\r\n self._avatar.createContainer(data['containerTag'],\r\n data.get('containerData', {}))\r\n except KeyError as e:\r\n raise InvalidRequest(\"Can not process 'CreateContainer' request. \"\r\n 'Missing key: {0}'.format(e))",
"def create_container(self, **kwargs):\n story_host = self.get_client(delegate_login=True).story.api_client.external_root_url\n source_url = \"{0}/cache/{1}\".format(story_host, self.nonce)\n empty_parent_div = lxml.html.Element(\"div\", {\n 'class': 'empty-parent bg-light',\n 'style': 'height: 100%; width: 100%, display: block; text-align: left;'\n })\n frame = lxml.html.Element(\"iframe\", {\n 'src': source_url,\n 'frameborder': \"0\",\n 'scrolling': \"auto\",\n 'class': 'd3-responsive-frame',\n 'style': 'max-height: none; max-width: none; height:100%; width: 100%;',\n 'sandbox': 'allow-scripts allow-same-origin'\n })\n empty_parent_div.append(frame)\n return lxml.html.tostring(empty_parent_div).decode('utf-8')",
"def remote_createContainer(self, uid, data):\r\n try:\r\n nr = self._nrs.pop()\r\n except KeyError:\r\n raise MaxNumberExceeded('Can not manage any additional container.')\r\n\r\n container = RCEContainer(self, nr, uid, data)\r\n return container.start().addCallback(lambda _: container)",
"def createBeamspotFinder(config=jobConfig, containerName = \"VxPrimaryCandidate\",suffix=\"\"):\n import AthenaCommon.CfgMgr as CfgMgr\n from AthenaCommon.AppMgr import ToolSvc\n from AthenaCommon.AlgSequence import AlgSequence\n topSequence = AlgSequence()\n\n # Extra options that may not be in default jobConfig\n\n if not 'MinVertexProb' in config:\n config['MinVertexProb'] = 0.01\n if not 'MaxVtxChi2' in config:\n config['MaxVtxChi2'] = 100 \n\n if not 'FixParK' in config:\n config['FixParK'] = False\n\n if not 'MaxSigmaTr' in config:\n config['MaxSigmaTr'] = 100.\n if not 'MaxVtxErrTr' in config:\n config['MaxVtxErrTr'] = 100.\n if not 'OutlierChi2Tr' in config:\n config['OutlierChi2Tr'] = 50.\n\n \n InDetBeamSpotVertex = CfgMgr.InDet__InDetBeamSpotVertex(name= 'InDetBeamSpotVertex_'+containerName+suffix,\n VertexContainer = containerName,\n VertexTypes = config['VertexTypes'],\n MinTracksPerVtx = config['MinTracksPerVtx'], \n MinVtxNum = config['MinVtxNum'],\n MaxOutlierLoops = 30,\n OutlierMaxRejection = 30,\n OutlierWidthFail= 5.1e-3, # in mm\n OutlierRhoFail = 0.8,\n DoHists = doVertexHists,\n OutputLevel = min(INFO,config['outputlevel']),\n VertexTreeName = \"Vertices_\"+containerName+suffix,\n MinVertexProb = config['MinVertexProb'],\n MaxVtxChi2 = config['MaxVtxChi2'],\n MaxSigmaTr = config['MaxSigmaTr'] ,\n MaxVtxErrTr = config['MaxVtxErrTr'] ,\n OutlierChi2Tr = config['OutlierChi2Tr']\n )\n ToolSvc += InDetBeamSpotVertex\n # Will be automatically printed as part of InDetBeamSpotFinder printout\n # print ToolSvc.InDetBeamSpotVertex\n \n # from InDetBeamSpotFinder.InDetBeamSpotFinderConf import InDet__InDetBeamSpotDbWriterTool\n InDetBeamSpotDbWriterTool = CfgMgr.InDet__InDetBeamSpotDbWriterTool(name = 'InDetBeamSpotDbWriterTool_'+containerName+suffix,\n OutputLevel = min(INFO,config['outputlevel']),\n TreeName = \"COOLBeamspot_\"+containerName+suffix,\n Tag = containerName+suffix\n )\n ToolSvc += InDetBeamSpotDbWriterTool\n print ToolSvc.InDetBeamSpotDbWriterTool\n \n #from InDetBeamSpotFinder.InDetBeamSpotFinderConf import InDet__InDetBeamSpotFinder as InDetBeamSpotFinder\n topSequence += CfgMgr.InDet__InDetBeamSpotFinder(name = 'InDetBeamSpotFinder_'+containerName+suffix,\n BeamSpotTool = InDetBeamSpotVertex,\n BeamSpotWriterTool = InDetBeamSpotDbWriterTool,\n MaxCount = config['MaxCount'],\n LumiRange = config['LumiRange'],\n LumiBlockRanges = config['LumiBlockRanges'],\n RunRange = config['RunRange'],\n EventRange = config['EventRange'],\n #ForceRunNumber = 52280,\n DoHists = doBeamspotHist,\n WriteDb = False,\n UseDefaultValues = True,\n #WriteFailed = True,\n Default_SigmaX = 30.0,\n Default_SigmaY = 30.0,\n Default_SigmaZ = 500.0,\n Default_SigmaXY = 0.0,\n OutputLevel = min(INFO,config['outputlevel']),\n BeamSpotRootName = \"Beamspots_\"+containerName+suffix\n )\n try:\n topSequence.InDetBeamSpotFinder.UseLBFromViewed = config['UseLBFromViewed']\n topSequence.InDetBeamSpotFinder.UseLBFromAccepted = config['UseLBFromAccepted']\n except:\n print 'ERROR: You are using an older version of InDetBeamSpotFinder - please update to InDetBeamSpotFinder-01-00-29 or later'\n print topSequence.InDetBeamSpotFinder",
"def create_page_objects(self, data):\n for page in data['pages']:\n self.create_page(page)",
"def create_container(cls, values):\n dbdriver = get_instance()\n return dbdriver.create_container(values)",
"def __init__(self, jinja2_env, info):\n super(Finding, self).__init__(jinja2_env)\n self._info = info",
"def create():",
"def create():",
"def test_show_container(self):\n pass",
"def test_index_containers(self):\n pass",
"def CreateDataContainer(name):\n dc = simpl.DataContainer.New(name)\n return dc",
"def __init__(self, data, url, *args, **kwargs):\n super(RabjContainer, self).__init__()\n self.data = data\n self.url = url\n self.container_factory = RabjContainerFactory(self.url)",
"def generate_phantom(name='PhantomGeneration'):\n inputnode = pe.Node(niu.IdentityInterface(\n fields=['shape', 'hi_matrix', 'lo_matrix', 'snr', 'cortex',\n 'grid_size', 'repetition_id']),\n name='inputnode')\n\n out_lowres = pe.Node(niu.IdentityInterface(\n fields=['out_signal', 'out_mask', 'out_tpms', 'out_surfs',\n 'out_field', 'out_coeff', 'grid_size']),\n name='out_lowres')\n\n out_hires = pe.Node(niu.IdentityInterface(\n fields=['out_signal', 'out_mask', 'out_tpms', 'out_surfs',\n 'out_field', 'out_coeff', 'grid_size']),\n name='out_hires')\n\n refnode = pe.Node(niu.IdentityInterface(\n fields=['out_signal', 'out_mask', 'out_tpms', 'out_surfs']),\n name='refnode')\n\n model = pe.Node(pip.Phantom(), name='GenerateModel')\n split = pe.Node(Split(), name='Split')\n selm0 = pe.Node(niu.Split(splits=[1, 2], squeeze=True),\n name='SepModel0')\n selm1 = pe.Node(niu.Split(splits=[1, 1, 1], squeeze=True),\n name='SepModel1')\n signal0 = pe.Node(pip.SimulateSMRI(), name='Simulate0')\n merge0 = pe.Node(niu.Merge(2), name='SimMerge0')\n\n surf0 = extract_surface('GenSurf0')\n surf0.inputs.inputnode.labels = [1]\n surf0.inputs.inputnode.name = '00.white'\n surf1 = extract_surface('GenSurf1')\n surf1.inputs.inputnode.labels = [1]\n surf1.inputs.inputnode.name = '01.pial'\n msurf = pe.Node(niu.Merge(2), name='MergeSurfs')\n\n down = pe.Node(fs.MRIConvert(), name='Downsample')\n\n dist = bspline_deform(n_tissues=0)\n\n surf2vol0 = pe.Node(Surf2Vol(), name='Surf2Volume_HR')\n surf2vol1 = pe.Node(Surf2Vol(), name='Surf2Volume_LR')\n norm0 = pe.Node(Normalize(), name='NormalizeTPMs_HR')\n norm1 = pe.Node(Normalize(), name='NormalizeTPMs_LR')\n\n tpmmsk0 = pe.Node(niu.Split(splits=[2, 1, 1]), name='TPMsSplit_HR')\n tpmmsk1 = pe.Node(niu.Split(splits=[2, 1, 1]), name='TPMsSplit_LR')\n\n msk0 = pe.Node(niu.Function(function=_bin_n_msk, input_names=['in_files'],\n output_names=['out_file']), name='binNmsk_HR')\n msk1 = pe.Node(niu.Function(function=_bin_n_msk, input_names=['in_files'],\n output_names=['out_file']), name='binNmsk_LR')\n\n selt0 = pe.Node(niu.Split(splits=[1, 1, 1, 1], squeeze=True),\n name='SeparateTissue_HR')\n selt1 = pe.Node(niu.Split(splits=[1, 1, 1, 1], squeeze=True),\n name='SeparateTissue_LR')\n\n merge1 = pe.Node(niu.Merge(2), name='SimMerge_HR')\n merge2 = pe.Node(niu.Merge(2), name='SimMerge_LR')\n\n signal1 = pe.Node(pip.SimulateSMRI(), name='SimulateHR')\n signal2 = pe.Node(pip.SimulateSMRI(), name='SimulateLR')\n\n wf = pe.Workflow(name=name)\n wf.connect([\n (inputnode, model, [('shape', 'shape'),\n ('hi_matrix', 'matrix_size'),\n ('cortex', 'cortex'),\n ('repetition_id', 'seed')]),\n (model, split, [('out_file', 'in_file')]),\n (split, selm1, [('out_files', 'inlist')]),\n (selm1, signal0, [('out1', 'frac_csf'),\n ('out2', 'frac_wm'),\n ('out3', 'frac_gm')]),\n (signal0, surf0, [('out_t1w', 'inputnode.norm')]),\n (selm1, surf0, [('out2', 'inputnode.aseg')]),\n (signal0, surf1, [('out_t1w', 'inputnode.norm')]),\n (model, surf1, [('out_mask', 'inputnode.aseg')]),\n (surf0, msurf, [('outputnode.out_surf', 'in1')]),\n (surf1, msurf, [('outputnode.out_surf', 'in2')]),\n (split, selm0, [('out_files', 'inlist')]),\n (inputnode, dist, [('grid_size', 'inputnode.grid_size')]),\n (msurf, dist, [('out', 'inputnode.in_surfs')]),\n (model, dist, [('out_mask', 'inputnode.in_mask')]),\n (selm0, dist, [('out2', 'inputnode.in_file')]),\n\n (signal0, surf2vol0, [('out_t1w', 'reference')]),\n (dist, surf2vol0, [('outputnode.out_surfs', 'surfaces')]),\n (surf2vol0, norm0, [('out_tpm', 
'in_files')]),\n (norm0, selt0, [('out_files', 'inlist')]),\n (selt0, signal1, [('out1', 'frac_wm'),\n ('out2', 'frac_gm'),\n ('out3', 'frac_csf')]),\n (inputnode, signal1, [('snr', 'snr')]),\n (signal1, merge1, [('out_t1w', 'in1'),\n ('out_t2w', 'in2')]),\n (norm0, tpmmsk0, [('out_files', 'inlist')]),\n (tpmmsk0, msk0, [('out1', 'in_files')]),\n\n (signal0, down, [('out_t1w', 'in_file'),\n (('out_t1w', _half_voxsize), 'vox_size')]),\n (down, surf2vol1, [('out_file', 'reference')]),\n (dist, surf2vol1, [('outputnode.out_surfs', 'surfaces')]),\n (surf2vol1, norm1, [('out_tpm', 'in_files')]),\n (norm1, selt1, [('out_files', 'inlist')]),\n (selt1, signal2, [('out1', 'frac_wm'),\n ('out2', 'frac_gm'),\n ('out3', 'frac_csf')]),\n (inputnode, signal2, [('snr', 'snr')]),\n (signal2, merge2, [('out_t1w', 'in1'),\n ('out_t2w', 'in2')]),\n (norm1, tpmmsk1, [('out_files', 'inlist')]),\n (tpmmsk1, msk1, [('out1', 'in_files')]),\n\n # reference outputs\n (signal0, merge0, [('out_t1w', 'in1'),\n ('out_t2w', 'in2')]),\n (msurf, refnode, [('out', 'out_surfs')]),\n (selt0, refnode, [('out2', 'out_tpms')]),\n (model, refnode, [('out_mask', 'out_mask')]),\n (merge0, refnode, [('out', 'out_signal')]),\n\n # distorted outputs\n (inputnode, out_hires, [('grid_size', 'grid_size')]),\n (merge1, out_hires, [('out', 'out_signal')]),\n (msk0, out_hires, [('out_file', 'out_mask')]),\n (tpmmsk0, out_hires, [('out1', 'out_tpms')]),\n (dist, out_hires, [('outputnode.out_field', 'out_field'),\n ('outputnode.out_coeff', 'out_coeff'),\n ('outputnode.out_surfs', 'out_surfs')]),\n\n # distorted outputs\n (inputnode, out_lowres, [('grid_size', 'grid_size')]),\n (merge2, out_lowres, [('out', 'out_signal')]),\n (msk1, out_lowres, [('out_file', 'out_mask')]),\n (tpmmsk1, out_lowres, [('out1', 'out_tpms')]),\n (dist, out_lowres, [('outputnode.out_field', 'out_field'),\n ('outputnode.out_coeff', 'out_coeff'),\n ('outputnode.out_surfs', 'out_surfs')])\n ])\n return wf",
"def _create_container(self, container_name):\n try:\n container = self.swift.head_container(container_name)\n except client.ClientException:\n self.swift.put_container(container_name)\n else:\n return container"
] | [
"0.5875233",
"0.58578813",
"0.5711849",
"0.5711849",
"0.5711849",
"0.5711849",
"0.5711849",
"0.5676269",
"0.5545769",
"0.5408113",
"0.53612626",
"0.5329194",
"0.5291496",
"0.5263945",
"0.5263945",
"0.51851237",
"0.5161798",
"0.5128938",
"0.5122123",
"0.51019007",
"0.50853336",
"0.50628203",
"0.50505215",
"0.50505215",
"0.5045299",
"0.50361085",
"0.5028834",
"0.50070065",
"0.50063205",
"0.49516696"
] | 0.65444756 | 0 |
This function is used to create artifacts in the given container using finding data. | def _create_artifacts(self, finding, container_id):
artifacts = []
for resource in finding.pop('Resources'):
resource_artifact = {}
resource_artifact['name'] = '{} Resource Artifact'.format(resource['Type'])
resource_artifact['container_id'] = container_id
resource_artifact['source_data_identifier'] = resource['Id']
resource_artifact['cef'] = {}
# Flatten the JSON, by moving the Details up one level
if 'Details' in resource:
resource_artifact['cef'].update(resource.pop('Details'))
resource_artifact['cef'].update(resource)
resource_artifact['cef_types'] = AWSSECURITYHUB_RESOURCE_CEF_TYPES
# Extract the InstanceId from the ARN
if 'instance/' in resource['Id']:
resource_artifact['cef']['InstanceId'] = resource['Id'].split('instance/')[1]
if resource['Type'] == 'AwsEc2Instance':
resource_artifact['cef_types']['InstanceId'] = ['aws ec2 instance id']
artifacts.append(resource_artifact)
finding_artifact = {}
finding_artifact['name'] = 'Finding Artifact'
finding_artifact['container_id'] = container_id
finding_artifact['source_data_identifier'] = finding['Id']
finding_artifact['cef'] = finding
finding_artifact['cef_types'] = AWSSECURITYHUB_FINDING_CEF_TYPES
artifacts.append(finding_artifact)
create_artifact_status, create_artifact_msg, _ = self.save_artifacts(artifacts)
if phantom.is_fail(create_artifact_status):
return phantom.APP_ERROR, create_artifact_msg
return phantom.APP_SUCCESS, 'Artifacts created successfully' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _create_container(self, finding):\n\n container_dict = {}\n container_dict['name'] = finding['Title']\n container_dict['source_data_identifier'] = finding['Id']\n container_dict['description'] = finding['Description']\n\n container_creation_status, container_creation_msg, container_id = self.save_container(container=container_dict)\n\n if phantom.is_fail(container_creation_status):\n self.debug_print(container_creation_msg)\n self.save_progress('Error while creating container for finding {finding_id}. '\n '{error_message}'.format(finding_id=finding['Id'],\n error_message=container_creation_msg))\n return None\n\n return container_id",
"def build_container(client):\n client.images.build(path=os.path.join(os.path.abspath(\"\"), \"docker\"), tag=\"scrape_light\")",
"def _find_or_add_artifact(self, i):\n\n console = i.get('out','')=='con'\n\n # Try to find experiment artifact by alias and/or tags\n ii = utils.sub_input(i, self.cmind.cfg['artifact_keys'] + ['tags'])\n ii['action']='find'\n\n ii_copy = copy.deepcopy(ii)\n\n # If artifact is specified, remove tags\n artifact = ii.get('artifact','').strip()\n if artifact!='' and not artifact.endswith(':') \\\n and '*' not in artifact and '?' not in artifact:\n if 'tags' in ii: del(ii['tags'])\n\n r = self.cmind.access(ii)\n if r['return']>0: return r\n\n lst = r['list']\n\n if len(lst)>1:\n print ('More than 1 experiment artifact found:')\n\n lst = sorted(lst, key=lambda x: x.path)\n \n num = 0\n print ('')\n for e in lst:\n print ('{}) {}'.format(num, e.path))\n print (' Tags: {}'.format(','.join(e.meta.get('tags',[]))))\n num += 1\n\n if not console:\n return {'return':1, 'error':'more than 1 experiment artifact found.\\nPlease use \"cm run experiment {name}\" or \"cm run experiment --tags={tags separated by comma}\"'}\n \n print ('')\n x=input('Make your selection or press Enter for 0: ')\n\n x=x.strip()\n if x=='': x='0'\n\n selection = int(x)\n\n if selection < 0 or selection >= num:\n selection = 0\n\n experiment = lst[selection]\n\n elif len(lst)==1:\n experiment = lst[0]\n else:\n # Create new entry\n if i.get('fail_if_not_found',False):\n return {'return':1, 'error':'experiment not found'}\n \n ii = copy.deepcopy(ii_copy)\n ii['action']='add'\n r = self.cmind.access(ii)\n if r['return']>0: return r\n\n experiment_uid = r['meta']['uid']\n\n r = self.cmind.access({'action':'find',\n 'automation':'experiment,a0a2d123ef064bcb',\n 'artifact':experiment_uid})\n if r['return']>0: return r\n\n lst = r['list']\n if len(lst)==0 or len(lst)>1:\n return {'return':1, 'error':'created experiment artifact with UID {} but can\\'t find it - weird'.format(experiment_uid)}\n\n experiment = lst[0]\n \n return {'return':0, 'experiment':experiment}",
"def prepare_artifacts(self):\n\n logger.info(\"Handling artifacts...\")\n target_dir = os.path.join(self.target, 'image')\n fetch_artifacts_url = []\n\n for image in self.images:\n for artifact in image.all_artifacts:\n logger.info(\"Preparing artifact '{}'\".format(artifact['name']))\n\n if isinstance(artifact, _PlainResource) and \\\n config.get('common', 'redhat'):\n try:\n fetch_artifacts_url.append({'md5': artifact['md5'],\n 'url': get_brew_url(artifact['md5']),\n 'target': os.path.join(artifact['target'])})\n artifact['target'] = os.path.join('artifacts', artifact['target'])\n logger.debug(\n \"Artifact '{}' added to fetch-artifacts-url.yaml\".format(artifact['name']))\n except:\n logger.warning(\"Plain artifact {} could not be found in Brew, trying to handle it using lookaside cache\".\n format(artifact['name']))\n artifact.copy(target_dir)\n # TODO: This is ugly, rewrite this!\n artifact['lookaside'] = True\n\n else:\n artifact.copy(target_dir)\n\n fetch_artifacts_file = os.path.join(self.target, 'image', 'fetch-artifacts-url.yaml')\n\n if fetch_artifacts_url:\n with open(fetch_artifacts_file, 'w') as _file:\n yaml.safe_dump(fetch_artifacts_url, _file, default_flow_style=False)\n\n logger.debug(\"Artifacts handled\")",
"def test_get_container_assets(self):\n pass",
"def test_create_container(self):\n pass",
"def createContainer(tag, data={}): #@NoSelf",
"def Run(self, args):\n\n with RecoverFromDiagnosticException(args.image_name):\n img_name = util.GetDigestFromName(args.image_name)\n return container_data_util.ContainerData(\n registry=img_name.registry,\n repository=img_name.repository,\n digest=img_name.digest)",
"def setup(self):\n\n exists = [i for i in self.client.images() if self.image in i['RepoTags']]\n\n # Only pull the image if we don't have it\n if not exists or self.pull:\n self.client.pull(self.image)\n self.logger.debug(\"Pulled {}\".format(self.image))\n\n self.container = self.client.create_container(\n image=self.image,\n host_config=self.host_config,\n name=self.name,\n command=self.command,\n environment=self.environment\n )\n self.logger.debug(\"Created container {}\".format(self.container['Id']))",
"def fill_args_with_artifacts(self, args):\n for art in self.artifacts:\n artifact.add_artifact(args, art)",
"def test_get_artifacts(self):\r\n if os.environ.get('CIRCLE_ARTIFACTS'):\r\n print('\\nCreate test artifacts (screenshots): ', end='', flush=True)\r\n window_sizes = [[300, 600], [700, 600], [800, 600], [1000, 1000], [1300, 1300]]\r\n \r\n repo = Repo('.')\r\n artifacts_path = os.environ.get('CIRCLE_ARTIFACTS') + '/' + str(repo.active_branch)\r\n \r\n if not os.path.exists(artifacts_path):\r\n os.makedirs(artifacts_path)\r\n \r\n driver = self.driver\r\n driver.get(MY_URL)\r\n for w_size in window_sizes:\r\n driver.set_window_size(w_size[0], w_size[1])\r\n filepath = artifacts_path + '/ff_shot_%d_%d.png' % (w_size[0], w_size[1])\r\n driver.save_screenshot(filepath)\r\n print('.', end=\"\", flush=True)\r\n if DEBUG:\r\n print ('Captured %s' % filepath)\r\n else:\r\n print('\\nNo test artifacts generated. ', end='', flush=True)",
"def _constructInstance(self, container, id, *args, **kw):\n file, title = None, ''\n id = container.manage_addProduct['OFSP'].manage_addImage(id, file, title)\n return container.get(id, None)",
"def factory(container, name, factory):",
"def add_artifacts_from_result(args, result):\n for art in result.get_artifacts():\n add_artifact(args, art)",
"def build_artifacts(logger, ctx):\n\n compose_fn = build_compose(logger, ctx)\n logger.debug(f'docker-compose.yml - {compose_fn}')\n try:\n # Must copy the bin directory to the client's folder structure. This directory\n # will be promtly cleaned up after the artifacts are built.\n os.makedirs('./.juni/bin', exist_ok=True)\n shutil.copy(get_artifact_path('package.sh'), './.juni/bin/')\n\n # Use docker as a way to pip install dependencies, and copy the business logic\n # specified in the function definitions.\n subprocess.run([\"docker-compose\", \"-f\", compose_fn, '--project-directory', '.', 'down'])\n subprocess.run([\"docker-compose\", \"-f\", compose_fn, '--project-directory', '.', 'up'])\n finally:\n shutil.rmtree('./.juni', ignore_errors=True)",
"def build_container_image(self) -> None:\n print_version_of_tools()\n try:\n self.fs_watcher.start()\n runner = PluginsRunner(self,\n self.plugins_conf,\n self.plugin_files,\n self.keep_plugins_running,\n plugins_results=self.data.plugins_results)\n runner.run()\n finally:\n self.fs_watcher.finish()",
"def main():\n client = docker.from_env()\n parser = argparse.ArgumentParser(description='')\n parser.add_argument('--package_id', default='0',\n help='provide id for the work package, comma separated if multiple')\n parser.add_argument('--load_quicksave', default=\"no\", help='wanna load? -> yes/no')\n args = parser.parse_args()\n packages = args.package_id.split(\",\")\n print('Building docker container. This might take a while.')\n build_container(client)\n print('Build finished. Starting containers.')\n for package in packages:\n start_container(client, package, args.load_quicksave)\n print('Containers are running. Check Docker Dashboard for container health. Script will exit.')",
"def customization_data(client=None):\n\n # This import data contains:\n # Function inputs:\n # artifact_id\n # attachment_id\n # docker_artifact_type\n # docker_image\n # docker_input\n # docker_operation\n # incident_id\n # task_id\n # DataTables:\n # docker_integration_invocations\n # Message Destinations:\n # fn_docker\n # Functions:\n # docker_run_docker_container\n # Workflows:\n # docker_analyze_artifact_with_docker_container_amass\n # docker_analyze_artifact_with_docker_container_nsrl\n # docker_send_attachment_to_docker_container\n # Rules:\n # Docker: Amass: Search for Subdomains\n # Docker: NSRL: Validate MD5 from Whitelist\n # Docker: Volatility: Analyze Memory Sample\n\n\n yield ImportDefinition(u\"\"\"\neyJzZXJ2ZXJfdmVyc2lvbiI6IHsibWFqb3IiOiAzMSwgIm1pbm9yIjogMCwgImJ1aWxkX251bWJl\nciI6IDQyMzUsICJ2ZXJzaW9uIjogIjMxLjAuNDIzNSJ9LCAiZXhwb3J0X2Zvcm1hdF92ZXJzaW9u\nIjogMiwgImlkIjogODAsICJleHBvcnRfZGF0ZSI6IDE1NTI0OTk3NDk4MjksICJmaWVsZHMiOiBb\neyJpZCI6IDIyMywgIm5hbWUiOiAiaW5jX3RyYWluaW5nIiwgInRleHQiOiAiU2ltdWxhdGlvbiIs\nICJwcmVmaXgiOiBudWxsLCAidHlwZV9pZCI6IDAsICJ0b29sdGlwIjogIldoZXRoZXIgdGhlIGlu\nY2lkZW50IGlzIGEgc2ltdWxhdGlvbiBvciBhIHJlZ3VsYXIgaW5jaWRlbnQuICBUaGlzIGZpZWxk\nIGlzIHJlYWQtb25seS4iLCAiaW5wdXRfdHlwZSI6ICJib29sZWFuIiwgImhpZGVfbm90aWZpY2F0\naW9uIjogZmFsc2UsICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6\nIGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJpbnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6\nICJjM2YwZTNlZC0yMWUxLTRkNTMtYWZmYi1mZTVjYTMzMDhjY2EiLCAib3BlcmF0aW9ucyI6IFtd\nLCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJ2YWx1ZXMiOiBbXSwgInJlYWRfb25seSI6IHRydWUs\nICJjaGFuZ2VhYmxlIjogdHJ1ZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJp\nbmNpZGVudC9pbmNfdHJhaW5pbmciLCAidGVtcGxhdGVzIjogW10sICJkZXByZWNhdGVkIjogZmFs\nc2V9LCB7ImlkIjogMzAxLCAibmFtZSI6ICJ0YXNrX2lkIiwgInRleHQiOiAidGFza19pZCIsICJw\ncmVmaXgiOiBudWxsLCAidHlwZV9pZCI6IDExLCAidG9vbHRpcCI6ICIiLCAicGxhY2Vob2xkZXIi\nOiAiIiwgImlucHV0X3R5cGUiOiAibnVtYmVyIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2Us\nICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiYmxh\nbmtfb3B0aW9uIjogZmFsc2UsICJpbnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6ICJiYTMxODI2MS1l\nZDZhLTRhMzgtYTE4Ny05ZTBiNjhkMTYwNGYiLCAib3BlcmF0aW9ucyI6IFtdLCAib3BlcmF0aW9u\nX3Blcm1zIjoge30sICJ2YWx1ZXMiOiBbXSwgInJlYWRfb25seSI6IGZhbHNlLCAiY2hhbmdlYWJs\nZSI6IHRydWUsICJyaWNoX3RleHQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi90\nYXNrX2lkIiwgInRlbXBsYXRlcyI6IFtdLCAiZGVwcmVjYXRlZCI6IGZhbHNlfSwgeyJpZCI6IDM1\nMCwgIm5hbWUiOiAiZG9ja2VyX2FydGlmYWN0X3R5cGUiLCAidGV4dCI6ICJkb2NrZXJfYXJ0aWZh\nY3RfdHlwZSIsICJwcmVmaXgiOiBudWxsLCAidHlwZV9pZCI6IDExLCAidG9vbHRpcCI6ICJUaGUg\ndHlwZSBvZiBhcnRpZmFjdCB0aGF0IHRoaXMgaW50ZWdyYXRpb24gd2FzIHJhbiBhZ2FpbnN0LiBO\nb3QgdXNlZCBmb3IgYXR0YWNobWVudCB3b3JrZmxvd3MuIiwgInBsYWNlaG9sZGVyIjogIiIsICJp\nbnB1dF90eXBlIjogInRleHQiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgImNob3NlbiI6\nIGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJibGFua19vcHRpb24i\nOiBmYWxzZSwgImludGVybmFsIjogZmFsc2UsICJ1dWlkIjogIjIwYjVmNjYxLWI1NjItNGE3OC04\nYTQwLTNkOWM5ZjI0Y2I2OCIsICJvcGVyYXRpb25zIjogW10sICJvcGVyYXRpb25fcGVybXMiOiB7\nfSwgInZhbHVlcyI6IFtdLCAicmVhZF9vbmx5IjogZmFsc2UsICJjaGFuZ2VhYmxlIjogdHJ1ZSwg\nInJpY2hfdGV4dCI6IGZhbHNlLCAiZXhwb3J0X2tleSI6ICJfX2Z1bmN0aW9uL2RvY2tlcl9hcnRp\nZmFjdF90eXBlIiwgInRlbXBsYXRlcyI6IFtdLCAiZGVwcmVjYXRlZCI6IGZhbHNlfSwgeyJpZCI6\nIDI5NywgIm5hbWUiOiAiYXJ0aWZhY3RfaWQiLCAidGV4dCI6ICJhcnRpZmFjdF9pZCIsICJwcmVm\naXgiOiBudWxsLCAidHlwZV9pZCI6IDExLCAidG9vbHRpcCI6ICIiLCAicGxhY2Vob2xkZXIiOiAi\nIiwgImlucHV0X3R5cGUiOiAibnVtYmVyIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJj\naG9zZW4iOiBmYWxzZSwg
ImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiYmxhbmtf\nb3B0aW9uIjogZmFsc2UsICJpbnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6ICI2MmQ5MzEwNS03MDVk\nLTQ4NzYtOTgxMy1lNjBlZTQzZTE5ZWQiLCAib3BlcmF0aW9ucyI6IFtdLCAib3BlcmF0aW9uX3Bl\ncm1zIjoge30sICJ2YWx1ZXMiOiBbXSwgInJlYWRfb25seSI6IGZhbHNlLCAiY2hhbmdlYWJsZSI6\nIHRydWUsICJyaWNoX3RleHQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9hcnRp\nZmFjdF9pZCIsICJ0ZW1wbGF0ZXMiOiBbXSwgImRlcHJlY2F0ZWQiOiBmYWxzZX0sIHsiaWQiOiAy\nOTksICJuYW1lIjogImRvY2tlcl9pbnB1dCIsICJ0ZXh0IjogImRvY2tlcl9pbnB1dCIsICJwcmVm\naXgiOiBudWxsLCAidHlwZV9pZCI6IDExLCAidG9vbHRpcCI6ICJBbiBpbnB1dCB0byBiZSBmZWQg\naW50byBhIGRvY2tlciBjb250YWluZXIuIEludGVuZGVkIGZvciB1c2Ugd2l0aCBhcnRpZmFjdCB2\nYWx1ZXMiLCAicGxhY2Vob2xkZXIiOiAiIiwgImlucHV0X3R5cGUiOiAidGV4dCIsICJoaWRlX25v\ndGlmaWNhdGlvbiI6IGZhbHNlLCAiY2hvc2VuIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9z\nZXJ2ZXIiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAiaW50ZXJuYWwiOiBmYWxzZSwg\nInV1aWQiOiAiNGZjMzA5ZjEtMzQwNi00NjRmLWJlNmQtZDM3OWRjMjNkNDExIiwgIm9wZXJhdGlv\nbnMiOiBbXSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidmFsdWVzIjogW10sICJyZWFkX29ubHki\nOiBmYWxzZSwgImNoYW5nZWFibGUiOiB0cnVlLCAicmljaF90ZXh0IjogZmFsc2UsICJleHBvcnRf\na2V5IjogIl9fZnVuY3Rpb24vZG9ja2VyX2lucHV0IiwgInRlbXBsYXRlcyI6IFtdLCAiZGVwcmVj\nYXRlZCI6IGZhbHNlfSwgeyJpZCI6IDM1MywgIm5hbWUiOiAiZG9ja2VyX29wZXJhdGlvbiIsICJ0\nZXh0IjogImRvY2tlcl9vcGVyYXRpb24iLCAicHJlZml4IjogbnVsbCwgInR5cGVfaWQiOiAxMSwg\nInRvb2x0aXAiOiAiQSBwYXJhbSB2YWx1ZSB0byBiZSBmZWQgdG8gYSBjb250YWluZXIncyBydW4g\nY29tbWFuZCBzcGVjaWZ5aW5nIGEgcGFydGljdWxhciBlbnRyeXBvaW50IG9yIGZ1bmN0aW9uIGZv\nciB0aGF0IGltYWdlLiBVc2VkIGZvciBjb250YWluZXJzIHdoaWNoIGhhdmUgbXVsdGlwbGUgcG9z\nc2libGUgb3BlcmF0aW9ucyB5b3UgY2FuIHBlcmZvcm0gaW4gdGhlbSBzdWNoIGFzIFZvbGF0aWxp\ndHkiLCAicGxhY2Vob2xkZXIiOiAiIiwgImlucHV0X3R5cGUiOiAidGV4dCIsICJoaWRlX25vdGlm\naWNhdGlvbiI6IGZhbHNlLCAiY2hvc2VuIjogZmFsc2UsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2\nZXIiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IGZhbHNlLCAiaW50ZXJuYWwiOiBmYWxzZSwgInV1\naWQiOiAiZTg5M2UwOGQtOTQwOC00NDQ5LTg5YWItOTI4YzFjZTFlNGQ0IiwgIm9wZXJhdGlvbnMi\nOiBbXSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidmFsdWVzIjogW10sICJyZWFkX29ubHkiOiBm\nYWxzZSwgImNoYW5nZWFibGUiOiB0cnVlLCAicmljaF90ZXh0IjogZmFsc2UsICJleHBvcnRfa2V5\nIjogIl9fZnVuY3Rpb24vZG9ja2VyX29wZXJhdGlvbiIsICJ0ZW1wbGF0ZXMiOiBbXSwgImRlcHJl\nY2F0ZWQiOiBmYWxzZX0sIHsiaWQiOiAzMDAsICJuYW1lIjogImF0dGFjaG1lbnRfaWQiLCAidGV4\ndCI6ICJhdHRhY2htZW50X2lkIiwgInByZWZpeCI6IG51bGwsICJ0eXBlX2lkIjogMTEsICJ0b29s\ndGlwIjogIiIsICJwbGFjZWhvbGRlciI6ICIiLCAiaW5wdXRfdHlwZSI6ICJudW1iZXIiLCAiaGlk\nZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgImNob3NlbiI6IGZhbHNlLCAiZGVmYXVsdF9jaG9zZW5f\nYnlfc2VydmVyIjogZmFsc2UsICJibGFua19vcHRpb24iOiBmYWxzZSwgImludGVybmFsIjogZmFs\nc2UsICJ1dWlkIjogIjE2Nzc3MTZhLWE5NWUtNGY1NS04ZTNlLTUzOTllNmQzYmQ5NiIsICJvcGVy\nYXRpb25zIjogW10sICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInZhbHVlcyI6IFtdLCAicmVhZF9v\nbmx5IjogZmFsc2UsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgInJpY2hfdGV4dCI6IGZhbHNlLCAiZXhw\nb3J0X2tleSI6ICJfX2Z1bmN0aW9uL2F0dGFjaG1lbnRfaWQiLCAidGVtcGxhdGVzIjogW10sICJk\nZXByZWNhdGVkIjogZmFsc2V9LCB7ImlkIjogMzM1LCAibmFtZSI6ICJkb2NrZXJfaW1hZ2UiLCAi\ndGV4dCI6ICJkb2NrZXJfaW1hZ2UiLCAicHJlZml4IjogbnVsbCwgInR5cGVfaWQiOiAxMSwgInRv\nb2x0aXAiOiAiQW4gSW1hZ2UgdG8gYmUgdXNlZCB0byBjcmVhdGUgYSBjb250YWluZXIuIE11c3Qg\nYmUgYW4gYXBwcm92ZWQgaW1hZ2Ugd2hpY2ggaXMgc2V0IGluIHRoZSBhcHAuY29uZmlnLiIsICJw\nbGFjZWhvbGRlciI6ICIiLCAiaW5wdXRfdHlwZSI6ICJzZWxlY3QiLCAicmVxdWlyZWQiOiAiYWx3\nYXlzIiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2UsICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1\nbHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogdHJ1ZSwgImludGVy\nbmFsIjogZmFsc2UsICJ1dWlkIjogImU2ZDY2YmFjLTg0MWQtNDAzZi04MmZhLTg2M
mRjM2NkMjIy\nZiIsICJvcGVyYXRpb25zIjogW10sICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInZhbHVlcyI6IFt7\nInZhbHVlIjogMTg1MCwgImxhYmVsIjogInZvbGF0aWxpdHkiLCAiZW5hYmxlZCI6IHRydWUsICJw\ncm9wZXJ0aWVzIjogbnVsbCwgInV1aWQiOiAiN2YzNmEyODUtYjJiMC00MDFiLWEwY2EtYTQ3OGFl\nOTBiZTZiIiwgImhpZGRlbiI6IGZhbHNlLCAiZGVmYXVsdCI6IHRydWV9LCB7InZhbHVlIjogMTg1\nMSwgImxhYmVsIjogIm5zcmwiLCAiZW5hYmxlZCI6IHRydWUsICJwcm9wZXJ0aWVzIjogbnVsbCwg\nInV1aWQiOiAiZjY0OTk5YmEtMjc4Ny00YjIxLThjNmMtMWUwZDQ5NzYwMDllIiwgImhpZGRlbiI6\nIGZhbHNlLCAiZGVmYXVsdCI6IGZhbHNlfSwgeyJ2YWx1ZSI6IDE5MDAsICJsYWJlbCI6ICJhbWFz\ncyIsICJlbmFibGVkIjogdHJ1ZSwgInByb3BlcnRpZXMiOiBudWxsLCAidXVpZCI6ICIzNTY2MTlm\nOC0zYWViLTQ3YTMtODRiYi1jMzM1YzFhMTNiYWYiLCAiaGlkZGVuIjogZmFsc2UsICJkZWZhdWx0\nIjogZmFsc2V9XSwgInJlYWRfb25seSI6IGZhbHNlLCAiY2hhbmdlYWJsZSI6IHRydWUsICJyaWNo\nX3RleHQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiX19mdW5jdGlvbi9kb2NrZXJfaW1hZ2UiLCAi\ndGVtcGxhdGVzIjogW10sICJkZXByZWNhdGVkIjogZmFsc2V9LCB7ImlkIjogMjk4LCAibmFtZSI6\nICJpbmNpZGVudF9pZCIsICJ0ZXh0IjogImluY2lkZW50X2lkIiwgInByZWZpeCI6IG51bGwsICJ0\neXBlX2lkIjogMTEsICJ0b29sdGlwIjogIiIsICJwbGFjZWhvbGRlciI6ICIiLCAiaW5wdXRfdHlw\nZSI6ICJudW1iZXIiLCAicmVxdWlyZWQiOiAiYWx3YXlzIiwgImhpZGVfbm90aWZpY2F0aW9uIjog\nZmFsc2UsICJjaG9zZW4iOiBmYWxzZSwgImRlZmF1bHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNl\nLCAiYmxhbmtfb3B0aW9uIjogZmFsc2UsICJpbnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6ICI4MTFl\nOTlkNy1kMTk0LTRjZTgtODZjYy1hZmY1ZTAxYWI4NWMiLCAib3BlcmF0aW9ucyI6IFtdLCAib3Bl\ncmF0aW9uX3Blcm1zIjoge30sICJ2YWx1ZXMiOiBbXSwgInJlYWRfb25seSI6IGZhbHNlLCAiY2hh\nbmdlYWJsZSI6IHRydWUsICJyaWNoX3RleHQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiX19mdW5j\ndGlvbi9pbmNpZGVudF9pZCIsICJ0ZW1wbGF0ZXMiOiBbXSwgImRlcHJlY2F0ZWQiOiBmYWxzZX1d\nLCAiaW5jaWRlbnRfdHlwZXMiOiBbeyJ1cGRhdGVfZGF0ZSI6IDE1NTI1MDA0MDQ3NTYsICJjcmVh\ndGVfZGF0ZSI6IDE1NTI1MDA0MDQ3NTYsICJ1dWlkIjogImJmZWVjMmQ0LTM3NzAtMTFlOC1hZDM5\nLTRhMDAwNDA0NGFhMCIsICJkZXNjcmlwdGlvbiI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChp\nbnRlcm5hbCkiLCAiZXhwb3J0X2tleSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5h\nbCkiLCAibmFtZSI6ICJDdXN0b21pemF0aW9uIFBhY2thZ2VzIChpbnRlcm5hbCkiLCAiZW5hYmxl\nZCI6IGZhbHNlLCAic3lzdGVtIjogZmFsc2UsICJwYXJlbnRfaWQiOiBudWxsLCAiaGlkZGVuIjog\nZmFsc2UsICJpZCI6IDB9XSwgInBoYXNlcyI6IFtdLCAiYXV0b21hdGljX3Rhc2tzIjogW10sICJv\ndmVycmlkZXMiOiBbXSwgIm1lc3NhZ2VfZGVzdGluYXRpb25zIjogW3sibmFtZSI6ICJEb2NrZXIg\nTWVzc2FnZSBEZXN0aW5hdGlvbiIsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJmbl9kb2NrZXIiLCAi\nZGVzdGluYXRpb25fdHlwZSI6IDAsICJleHBlY3RfYWNrIjogdHJ1ZSwgInVzZXJzIjogWyJhbGZy\nZWRAd2F5bmVjb3JwLmNvbSIsICJpbnRlZ3JhdGlvbi1zZXJ2ZXIuYWxmcmVkQHdheW5lY29ycC5j\nb20iXSwgInV1aWQiOiAiMDM0NTVlODEtYWFiNC00YzVhLWI1ZDUtMmFhZGQ3Yjk1Zjc5IiwgImV4\ncG9ydF9rZXkiOiAiZm5fZG9ja2VyIn1dLCAiYWN0aW9ucyI6IFt7ImlkIjogODEsICJuYW1lIjog\nIkRvY2tlcjogQW1hc3M6IFNlYXJjaCBmb3IgU3ViZG9tYWlucyIsICJ0eXBlIjogMSwgIm9iamVj\ndF90eXBlIjogImFydGlmYWN0IiwgImNvbmRpdGlvbnMiOiBbeyJtZXRob2QiOiAiZXF1YWxzIiwg\nImZpZWxkX25hbWUiOiAiYXJ0aWZhY3QudHlwZSIsICJ2YWx1ZSI6ICJETlMgTmFtZSIsICJ0eXBl\nIjogbnVsbCwgImV2YWx1YXRpb25faWQiOiBudWxsfV0sICJhdXRvbWF0aW9ucyI6IFtdLCAibWVz\nc2FnZV9kZXN0aW5hdGlvbnMiOiBbXSwgIndvcmtmbG93cyI6IFsiZG9ja2VyX2FuYWx5emVfYXJ0\naWZhY3Rfd2l0aF9kb2NrZXJfY29udGFpbmVyX2FtYXNzIl0sICJ2aWV3X2l0ZW1zIjogW10sICJ0\naW1lb3V0X3NlY29uZHMiOiA4NjQwMCwgInV1aWQiOiAiNzM0MTVmMGUtNTIyNi00OGVjLTgzZjYt\nMWQwMjk3MzU3MGExIiwgImV4cG9ydF9rZXkiOiAiRG9ja2VyOiBBbWFzczogU2VhcmNoIGZvciBT\ndWJkb21haW5zIiwgImxvZ2ljX3R5cGUiOiAiYWxsIn0sIHsiaWQiOiA2MSwgIm5hbWUiOiAiRG9j\na2VyOiBOU1JMOiBWYWxpZGF0ZSBNRDUgZnJvbSBXaGl0ZWxpc3QiLCAidHlwZSI6IDEsICJvYmpl\nY3RfdHlwZSI6ICJhcnRpZmFjdCIsICJjb25kaXRpb25zIjogW3sibWV0aG9kIjogImVxdWFscyIs\nICJmaWVsZF9uYW1lIjogImFydGlmYWN0
LnR5cGUiLCAidmFsdWUiOiAiTWFsd2FyZSBNRDUgSGFz\naCIsICJ0eXBlIjogbnVsbCwgImV2YWx1YXRpb25faWQiOiBudWxsfV0sICJhdXRvbWF0aW9ucyI6\nIFtdLCAibWVzc2FnZV9kZXN0aW5hdGlvbnMiOiBbXSwgIndvcmtmbG93cyI6IFsiZG9ja2VyX2Fu\nYWx5emVfYXJ0aWZhY3Rfd2l0aF9kb2NrZXJfY29udGFpbmVyX25zcmwiXSwgInZpZXdfaXRlbXMi\nOiBbXSwgInRpbWVvdXRfc2Vjb25kcyI6IDg2NDAwLCAidXVpZCI6ICJmNDk0NjhlNC1hZmQ2LTRl\nZGQtOWZkYy00NTgxZmRmOTZhYzUiLCAiZXhwb3J0X2tleSI6ICJEb2NrZXI6IE5TUkw6IFZhbGlk\nYXRlIE1ENSBmcm9tIFdoaXRlbGlzdCIsICJsb2dpY190eXBlIjogImFsbCJ9LCB7ImlkIjogMjgs\nICJuYW1lIjogIkRvY2tlcjogVm9sYXRpbGl0eTogQW5hbHl6ZSBNZW1vcnkgU2FtcGxlIiwgInR5\ncGUiOiAxLCAib2JqZWN0X3R5cGUiOiAiYXR0YWNobWVudCIsICJjb25kaXRpb25zIjogW3sibWV0\naG9kIjogImNvbnRhaW5zIiwgImZpZWxkX25hbWUiOiAiYXR0YWNobWVudC5uYW1lIiwgInZhbHVl\nIjogIi52bWVtIiwgInR5cGUiOiBudWxsLCAiZXZhbHVhdGlvbl9pZCI6IG51bGx9XSwgImF1dG9t\nYXRpb25zIjogW10sICJtZXNzYWdlX2Rlc3RpbmF0aW9ucyI6IFtdLCAid29ya2Zsb3dzIjogWyJk\nb2NrZXJfc2VuZF9hdHRhY2htZW50X3RvX2RvY2tlcl9jb250YWluZXIiXSwgInZpZXdfaXRlbXMi\nOiBbXSwgInRpbWVvdXRfc2Vjb25kcyI6IDg2NDAwLCAidXVpZCI6ICI3ZDA2OTI2Zi0yOGEyLTQ4\nY2EtOGRlNS1iZjk2ZDk1MGJiZmQiLCAiZXhwb3J0X2tleSI6ICJEb2NrZXI6IFZvbGF0aWxpdHk6\nIEFuYWx5emUgTWVtb3J5IFNhbXBsZSIsICJsb2dpY190eXBlIjogImFsbCJ9XSwgImxheW91dHMi\nOiBbXSwgIm5vdGlmaWNhdGlvbnMiOiBudWxsLCAidGltZWZyYW1lcyI6IG51bGwsICJsb2NhbGUi\nOiBudWxsLCAiaW5kdXN0cmllcyI6IG51bGwsICJyZWd1bGF0b3JzIjogbnVsbCwgImdlb3MiOiBu\ndWxsLCAidGFza19vcmRlciI6IFtdLCAiYWN0aW9uX29yZGVyIjogW10sICJ0eXBlcyI6IFt7Imlk\nIjogbnVsbCwgInR5cGVfaWQiOiA4LCAidHlwZV9uYW1lIjogImRvY2tlcl9pbnRlZ3JhdGlvbl9p\nbnZvY2F0aW9ucyIsICJmaWVsZHMiOiB7ImRvY2tlcl9saW5rcyI6IHsiaWQiOiAzNDgsICJuYW1l\nIjogImRvY2tlcl9saW5rcyIsICJ0ZXh0IjogIkxpbmtzIiwgInByZWZpeCI6IG51bGwsICJ0eXBl\nX2lkIjogMTAwMSwgInRvb2x0aXAiOiAiUmVsZXZhbnQgbGlua3MgYmFjayB0byB0aGUgdGFzaywg\naWYgdGFzayBiYXNlZCIsICJwbGFjZWhvbGRlciI6ICIiLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0YXJl\nYSIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiY2hvc2VuIjogdHJ1ZSwgImRlZmF1bHRf\nY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogdHJ1ZSwgImludGVybmFs\nIjogZmFsc2UsICJ1dWlkIjogIjA2ZDg1ZWFjLTVhNGUtNDNhMy05ZjViLWU3NGZlYzFlNjgyMiIs\nICJvcGVyYXRpb25zIjogW10sICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInZhbHVlcyI6IFtdLCAi\ncmVhZF9vbmx5IjogZmFsc2UsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgInJpY2hfdGV4dCI6IHRydWUs\nICJleHBvcnRfa2V5IjogImRvY2tlcl9pbnRlZ3JhdGlvbl9pbnZvY2F0aW9ucy9kb2NrZXJfbGlu\na3MiLCAib3JkZXIiOiA2LCAid2lkdGgiOiA2NSwgInRlbXBsYXRlcyI6IFtdLCAiZGVwcmVjYXRl\nZCI6IGZhbHNlfSwgImRvY2tlcl9hdHRhY2htZW50X25hbWUiOiB7ImlkIjogMzUyLCAibmFtZSI6\nICJkb2NrZXJfYXR0YWNobWVudF9uYW1lIiwgInRleHQiOiAiQXR0YWNobWVudCBOYW1lIiwgInBy\nZWZpeCI6IG51bGwsICJ0eXBlX2lkIjogMTAwMSwgInRvb2x0aXAiOiAiVGhlIG5hbWUgb2YgdGhl\nIGF0dGFjaG1lbnQgdGhhdCB3YXMgc2VudCB0byB0aGUgRG9ja2VyIGNvbnRhaW5lci4gV2lsbCBi\nZSBibGFuayBpZiByYW4gYXQgYW4gYXJ0aWZhY3QgbGV2ZWwuIiwgInBsYWNlaG9sZGVyIjogIiIs\nICJpbnB1dF90eXBlIjogInRleHQiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgImNob3Nl\nbiI6IHRydWUsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImJsYW5rX29wdGlv\nbiI6IHRydWUsICJpbnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6ICI4YmFiMGJkNC1lMWI0LTQxOGEt\nYWY5ZC03OTE2YTg1NGQ2OGIiLCAib3BlcmF0aW9ucyI6IFtdLCAib3BlcmF0aW9uX3Blcm1zIjog\ne30sICJ2YWx1ZXMiOiBbXSwgInJlYWRfb25seSI6IGZhbHNlLCAiY2hhbmdlYWJsZSI6IHRydWUs\nICJyaWNoX3RleHQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiZG9ja2VyX2ludGVncmF0aW9uX2lu\ndm9jYXRpb25zL2RvY2tlcl9hdHRhY2htZW50X25hbWUiLCAib3JkZXIiOiAzLCAid2lkdGgiOiAx\nMDcsICJ0ZW1wbGF0ZXMiOiBbXSwgImRlcHJlY2F0ZWQiOiBmYWxzZX0sICJkb2NrZXJfdGltZXN0\nYW1wIjogeyJpZCI6IDM0NCwgIm5hbWUiOiAiZG9ja2VyX3RpbWVzdGFtcCIsICJ0ZXh0IjogIklu\ndGVncmF0aW9uIFJ1biBUaW1lIiwgInByZWZpeCI6IG51bGwsICJ0eXBlX2lkIjogMTAwMSwgInRv\
nb2x0aXAiOiAiVGhlIHRpbWUgdGhhdCB0aGUgZnVuY3Rpb24gZmluaXNoZWQuIiwgInBsYWNlaG9s\nZGVyIjogIiIsICJpbnB1dF90eXBlIjogImRhdGV0aW1lcGlja2VyIiwgInJlcXVpcmVkIjogImFs\nd2F5cyIsICJoaWRlX25vdGlmaWNhdGlvbiI6IGZhbHNlLCAiY2hvc2VuIjogdHJ1ZSwgImRlZmF1\nbHRfY2hvc2VuX2J5X3NlcnZlciI6IGZhbHNlLCAiYmxhbmtfb3B0aW9uIjogdHJ1ZSwgImludGVy\nbmFsIjogZmFsc2UsICJ1dWlkIjogImVlOTQwNjEwLTY5N2EtNGMzOS05NWRjLTYyMWY2YTU1NjA3\nNyIsICJvcGVyYXRpb25zIjogW10sICJvcGVyYXRpb25fcGVybXMiOiB7fSwgInZhbHVlcyI6IFtd\nLCAicmVhZF9vbmx5IjogZmFsc2UsICJjaGFuZ2VhYmxlIjogdHJ1ZSwgInJpY2hfdGV4dCI6IGZh\nbHNlLCAiZXhwb3J0X2tleSI6ICJkb2NrZXJfaW50ZWdyYXRpb25faW52b2NhdGlvbnMvZG9ja2Vy\nX3RpbWVzdGFtcCIsICJvcmRlciI6IDAsICJ3aWR0aCI6IDkwLCAidGVtcGxhdGVzIjogW10sICJk\nZXByZWNhdGVkIjogZmFsc2V9LCAiZG9ja2VyX2NvbnRhaW5lcl9pZCI6IHsiaWQiOiAzNDUsICJu\nYW1lIjogImRvY2tlcl9jb250YWluZXJfaWQiLCAidGV4dCI6ICJEb2NrZXIgQ29udGFpbmVyIElE\nIiwgInByZWZpeCI6IG51bGwsICJ0eXBlX2lkIjogMTAwMSwgInRvb2x0aXAiOiAiVGhlIElEIG9m\nIHRoZSBjb250YWluZXIgdGhhdCB3YXMgdXNlZC4gIiwgInBsYWNlaG9sZGVyIjogIiIsICJpbnB1\ndF90eXBlIjogInRleHQiLCAiaGlkZV9ub3RpZmljYXRpb24iOiBmYWxzZSwgImNob3NlbiI6IHRy\ndWUsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxzZSwgImJsYW5rX29wdGlvbiI6IHRy\ndWUsICJpbnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6ICIxNjJhYWY2MC0wYTJkLTQxYjMtYjQ3My1j\nZTBkOTRkNDY2MDEiLCAib3BlcmF0aW9ucyI6IFtdLCAib3BlcmF0aW9uX3Blcm1zIjoge30sICJ2\nYWx1ZXMiOiBbXSwgInJlYWRfb25seSI6IGZhbHNlLCAiY2hhbmdlYWJsZSI6IHRydWUsICJyaWNo\nX3RleHQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiZG9ja2VyX2ludGVncmF0aW9uX2ludm9jYXRp\nb25zL2RvY2tlcl9jb250YWluZXJfaWQiLCAib3JkZXIiOiA0LCAid2lkdGgiOiAxOTYsICJ0ZW1w\nbGF0ZXMiOiBbXSwgImRlcHJlY2F0ZWQiOiBmYWxzZX0sICJkb2NrZXJfYXJ0aWZhY3RfdmFsdWUi\nOiB7ImlkIjogMzQ5LCAibmFtZSI6ICJkb2NrZXJfYXJ0aWZhY3RfdmFsdWUiLCAidGV4dCI6ICJB\ncnRpZmFjdCBWYWx1ZSIsICJwcmVmaXgiOiBudWxsLCAidHlwZV9pZCI6IDEwMDEsICJ0b29sdGlw\nIjogIlRoZSBhcnRpZmFjdCB0aGF0IHdhcyBzZW50IHRvIHRoZSBEb2NrZXIgY29udGFpbmVyLiBX\naWxsIGJlIGJsYW5rIGlmIHJhbiBhdCBhbiBhdHRhY2htZW50IGxldmVsLiIsICJwbGFjZWhvbGRl\nciI6ICIiLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgImhpZGVfbm90aWZpY2F0aW9uIjogZmFsc2Us\nICJjaG9zZW4iOiB0cnVlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2VydmVyIjogZmFsc2UsICJibGFu\na19vcHRpb24iOiB0cnVlLCAiaW50ZXJuYWwiOiBmYWxzZSwgInV1aWQiOiAiMDY5ZDU4NTItZTA0\nMi00MjgxLWI0YzktZjc2OTY3NTNjZjNhIiwgIm9wZXJhdGlvbnMiOiBbXSwgIm9wZXJhdGlvbl9w\nZXJtcyI6IHt9LCAidmFsdWVzIjogW10sICJyZWFkX29ubHkiOiBmYWxzZSwgImNoYW5nZWFibGUi\nOiB0cnVlLCAicmljaF90ZXh0IjogZmFsc2UsICJleHBvcnRfa2V5IjogImRvY2tlcl9pbnRlZ3Jh\ndGlvbl9pbnZvY2F0aW9ucy9kb2NrZXJfYXJ0aWZhY3RfdmFsdWUiLCAib3JkZXIiOiAyLCAid2lk\ndGgiOiAxMDMsICJ0ZW1wbGF0ZXMiOiBbXSwgImRlcHJlY2F0ZWQiOiBmYWxzZX0sICJkb2NrZXJf\nYXJ0aWZhY3RfdHlwZSI6IHsiaWQiOiAzNTEsICJuYW1lIjogImRvY2tlcl9hcnRpZmFjdF90eXBl\nIiwgInRleHQiOiAiQXJ0aWZhY3QgVHlwZSIsICJwcmVmaXgiOiBudWxsLCAidHlwZV9pZCI6IDEw\nMDEsICJ0b29sdGlwIjogIlRoZSB0eXBlIG9mIGFydGlmYWN0IHRoYXQgd2FzIHVzZWQgYXMgYW4g\naW5wdXQuIFdpbGwgYmUgYmxhbmsgaWYgcmFuIGF0IGFuIGF0dGFjaG1lbnQgbGV2ZWwuIiwgInBs\nYWNlaG9sZGVyIjogIiIsICJpbnB1dF90eXBlIjogInRleHQiLCAiaGlkZV9ub3RpZmljYXRpb24i\nOiBmYWxzZSwgImNob3NlbiI6IHRydWUsICJkZWZhdWx0X2Nob3Nlbl9ieV9zZXJ2ZXIiOiBmYWxz\nZSwgImJsYW5rX29wdGlvbiI6IHRydWUsICJpbnRlcm5hbCI6IGZhbHNlLCAidXVpZCI6ICJlZjQy\nNTdjYy00YzhkLTQ1NGYtOWJkNy00ODVlNTA3MjMwMmUiLCAib3BlcmF0aW9ucyI6IFtdLCAib3Bl\ncmF0aW9uX3Blcm1zIjoge30sICJ2YWx1ZXMiOiBbXSwgInJlYWRfb25seSI6IGZhbHNlLCAiY2hh\nbmdlYWJsZSI6IHRydWUsICJyaWNoX3RleHQiOiBmYWxzZSwgImV4cG9ydF9rZXkiOiAiZG9ja2Vy\nX2ludGVncmF0aW9uX2ludm9jYXRpb25zL2RvY2tlcl9hcnRpZmFjdF90eXBlIiwgIm9yZGVyIjog\nMSwgIndpZHRoIjogNzcsICJ0ZW1wbGF0ZXMiOiBbXSwgImRlcHJlY2F0ZWQiOiBmYWxzZX0sICJk\nb2NrZXJfaW1hZ2UiOiB7ImlkIjogMzQ2LCAibmFtZSI6
ICJkb2NrZXJfaW1hZ2UiLCAidGV4dCI6\nICJEb2NrZXIgSW1hZ2UgJiBPcGVyYXRpb24iLCAicHJlZml4IjogbnVsbCwgInR5cGVfaWQiOiAx\nMDAxLCAidG9vbHRpcCI6ICJUaGUgbmFtZSBvZiB0aGUgaW1hZ2UgdGhhdCB3YXMgdXNlZC4gSW4g\nc29tZSBjYXNlcyBhIHNwZWNpZmllZCBvcGVyYXRpb24gd2lsbCBiZSBzZW50IHRvIHRoZSBjb250\nYWluZXIgaW4gY2FzZXMgd2hlcmUgdGhlcmUgYXJlIG11bHRpcGxlIHBvc3NpYmxlIGVudHJ5cG9p\nbnRzLiIsICJwbGFjZWhvbGRlciI6ICIiLCAiaW5wdXRfdHlwZSI6ICJ0ZXh0IiwgImhpZGVfbm90\naWZpY2F0aW9uIjogZmFsc2UsICJjaG9zZW4iOiB0cnVlLCAiZGVmYXVsdF9jaG9zZW5fYnlfc2Vy\ndmVyIjogZmFsc2UsICJibGFua19vcHRpb24iOiB0cnVlLCAiaW50ZXJuYWwiOiBmYWxzZSwgInV1\naWQiOiAiMDUwNDZlMTgtYTQ5OS00MWNhLTg2NzAtNjM1OTNjMzIyN2I2IiwgIm9wZXJhdGlvbnMi\nOiBbXSwgIm9wZXJhdGlvbl9wZXJtcyI6IHt9LCAidmFsdWVzIjogW10sICJyZWFkX29ubHkiOiBm\nYWxzZSwgImNoYW5nZWFibGUiOiB0cnVlLCAicmljaF90ZXh0IjogZmFsc2UsICJleHBvcnRfa2V5\nIjogImRvY2tlcl9pbnRlZ3JhdGlvbl9pbnZvY2F0aW9ucy9kb2NrZXJfaW1hZ2UiLCAib3JkZXIi\nOiA1LCAid2lkdGgiOiAxMzEsICJ0ZW1wbGF0ZXMiOiBbXSwgImRlcHJlY2F0ZWQiOiBmYWxzZX19\nLCAicHJvcGVydGllcyI6IHsiY2FuX2NyZWF0ZSI6IGZhbHNlLCAiY2FuX2Rlc3Ryb3kiOiBmYWxz\nZSwgImZvcl93aG8iOiBbXX0sICJwYXJlbnRfdHlwZXMiOiBbImluY2lkZW50Il0sICJkaXNwbGF5\nX25hbWUiOiAiRG9ja2VyIEludGVncmF0aW9uIEludm9jYXRpb25zIiwgImZvcl9ub3RpZmljYXRp\nb25zIjogZmFsc2UsICJmb3JfYWN0aW9ucyI6IGZhbHNlLCAiZm9yX2N1c3RvbV9maWVsZHMiOiBm\nYWxzZSwgImV4cG9ydF9rZXkiOiAiZG9ja2VyX2ludGVncmF0aW9uX2ludm9jYXRpb25zIiwgInV1\naWQiOiAiZjQxOGRhYWUtMTg5OC00ODFmLWI2YTItYmRlODgxY2RhZWIzIiwgImFjdGlvbnMiOiBb\nXSwgInNjcmlwdHMiOiBbXX1dLCAic2NyaXB0cyI6IFtdLCAiaW5jaWRlbnRfYXJ0aWZhY3RfdHlw\nZXMiOiBbXSwgIndvcmtmbG93cyI6IFt7IndvcmtmbG93X2lkIjogNTUsICJuYW1lIjogIkV4YW1w\nbGU6IERvY2tlcjpTZW5kIEFydGlmYWN0IFRvIERvY2tlciBDb250YWluZXIgKE5TUkwpIiwgInBy\nb2dyYW1tYXRpY19uYW1lIjogImRvY2tlcl9hbmFseXplX2FydGlmYWN0X3dpdGhfZG9ja2VyX2Nv\nbnRhaW5lcl9uc3JsIiwgIm9iamVjdF90eXBlIjogImFydGlmYWN0IiwgImRlc2NyaXB0aW9uIjog\nIkFuIGV4YW1wbGUgd29ya2Zsb3cgc2NvcGVkIGZvciBBcnRpZmFjdHMgd2hpY2ggd2lsbCwgd2hl\nbiBpbnZva2VkLCBzZW5kIHRoZSBhcnRpZmFjdCB0byBhIERvY2tlciBjb250YWluZXIsIHBlcmZv\ncm0gc29tZSBvcGVyYXRpb24gb24gdGhlIGlucHV0IGFuZCByZXR1cm5zIGluZm9ybWF0aW9uIHRv\nIFJlc2lsaWVudC4iLCAiY3JlYXRvcl9pZCI6ICJhbGZyZWRAd2F5bmVjb3JwLmNvbSIsICJsYXN0\nX21vZGlmaWVkX2J5IjogImFsZnJlZEB3YXluZWNvcnAuY29tIiwgImxhc3RfbW9kaWZpZWRfdGlt\nZSI6IDE1NTE5NTQxMzQwNjAsICJleHBvcnRfa2V5IjogImRvY2tlcl9hbmFseXplX2FydGlmYWN0\nX3dpdGhfZG9ja2VyX2NvbnRhaW5lcl9uc3JsIiwgInV1aWQiOiAiMDI2NGE3MTMtMGFiYi00M2Mx\nLTgzMmUtYjM0MmYzYTgxYzA2IiwgImNvbnRlbnQiOiB7IndvcmtmbG93X2lkIjogImRvY2tlcl9h\nbmFseXplX2FydGlmYWN0X3dpdGhfZG9ja2VyX2NvbnRhaW5lcl9uc3JsIiwgInhtbCI6ICI8P3ht\nbCB2ZXJzaW9uPVwiMS4wXCIgZW5jb2Rpbmc9XCJVVEYtOFwiPz48ZGVmaW5pdGlvbnMgeG1sbnM9\nXCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9CUE1OLzIwMTAwNTI0L01PREVMXCIgeG1sbnM6YnBt\nbmRpPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9ESVwiIHhtbG5zOm9t\nZ2RjPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvREQvMjAxMDA1MjQvRENcIiB4bWxuczpvbWdk\naT1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RJXCIgeG1sbnM6cmVzaWxp\nZW50PVwiaHR0cDovL3Jlc2lsaWVudC5pYm0uY29tL2JwbW5cIiB4bWxuczp4c2Q9XCJodHRwOi8v\nd3d3LnczLm9yZy8yMDAxL1hNTFNjaGVtYVwiIHhtbG5zOnhzaT1cImh0dHA6Ly93d3cudzMub3Jn\nLzIwMDEvWE1MU2NoZW1hLWluc3RhbmNlXCIgdGFyZ2V0TmFtZXNwYWNlPVwiaHR0cDovL3d3dy5j\nYW11bmRhLm9yZy90ZXN0XCI+PHByb2Nlc3MgaWQ9XCJkb2NrZXJfYW5hbHl6ZV9hcnRpZmFjdF93\naXRoX2RvY2tlcl9jb250YWluZXJfbnNybFwiIGlzRXhlY3V0YWJsZT1cInRydWVcIiBuYW1lPVwi\nRXhhbXBsZTogRG9ja2VyOlNlbmQgQXJ0aWZhY3QgVG8gRG9ja2VyIENvbnRhaW5lciAoTlNSTClc\nIj48ZG9jdW1lbnRhdGlvbj5BbiBleGFtcGxlIHdvcmtmbG93IHNjb3BlZCBmb3IgQXJ0aWZhY3Rz\nIHdoaWNoIHdpbGwsIHdoZW4gaW52b2tlZCwgc2VuZCB0aGUgYXJ0aWZhY3QgdG8gYSBEb2NrZXIg\nY29udGFpbmV
yLCBwZXJmb3JtIHNvbWUgb3BlcmF0aW9uIG9uIHRoZSBpbnB1dCBhbmQgcmV0dXJu\ncyBpbmZvcm1hdGlvbiB0byBSZXNpbGllbnQuPC9kb2N1bWVudGF0aW9uPjxzdGFydEV2ZW50IGlk\nPVwiU3RhcnRFdmVudF8xNTVhc3htXCI+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18xaWJiOTNuPC9v\ndXRnb2luZz48L3N0YXJ0RXZlbnQ+PHNlcnZpY2VUYXNrIGlkPVwiU2VydmljZVRhc2tfMDNocnlz\nNFwiIG5hbWU9XCJEb2NrZXI6IFJ1biBEb2NrZXIgQ29udGFpbmVyXCIgcmVzaWxpZW50OnR5cGU9\nXCJmdW5jdGlvblwiPjxleHRlbnNpb25FbGVtZW50cz48cmVzaWxpZW50OmZ1bmN0aW9uIHV1aWQ9\nXCI3YTIyMGJlMy0wNWY3LTRiMTctYTFhNy05N2I0MDc2ZTE0YmVcIj57XCJpbnB1dHNcIjp7XCJl\nNmQ2NmJhYy04NDFkLTQwM2YtODJmYS04NjJkYzNjZDIyMmZcIjp7XCJpbnB1dF90eXBlXCI6XCJz\ndGF0aWNcIixcInN0YXRpY19pbnB1dFwiOntcIm11bHRpc2VsZWN0X3ZhbHVlXCI6W10sXCJzZWxl\nY3RfdmFsdWVcIjpcImY2NDk5OWJhLTI3ODctNGIyMS04YzZjLTFlMGQ0OTc2MDA5ZVwifX19LFwi\ncG9zdF9wcm9jZXNzaW5nX3NjcmlwdFwiOlwibm90ZV90ZXh0X3N0YXJ0ID0gdVxcXCJcXFwiXFxc\nIiZsdDtiJmd0O0RvY2tlciBJbnRlZ3JhdGlvbiZsdDsvYiZndDtcXG4gICAgICAgICAgICAgICZs\ndDticiZndDsmbHQ7YnImZ3Q7QSBjb250YWluZXIgd2FzIHJhbiB1c2luZyB0aGUgaW1hZ2UgJmx0\nO2ImZ3Q7ezB9Jmx0Oy9iJmd0O1xcXCJcXFwiXFxcIi5mb3JtYXQocmVzdWx0cy5pbnB1dHNbXFxc\nImRvY2tlcl9pbWFnZVxcXCJdW1xcXCJuYW1lXFxcIl0pXFxuICAgICAgICAgICAgICBcXG4jIElm\nIHRoZSBBdHRhY2htZW50IGF0dHJpYnV0ZSBvZiB0aGUgY29udGVudCBwYXlsb2FkIGlzIHNldDsg\nd2UgYXJlIGRlYWxpbmcgd2l0aCBhbiBhdHRhY2htZW50XFxuaWYgcmVzdWx0cy5jb250ZW50W1xc\nXCJhdHRhY2htZW50X25hbWVcXFwiXSAhPSBOb25lOlxcbiAgbm90ZV90ZXh0X2F0dGFjaG1lbnQg\nPSB1XFxcIlxcXCJcXFwiJmx0O2JyJmd0OyBPbiBhbiBBdHRhY2htZW50IHdpdGggbmFtZSB7MH0g\nXFxcIlxcXCJcXFwiLmZvcm1hdChyZXN1bHRzLmNvbnRlbnRbXFxcImF0dGFjaG1lbnRfbmFtZVxc\nXCJdKVxcbiAgbm90ZV90ZXh0X3N0YXJ0ICs9IG5vdGVfdGV4dF9hdHRhY2htZW50XFxuXFxuIyBP\ndGhlcndpc2Ugd2UgYXJlIGRlYWxpbmcgd2l0aCBhbiBhcnRpZmFjdFxcbmVsc2U6XFxuICBub3Rl\nX3RleHRfYXJ0aWZhY3QgPSB1XFxcIlxcXCJcXFwiJmx0O2JyJmd0OyBPbiBhbiBBcnRpZmFjdCBv\nZiBUeXBlOiAmbHQ7YiZndDt7MH0mbHQ7L2ImZ3Q7XFxuICAgICAgICAgICAgICAgICAgICAgICAg\nICAmbHQ7YnImZ3Q7IEFydGlmYWN0IFZhbHVlOiAmbHQ7YiZndDt7MX0mbHQ7L2ImZ3Q7XFxcIlxc\nXCJcXFwiLmZvcm1hdChyZXN1bHRzLmlucHV0c1tcXFwiZG9ja2VyX2FydGlmYWN0X3R5cGVcXFwi\nXSwgcmVzdWx0cy5pbnB1dHNbXFxcImRvY2tlcl9pbnB1dFxcXCJdKVxcbiAgbm90ZV90ZXh0X3N0\nYXJ0ICs9IG5vdGVfdGV4dF9hcnRpZmFjdFxcbiAgICAgICAgICAgICAgXFxubm90ZV90ZXh0X2Vu\nZCA9IFxcXCJcXFwiXFxcIiZsdDticiZndDtDb250YWluZXIgSUQgOiAmbHQ7YiZndDt7MH0mbHQ7\nL2ImZ3Q7XFxuICAgICAgICAgICAgICAmbHQ7YnImZ3Q7Q29udGFpbmVyIGV4aXQgY29kZSA6ICZs\ndDtiJmd0O3sxfSZsdDsvYiZndDtcXG4gICAgICAgICAgICAgICZsdDticiZndDsmbHQ7YnImZ3Q7\nIENvbnRhaW5lciBMb2dzIGhhdmUgYmVlbiBzYXZlZCBhcyBhbiBhdHRhY2htZW50LlxcbiAgICAg\nICAgICAgICAgQ29udGFpbmVyIFN0YXRzLCBMb2dzLCBGdW5jdGlvbiBJbnB1dHMgb3IgUnVuIFRp\nbWUgTWV0cmljcyBhcmUgYWxzbyBhdmFpbGFibGUgYXMgcGFydCBvZiB0aGUgcmVzdWx0IHBheWxv\nYWRcXFwiXFxcIlxcXCIuZm9ybWF0KFxcbiAgICAgICAgICAgICAgICByZXN1bHRzLmNvbnRlbnRb\nXFxcImNvbnRhaW5lcl9pZFxcXCJdLCByZXN1bHRzLmNvbnRlbnRbXFxcImNvbnRhaW5lcl9leGl0\nX3N0YXR1c1xcXCJdKVxcblxcbm5vdGVfdGV4dCA9IG5vdGVfdGV4dF9zdGFydCtub3RlX3RleHRf\nZW5kXFxuaW5jaWRlbnQuYWRkTm90ZShoZWxwZXIuY3JlYXRlUmljaFRleHQobm90ZV90ZXh0KSlc\nXG5cXG50cnk6XFxuICAgIGRlcyA9IGFydGlmYWN0LmRlc2NyaXB0aW9uLmNvbnRlbnRcXG5leGNl\ncHQgRXhjZXB0aW9uOlxcbiAgZGVzID0gTm9uZVxcbiAgXFxuaWYgZGVzIGlzIE5vbmU6XFxuICBc\nXG4gIGFydGlmYWN0LmRlc2NyaXB0aW9uID0gdVxcXCJcXFwiXFxcIkRvY2tlciBJbnRlZ3JhdGlv\nbjpcXFxcbiBBcnRpZmFjdCB3YXMgc2Nhbm5lZCBieSBkb2NrZXIgaW1hZ2UgezB9ICBcXFxcbiBS\nZXN1bHRzOlxcXFxuezF9XFxcIlxcXCJcXFwiLmZvcm1hdChyZXN1bHRzLmlucHV0c1tcXFwiZG9j\na2VyX2ltYWdlXFxcIl1bXFxcIm5hbWVcXFwiXSxyZXN1bHRzLmNvbnRlbnRbXFxcImxvZ3NcXFwi\nXSlcXG4gICMgVW5jb21tZW50IHRoaXMgbGluZSB0byBOT1QgaGF2ZSB0aGUgcmVzdWx0cyBhcHBl\nbmRlZCB0byB0aGUgZGVzY3JpcHQgb2YgdGhlIGFydGlmYWN0XFxuICAj
YXJ0aWZhY3QuZGVzY3Jp\ncHRpb24gPSB1XFxcIlxcXCJcXFwiRG9ja2VyIEludGVncmF0aW9uOiBBcnRpZmFjdCB3YXMgc2Nh\nbm5lZCBieSBkb2NrZXIgaW1hZ2UgezB9XFxcIlxcXCJcXFwiLmZvcm1hdChyZXN1bHRzLmlucHV0\nc1tcXFwiZG9ja2VyX2ltYWdlXFxcIl1bXFxcIm5hbWVcXFwiXSlcXG5lbHNlOlxcbiAgXFxuICBh\ncnRpZmFjdC5kZXNjcmlwdGlvbiA9IGRlcyArIHVcXFwiXFxcIlxcXCJEb2NrZXIgSW50ZWdyYXRp\nb246IEFydGlmYWN0IHdhcyBzY2FubmVkIGJ5IGRvY2tlciBpbWFnZSB7MH0gXFxcXG4gUmVzdWx0\nczpcXFxcbnsxfVxcXCJcXFwiXFxcIi5mb3JtYXQocmVzdWx0cy5pbnB1dHNbXFxcImRvY2tlcl9p\nbWFnZVxcXCJdW1xcXCJuYW1lXFxcIl0scmVzdWx0cy5jb250ZW50W1xcXCJsb2dzXFxcIl0pXFxu\nICBcXG4gICMgVW5jb21tZW50IHRoaXMgbGluZSB0byBOT1QgaGF2ZSB0aGUgcmVzdWx0cyBhcHBl\nbmRlZCB0byB0aGUgZGVzY3JpcHQgb2YgdGhlIGFydGlmYWN0XFxuICAjYXJ0aWZhY3QuZGVzY3Jp\ncHRpb24gPSBkZXMgKyB1XFxcIlxcXCJcXFwiRG9ja2VyIEludGVncmF0aW9uOiBBcnRpZmFjdCB3\nYXMgc2Nhbm5lZCBieSBkb2NrZXIgaW1hZ2UgezB9XFxcIlxcXCJcXFwiLmZvcm1hdChyZXN1bHRz\nLmlucHV0c1tcXFwiZG9ja2VyX2ltYWdlXFxcIl1bXFxcIm5hbWVcXFwiXSlcXG4gIFxcbiAgXFxu\ncm93ID0gaW5jaWRlbnQuYWRkUm93KFxcXCJkb2NrZXJfaW50ZWdyYXRpb25faW52b2NhdGlvbnNc\nXFwiKVxcbnJvd1tcXFwiZG9ja2VyX3RpbWVzdGFtcFxcXCJdID0gcmVzdWx0c1tcXFwibWV0cmlj\nc1xcXCJdW1xcXCJ0aW1lc3RhbXBfZXBvY2hcXFwiXSBvciAwXFxucm93W1xcXCJkb2NrZXJfY29u\ndGFpbmVyX2lkXFxcIl0gPSByZXN1bHRzLmNvbnRlbnRbXFxcImNvbnRhaW5lcl9pZFxcXCJdXFxu\ncm93W1xcXCJkb2NrZXJfaW1hZ2VcXFwiXSA9IHJlc3VsdHMuaW5wdXRzW1xcXCJkb2NrZXJfaW1h\nZ2VcXFwiXVtcXFwibmFtZVxcXCJdXFxuXFxucm93W1xcXCJkb2NrZXJfYXJ0aWZhY3RfdHlwZVxc\nXCJdID0gcmVzdWx0cy5pbnB1dHNbXFxcImRvY2tlcl9hcnRpZmFjdF90eXBlXFxcIl1cXG5yb3db\nXFxcImRvY2tlcl9hcnRpZmFjdF92YWx1ZVxcXCJdID0gcmVzdWx0cy5pbnB1dHNbXFxcImRvY2tl\ncl9pbnB1dFxcXCJdXFxuXCIsXCJwcmVfcHJvY2Vzc2luZ19zY3JpcHRcIjpcImlucHV0cy5kb2Nr\nZXJfaW5wdXQgPSBhcnRpZmFjdC52YWx1ZVxcbmlucHV0cy5pbmNpZGVudF9pZCA9IGluY2lkZW50\nLmlkIFxcbmlucHV0cy5kb2NrZXJfYXJ0aWZhY3RfdHlwZSA9IGFydGlmYWN0LnR5cGVcIn08L3Jl\nc2lsaWVudDpmdW5jdGlvbj48L2V4dGVuc2lvbkVsZW1lbnRzPjxpbmNvbWluZz5TZXF1ZW5jZUZs\nb3dfMWliYjkzbjwvaW5jb21pbmc+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18xbTIyZHAwPC9vdXRn\nb2luZz48L3NlcnZpY2VUYXNrPjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3dfMWliYjkz\nblwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlNlcnZpY2VU\nYXNrXzAzaHJ5czRcIi8+PGVuZEV2ZW50IGlkPVwiRW5kRXZlbnRfMDZxdXA1YlwiPjxpbmNvbWlu\nZz5TZXF1ZW5jZUZsb3dfMW0yMmRwMDwvaW5jb21pbmc+PC9lbmRFdmVudD48c2VxdWVuY2VGbG93\nIGlkPVwiU2VxdWVuY2VGbG93XzFtMjJkcDBcIiBzb3VyY2VSZWY9XCJTZXJ2aWNlVGFza18wM2hy\neXM0XCIgdGFyZ2V0UmVmPVwiRW5kRXZlbnRfMDZxdXA1YlwiLz48dGV4dEFubm90YXRpb24gaWQ9\nXCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCI+PHRleHQ+U3RhcnQgeW91ciB3b3JrZmxvdyBoZXJl\nPC90ZXh0PjwvdGV4dEFubm90YXRpb24+PGFzc29jaWF0aW9uIGlkPVwiQXNzb2NpYXRpb25fMXNl\ndWo0OFwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRhcmdldFJlZj1cIlRleHRB\nbm5vdGF0aW9uXzFreHhpeXRcIi8+PC9wcm9jZXNzPjxicG1uZGk6QlBNTkRpYWdyYW0gaWQ9XCJC\nUE1ORGlhZ3JhbV8xXCI+PGJwbW5kaTpCUE1OUGxhbmUgYnBtbkVsZW1lbnQ9XCJ1bmRlZmluZWRc\nIiBpZD1cIkJQTU5QbGFuZV8xXCI+PGJwbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTdGFy\ndEV2ZW50XzE1NWFzeG1cIiBpZD1cIlN0YXJ0RXZlbnRfMTU1YXN4bV9kaVwiPjxvbWdkYzpCb3Vu\nZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjE2MlwiIHk9XCIxODhcIi8+PGJwbW5k\naTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIwXCIgd2lkdGg9XCI5MFwiIHg9XCIx\nNTdcIiB5PVwiMjIzXCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1OU2hhcGU+PGJw\nbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0XCIgaWQ9\nXCJUZXh0QW5ub3RhdGlvbl8xa3h4aXl0X2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzMFwi\nIHdpZHRoPVwiMTAwXCIgeD1cIjk5XCIgeT1cIjI1NFwiLz48L2JwbW5kaTpCUE1OU2hhcGU+PGJw\nbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIkFzc29jaWF0aW9uXzFzZXVqNDhcIiBpZD1cIkFz\nc29jaWF0aW9uXzFzZXVqNDh
fZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE2OVwiIHhzaTp0eXBl\nPVwib21nZGM6UG9pbnRcIiB5PVwiMjIwXCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMTUzXCIgeHNp\nOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyNTRcIi8+PC9icG1uZGk6QlBNTkVkZ2U+PGJwbW5k\naTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJTZXJ2aWNlVGFza18wM2hyeXM0XCIgaWQ9XCJTZXJ2\naWNlVGFza18wM2hyeXM0X2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCI4MFwiIHdpZHRoPVwi\nMTAwXCIgeD1cIjM4N1wiIHk9XCIxNjZcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBN\nTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3dfMWliYjkzblwiIGlkPVwiU2VxdWVuY2VG\nbG93XzFpYmI5M25fZGlcIj48b21nZGk6d2F5cG9pbnQgeD1cIjE5OFwiIHhzaTp0eXBlPVwib21n\nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlwb2ludCB4PVwiMzg3XCIgeHNpOnR5cGU9\nXCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5k\ncyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwiIHg9XCIyOTIuNVwiIHk9XCIxODRcIi8+PC9icG1u\nZGk6QlBNTkxhYmVsPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVt\nZW50PVwiRW5kRXZlbnRfMDZxdXA1YlwiIGlkPVwiRW5kRXZlbnRfMDZxdXA1Yl9kaVwiPjxvbWdk\nYzpCb3VuZHMgaGVpZ2h0PVwiMzZcIiB3aWR0aD1cIjM2XCIgeD1cIjY0MlwiIHk9XCIxODhcIi8+\nPGJwbW5kaTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiMFwi\nIHg9XCI2NjBcIiB5PVwiMjI3XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1OU2hh\ncGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxvd18xbTIyZHAwXCIg\naWQ9XCJTZXF1ZW5jZUZsb3dfMW0yMmRwMF9kaVwiPjxvbWdkaTp3YXlwb2ludCB4PVwiNDg3XCIg\neHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PG9tZ2RpOndheXBvaW50IHg9XCI2\nNDJcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48YnBtbmRpOkJQTU5MYWJl\nbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1cIjU2NC41XCIgeT1c\nIjE4NFwiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTkVkZ2U+PC9icG1uZGk6QlBN\nTlBsYW5lPjwvYnBtbmRpOkJQTU5EaWFncmFtPjwvZGVmaW5pdGlvbnM+IiwgInZlcnNpb24iOiAx\nMX0sICJhY3Rpb25zIjogW119LCB7IndvcmtmbG93X2lkIjogNTYsICJuYW1lIjogIkV4YW1wbGU6\nIERvY2tlcjpTZW5kIEFydGlmYWN0IFRvIERvY2tlciBDb250YWluZXIgKEFtYXNzKSIsICJwcm9n\ncmFtbWF0aWNfbmFtZSI6ICJkb2NrZXJfYW5hbHl6ZV9hcnRpZmFjdF93aXRoX2RvY2tlcl9jb250\nYWluZXJfYW1hc3MiLCAib2JqZWN0X3R5cGUiOiAiYXJ0aWZhY3QiLCAiZGVzY3JpcHRpb24iOiAi\nQW4gZXhhbXBsZSB3b3JrZmxvdyBzY29wZWQgZm9yIEFydGlmYWN0cyB3aGljaCB3aWxsLCB3aGVu\nIGludm9rZWQsIHNlbmQgdGhlIGFydGlmYWN0IHRvIGEgRG9ja2VyIGNvbnRhaW5lciwgcGVyZm9y\nbSBzb21lIG9wZXJhdGlvbiBvbiB0aGUgaW5wdXQgYW5kIHJldHVybnMgaW5mb3JtYXRpb24gdG8g\nUmVzaWxpZW50LiIsICJjcmVhdG9yX2lkIjogImFsZnJlZEB3YXluZWNvcnAuY29tIiwgImxhc3Rf\nbW9kaWZpZWRfYnkiOiAiYWxmcmVkQHdheW5lY29ycC5jb20iLCAibGFzdF9tb2RpZmllZF90aW1l\nIjogMTU1MTk1NDEzMDExMiwgImV4cG9ydF9rZXkiOiAiZG9ja2VyX2FuYWx5emVfYXJ0aWZhY3Rf\nd2l0aF9kb2NrZXJfY29udGFpbmVyX2FtYXNzIiwgInV1aWQiOiAiNDVmZjY4NzgtM2I4YS00ZWQx\nLWI5ZDAtYzc5YmE0MjQ3MzA1IiwgImNvbnRlbnQiOiB7IndvcmtmbG93X2lkIjogImRvY2tlcl9h\nbmFseXplX2FydGlmYWN0X3dpdGhfZG9ja2VyX2NvbnRhaW5lcl9hbWFzcyIsICJ4bWwiOiAiPD94\nbWwgdmVyc2lvbj1cIjEuMFwiIGVuY29kaW5nPVwiVVRGLThcIj8+PGRlZmluaXRpb25zIHhtbG5z\nPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9NT0RFTFwiIHhtbG5zOmJw\nbW5kaT1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0JQTU4vMjAxMDA1MjQvRElcIiB4bWxuczpv\nbWdkYz1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RDXCIgeG1sbnM6b21n\nZGk9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9ERC8yMDEwMDUyNC9ESVwiIHhtbG5zOnJlc2ls\naWVudD1cImh0dHA6Ly9yZXNpbGllbnQuaWJtLmNvbS9icG1uXCIgeG1sbnM6eHNkPVwiaHR0cDov\nL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWFcIiB4bWxuczp4c2k9XCJodHRwOi8vd3d3LnczLm9y\nZy8yMDAxL1hNTFNjaGVtYS1pbnN0YW5jZVwiIHRhcmdldE5hbWVzcGFjZT1cImh0dHA6Ly93d3cu\nY2FtdW5kYS5vcmcvdGVzdFwiPjxwcm9jZXNzIGlkPVwiZG9ja2VyX2FuYWx5emVfYXJ0aWZhY3Rf\nd2l0aF9kb2NrZXJfY29udGFpbmVyX2FtYXNzXCIgaXNFeGVjdXRhYmxlPVwidHJ1ZVwi
IG5hbWU9\nXCJFeGFtcGxlOiBEb2NrZXI6U2VuZCBBcnRpZmFjdCBUbyBEb2NrZXIgQ29udGFpbmVyIChBbWFz\ncylcIj48ZG9jdW1lbnRhdGlvbj5BbiBleGFtcGxlIHdvcmtmbG93IHNjb3BlZCBmb3IgQXJ0aWZh\nY3RzIHdoaWNoIHdpbGwsIHdoZW4gaW52b2tlZCwgc2VuZCB0aGUgYXJ0aWZhY3QgdG8gYSBEb2Nr\nZXIgY29udGFpbmVyLCBwZXJmb3JtIHNvbWUgb3BlcmF0aW9uIG9uIHRoZSBpbnB1dCBhbmQgcmV0\ndXJucyBpbmZvcm1hdGlvbiB0byBSZXNpbGllbnQuPC9kb2N1bWVudGF0aW9uPjxzdGFydEV2ZW50\nIGlkPVwiU3RhcnRFdmVudF8xNTVhc3htXCI+PG91dGdvaW5nPlNlcXVlbmNlRmxvd18xaWJiOTNu\nPC9vdXRnb2luZz48L3N0YXJ0RXZlbnQ+PHNlcnZpY2VUYXNrIGlkPVwiU2VydmljZVRhc2tfMDNo\ncnlzNFwiIG5hbWU9XCJEb2NrZXI6IFJ1biBEb2NrZXIgQ29udGFpbmVyXCIgcmVzaWxpZW50OnR5\ncGU9XCJmdW5jdGlvblwiPjxleHRlbnNpb25FbGVtZW50cz48cmVzaWxpZW50OmZ1bmN0aW9uIHV1\naWQ9XCI3YTIyMGJlMy0wNWY3LTRiMTctYTFhNy05N2I0MDc2ZTE0YmVcIj57XCJpbnB1dHNcIjp7\nXCJlNmQ2NmJhYy04NDFkLTQwM2YtODJmYS04NjJkYzNjZDIyMmZcIjp7XCJpbnB1dF90eXBlXCI6\nXCJzdGF0aWNcIixcInN0YXRpY19pbnB1dFwiOntcIm11bHRpc2VsZWN0X3ZhbHVlXCI6W10sXCJz\nZWxlY3RfdmFsdWVcIjpcIjM1NjYxOWY4LTNhZWItNDdhMy04NGJiLWMzMzVjMWExM2JhZlwifX19\nLFwicG9zdF9wcm9jZXNzaW5nX3NjcmlwdFwiOlwibm90ZV90ZXh0X3N0YXJ0ID0gdVxcXCJcXFwi\nXFxcIiZsdDtiJmd0O0RvY2tlciBJbnRlZ3JhdGlvbiZsdDsvYiZndDtcXG4gICAgICAgICAgICAg\nICZsdDticiZndDsmbHQ7YnImZ3Q7QSBjb250YWluZXIgd2FzIHJhbiB1c2luZyB0aGUgaW1hZ2Ug\nJmx0O2ImZ3Q7ezB9Jmx0Oy9iJmd0O1xcXCJcXFwiXFxcIi5mb3JtYXQocmVzdWx0cy5pbnB1dHNb\nXFxcImRvY2tlcl9pbWFnZVxcXCJdW1xcXCJuYW1lXFxcIl0pXFxuICAgICAgICAgICAgICBcXG4j\nIElmIHRoZSBBdHRhY2htZW50IGF0dHJpYnV0ZSBvZiB0aGUgY29udGVudCBwYXlsb2FkIGlzIHNl\ndDsgd2UgYXJlIGRlYWxpbmcgd2l0aCBhbiBhdHRhY2htZW50XFxuaWYgcmVzdWx0cy5jb250ZW50\nW1xcXCJhdHRhY2htZW50X25hbWVcXFwiXSAhPSBOb25lOlxcbiAgbm90ZV90ZXh0X2F0dGFjaG1l\nbnQgPSB1XFxcIlxcXCJcXFwiJmx0O2JyJmd0OyBPbiBhbiBBdHRhY2htZW50IHdpdGggbmFtZSB7\nMH0gXFxcIlxcXCJcXFwiLmZvcm1hdChyZXN1bHRzLmNvbnRlbnRbXFxcImF0dGFjaG1lbnRfbmFt\nZVxcXCJdKVxcbiAgbm90ZV90ZXh0X3N0YXJ0ICs9IG5vdGVfdGV4dF9hdHRhY2htZW50XFxuXFxu\nIyBPdGhlcndpc2Ugd2UgYXJlIGRlYWxpbmcgd2l0aCBhbiBhcnRpZmFjdFxcbmVsc2U6XFxuICBu\nb3RlX3RleHRfYXJ0aWZhY3QgPSB1XFxcIlxcXCJcXFwiJmx0O2JyJmd0OyBPbiBhbiBBcnRpZmFj\ndCBvZiBUeXBlOiAmbHQ7YiZndDt7MH0mbHQ7L2ImZ3Q7XFxuICAgICAgICAgICAgICAgICAgICAg\nICAgICAmbHQ7YnImZ3Q7IEFydGlmYWN0IFZhbHVlOiAmbHQ7YiZndDt7MX0mbHQ7L2ImZ3Q7XFxc\nIlxcXCJcXFwiLmZvcm1hdChyZXN1bHRzLmlucHV0c1tcXFwiZG9ja2VyX2FydGlmYWN0X3R5cGVc\nXFwiXSwgcmVzdWx0cy5pbnB1dHNbXFxcImRvY2tlcl9pbnB1dFxcXCJdKVxcbiAgbm90ZV90ZXh0\nX3N0YXJ0ICs9IG5vdGVfdGV4dF9hcnRpZmFjdFxcbiAgICAgICAgICAgICAgXFxubm90ZV90ZXh0\nX2VuZCA9IFxcXCJcXFwiXFxcIiZsdDticiZndDtDb250YWluZXIgSUQgOiAmbHQ7YiZndDt7MH0m\nbHQ7L2ImZ3Q7XFxuICAgICAgICAgICAgICAmbHQ7YnImZ3Q7Q29udGFpbmVyIGV4aXQgY29kZSA6\nICZsdDtiJmd0O3sxfSZsdDsvYiZndDtcXG4gICAgICAgICAgICAgICZsdDticiZndDsmbHQ7YnIm\nZ3Q7IENvbnRhaW5lciBMb2dzIGhhdmUgYmVlbiBzYXZlZCBhcyBhbiBhdHRhY2htZW50LlxcbiAg\nICAgICAgICAgICAgQ29udGFpbmVyIFN0YXRzLCBMb2dzLCBGdW5jdGlvbiBJbnB1dHMgb3IgUnVu\nIFRpbWUgTWV0cmljcyBhcmUgYWxzbyBhdmFpbGFibGUgYXMgcGFydCBvZiB0aGUgcmVzdWx0IHBh\neWxvYWRcXFwiXFxcIlxcXCIuZm9ybWF0KFxcbiAgICAgICAgICAgICAgICByZXN1bHRzLmNvbnRl\nbnRbXFxcImNvbnRhaW5lcl9pZFxcXCJdLCByZXN1bHRzLmNvbnRlbnRbXFxcImNvbnRhaW5lcl9l\neGl0X3N0YXR1c1xcXCJdKVxcblxcbm5vdGVfdGV4dCA9IG5vdGVfdGV4dF9zdGFydCtub3RlX3Rl\neHRfZW5kXFxuaW5jaWRlbnQuYWRkTm90ZShoZWxwZXIuY3JlYXRlUmljaFRleHQobm90ZV90ZXh0\nKSlcXG5cXG50cnk6XFxuICAgIGRlcyA9IGFydGlmYWN0LmRlc2NyaXB0aW9uLmNvbnRlbnRcXG5l\neGNlcHQgRXhjZXB0aW9uOlxcbiAgZGVzID0gTm9uZVxcbiAgXFxuaWYgZGVzIGlzIE5vbmU6XFxu\nICAjIFVuY29tbWVudCB0aGlzIGxpbmUgdG8gaGF2ZSB0aGUgQW1hc3Mgc3ViZG9tYWluIHJlc3Vs\ndHMgYXBwZW5kZWQgdG8gdGhlIGRlc2NyaXB0IG9mIHRoZSBhcnRpZmFjdFxcbiAgI2FydGlmYWN0\nLmRlc2NyaXB0aW9uID0gdVxcXCJcXFwiXFx
cIkRvY2tlciBJbnRlZ3JhdGlvbjpcXFxcbiBBcnRp\nZmFjdCB3YXMgc2Nhbm5lZCBieSBkb2NrZXIgaW1hZ2UgezB9ICBcXFxcbiBSZXN1bHRzOlxcXFxu\nezF9XFxcIlxcXCJcXFwiLmZvcm1hdChyZXN1bHRzLmlucHV0c1tcXFwiZG9ja2VyX2ltYWdlXFxc\nIl1bXFxcIm5hbWVcXFwiXSxyZXN1bHRzLmNvbnRlbnRbXFxcImxvZ3NcXFwiXSlcXG4gIFxcbiAg\nYXJ0aWZhY3QuZGVzY3JpcHRpb24gPSB1XFxcIlxcXCJcXFwiRG9ja2VyIEludGVncmF0aW9uOiBB\ncnRpZmFjdCB3YXMgc2Nhbm5lZCBieSBkb2NrZXIgaW1hZ2UgezB9XFxcIlxcXCJcXFwiLmZvcm1h\ndChyZXN1bHRzLmlucHV0c1tcXFwiZG9ja2VyX2ltYWdlXFxcIl1bXFxcIm5hbWVcXFwiXSlcXG5l\nbHNlOlxcbiAgIyBVbmNvbW1lbnQgdGhpcyBsaW5lIHRvIGhhdmUgdGhlIEFtYXNzIHN1YmRvbWFp\nbiByZXN1bHRzIGFwcGVuZGVkIHRvIHRoZSBkZXNjcmlwdCBvZiB0aGUgYXJ0aWZhY3QgXFxuICAj\nYXJ0aWZhY3QuZGVzY3JpcHRpb24gPSBkZXMgKyB1XFxcIlxcXCJcXFwiRG9ja2VyIEludGVncmF0\naW9uOiBBcnRpZmFjdCB3YXMgc2Nhbm5lZCBieSBkb2NrZXIgaW1hZ2UgezB9IFxcXFxuIFJlc3Vs\ndHM6XFxcXG57MX1cXFwiXFxcIlxcXCIuZm9ybWF0KHJlc3VsdHMuaW5wdXRzW1xcXCJkb2NrZXJf\naW1hZ2VcXFwiXVtcXFwibmFtZVxcXCJdLHJlc3VsdHMuY29udGVudFtcXFwibG9nc1xcXCJdKVxc\nbiAgXFxuICBhcnRpZmFjdC5kZXNjcmlwdGlvbiA9IGRlcyArIHVcXFwiXFxcIlxcXCJEb2NrZXIg\nSW50ZWdyYXRpb246IEFydGlmYWN0IHdhcyBzY2FubmVkIGJ5IGRvY2tlciBpbWFnZSB7MH1cXFwi\nXFxcIlxcXCIuZm9ybWF0KHJlc3VsdHMuaW5wdXRzW1xcXCJkb2NrZXJfaW1hZ2VcXFwiXVtcXFwi\nbmFtZVxcXCJdKVxcbiAgXFxuICBcXG5yb3cgPSBpbmNpZGVudC5hZGRSb3coXFxcImRvY2tlcl9p\nbnRlZ3JhdGlvbl9pbnZvY2F0aW9uc1xcXCIpXFxucm93W1xcXCJkb2NrZXJfdGltZXN0YW1wXFxc\nIl0gPSByZXN1bHRzW1xcXCJtZXRyaWNzXFxcIl1bXFxcInRpbWVzdGFtcF9lcG9jaFxcXCJdIG9y\nIDBcXG5yb3dbXFxcImRvY2tlcl9jb250YWluZXJfaWRcXFwiXSA9IHJlc3VsdHMuY29udGVudFtc\nXFwiY29udGFpbmVyX2lkXFxcIl1cXG5yb3dbXFxcImRvY2tlcl9pbWFnZVxcXCJdID0gcmVzdWx0\ncy5pbnB1dHNbXFxcImRvY2tlcl9pbWFnZVxcXCJdW1xcXCJuYW1lXFxcIl1cXG5cXG5yb3dbXFxc\nImRvY2tlcl9hcnRpZmFjdF90eXBlXFxcIl0gPSByZXN1bHRzLmlucHV0c1tcXFwiZG9ja2VyX2Fy\ndGlmYWN0X3R5cGVcXFwiXVxcbnJvd1tcXFwiZG9ja2VyX2FydGlmYWN0X3ZhbHVlXFxcIl0gPSBy\nZXN1bHRzLmlucHV0c1tcXFwiZG9ja2VyX2lucHV0XFxcIl1cXG5cIixcInByZV9wcm9jZXNzaW5n\nX3NjcmlwdFwiOlwiaW5wdXRzLmRvY2tlcl9pbnB1dCA9IGFydGlmYWN0LnZhbHVlXFxuaW5wdXRz\nLmluY2lkZW50X2lkID0gaW5jaWRlbnQuaWQgXFxuaW5wdXRzLmRvY2tlcl9hcnRpZmFjdF90eXBl\nID0gYXJ0aWZhY3QudHlwZVwifTwvcmVzaWxpZW50OmZ1bmN0aW9uPjwvZXh0ZW5zaW9uRWxlbWVu\ndHM+PGluY29taW5nPlNlcXVlbmNlRmxvd18xaWJiOTNuPC9pbmNvbWluZz48b3V0Z29pbmc+U2Vx\ndWVuY2VGbG93XzFtMjJkcDA8L291dGdvaW5nPjwvc2VydmljZVRhc2s+PHNlcXVlbmNlRmxvdyBp\nZD1cIlNlcXVlbmNlRmxvd18xaWJiOTNuXCIgc291cmNlUmVmPVwiU3RhcnRFdmVudF8xNTVhc3ht\nXCIgdGFyZ2V0UmVmPVwiU2VydmljZVRhc2tfMDNocnlzNFwiLz48ZW5kRXZlbnQgaWQ9XCJFbmRF\ndmVudF8wNnF1cDViXCI+PGluY29taW5nPlNlcXVlbmNlRmxvd18xbTIyZHAwPC9pbmNvbWluZz48\nL2VuZEV2ZW50PjxzZXF1ZW5jZUZsb3cgaWQ9XCJTZXF1ZW5jZUZsb3dfMW0yMmRwMFwiIHNvdXJj\nZVJlZj1cIlNlcnZpY2VUYXNrXzAzaHJ5czRcIiB0YXJnZXRSZWY9XCJFbmRFdmVudF8wNnF1cDVi\nXCIvPjx0ZXh0QW5ub3RhdGlvbiBpZD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRcIj48dGV4dD5T\ndGFydCB5b3VyIHdvcmtmbG93IGhlcmU8L3RleHQ+PC90ZXh0QW5ub3RhdGlvbj48YXNzb2NpYXRp\nb24gaWQ9XCJBc3NvY2lhdGlvbl8xc2V1ajQ4XCIgc291cmNlUmVmPVwiU3RhcnRFdmVudF8xNTVh\nc3htXCIgdGFyZ2V0UmVmPVwiVGV4dEFubm90YXRpb25fMWt4eGl5dFwiLz48L3Byb2Nlc3M+PGJw\nbW5kaTpCUE1ORGlhZ3JhbSBpZD1cIkJQTU5EaWFncmFtXzFcIj48YnBtbmRpOkJQTU5QbGFuZSBi\ncG1uRWxlbWVudD1cInVuZGVmaW5lZFwiIGlkPVwiQlBNTlBsYW5lXzFcIj48YnBtbmRpOkJQTU5T\naGFwZSBicG1uRWxlbWVudD1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIGlkPVwiU3RhcnRFdmVudF8x\nNTVhc3htX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzNlwiIHdpZHRoPVwiMzZcIiB4PVwi\nMTYyXCIgeT1cIjE4OFwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1c\nIjBcIiB3aWR0aD1cIjkwXCIgeD1cIjE1N1wiIHk9XCIyMjNcIi8+PC9icG1uZGk6QlBNTkxhYmVs\nPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlRleHRB\nbm
5vdGF0aW9uXzFreHhpeXRcIiBpZD1cIlRleHRBbm5vdGF0aW9uXzFreHhpeXRfZGlcIj48b21n\nZGM6Qm91bmRzIGhlaWdodD1cIjMwXCIgd2lkdGg9XCIxMDBcIiB4PVwiOTlcIiB5PVwiMjU0XCIv\nPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiQXNzb2Np\nYXRpb25fMXNldWo0OFwiIGlkPVwiQXNzb2NpYXRpb25fMXNldWo0OF9kaVwiPjxvbWdkaTp3YXlw\nb2ludCB4PVwiMTY5XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMjBcIi8+PG9tZ2Rp\nOndheXBvaW50IHg9XCIxNTNcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjI1NFwiLz48\nL2JwbW5kaTpCUE1ORWRnZT48YnBtbmRpOkJQTU5TaGFwZSBicG1uRWxlbWVudD1cIlNlcnZpY2VU\nYXNrXzAzaHJ5czRcIiBpZD1cIlNlcnZpY2VUYXNrXzAzaHJ5czRfZGlcIj48b21nZGM6Qm91bmRz\nIGhlaWdodD1cIjgwXCIgd2lkdGg9XCIxMDBcIiB4PVwiMzg3XCIgeT1cIjE2NlwiLz48L2JwbW5k\naTpCUE1OU2hhcGU+PGJwbW5kaTpCUE1ORWRnZSBicG1uRWxlbWVudD1cIlNlcXVlbmNlRmxvd18x\naWJiOTNuXCIgaWQ9XCJTZXF1ZW5jZUZsb3dfMWliYjkzbl9kaVwiPjxvbWdkaTp3YXlwb2ludCB4\nPVwiMTk4XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PG9tZ2RpOndheXBv\naW50IHg9XCIzODdcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48YnBtbmRp\nOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhlaWdodD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1cIjI5\nMi41XCIgeT1cIjE4NFwiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTkVkZ2U+PGJw\nbW5kaTpCUE1OU2hhcGUgYnBtbkVsZW1lbnQ9XCJFbmRFdmVudF8wNnF1cDViXCIgaWQ9XCJFbmRF\ndmVudF8wNnF1cDViX2RpXCI+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIzNlwiIHdpZHRoPVwiMzZc\nIiB4PVwiNjQyXCIgeT1cIjE4OFwiLz48YnBtbmRpOkJQTU5MYWJlbD48b21nZGM6Qm91bmRzIGhl\naWdodD1cIjEzXCIgd2lkdGg9XCIwXCIgeD1cIjY2MFwiIHk9XCIyMjdcIi8+PC9icG1uZGk6QlBN\nTkxhYmVsPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwi\nU2VxdWVuY2VGbG93XzFtMjJkcDBcIiBpZD1cIlNlcXVlbmNlRmxvd18xbTIyZHAwX2RpXCI+PG9t\nZ2RpOndheXBvaW50IHg9XCI0ODdcIiB4c2k6dHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwi\nLz48b21nZGk6d2F5cG9pbnQgeD1cIjY0MlwiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwi\nMjA2XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0\naD1cIjBcIiB4PVwiNTY0LjVcIiB5PVwiMTg0XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5k\naTpCUE1ORWRnZT48L2JwbW5kaTpCUE1OUGxhbmU+PC9icG1uZGk6QlBNTkRpYWdyYW0+PC9kZWZp\nbml0aW9ucz4iLCAidmVyc2lvbiI6IDEyfSwgImFjdGlvbnMiOiBbXX0sIHsid29ya2Zsb3dfaWQi\nOiA1MywgIm5hbWUiOiAiRXhhbXBsZTogRG9ja2VyOlNlbmQgQXR0YWNobWVudCBUbyBEb2NrZXIg\nQ29udGFpbmVyIChWb2xhdGlsaXR5KSIsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJkb2NrZXJfc2Vu\nZF9hdHRhY2htZW50X3RvX2RvY2tlcl9jb250YWluZXIiLCAib2JqZWN0X3R5cGUiOiAiYXR0YWNo\nbWVudCIsICJkZXNjcmlwdGlvbiI6ICJBbiBleGFtcGxlIHdvcmtmbG93IHNjb3BlZCBmb3IgQXR0\nYWNobWVudHMgd2hpY2ggd2lsbCwgd2hlbiBpbnZva2VkLCBzZW5kIHRoZSBhdHRhY2htZW50IHRv\nIGEgRG9ja2VyIGNvbnRhaW5lciwgcGVyZm9ybSBzb21lIG9wZXJhdGlvbiBvbiB0aGUgaW5wdXQg\nYW5kIHJldHVybnMgaW5mb3JtYXRpb24gdG8gUmVzaWxpZW50LiIsICJjcmVhdG9yX2lkIjogImFs\nZnJlZEB3YXluZWNvcnAuY29tIiwgImxhc3RfbW9kaWZpZWRfYnkiOiAiYWxmcmVkQHdheW5lY29y\ncC5jb20iLCAibGFzdF9tb2RpZmllZF90aW1lIjogMTU1MjQ5Mjg3OTc4MywgImV4cG9ydF9rZXki\nOiAiZG9ja2VyX3NlbmRfYXR0YWNobWVudF90b19kb2NrZXJfY29udGFpbmVyIiwgInV1aWQiOiAi\nNWM5MjBhM2YtMzIxOC00MzFiLTk2NzItMDRiNTliNmUzYzdiIiwgImNvbnRlbnQiOiB7Indvcmtm\nbG93X2lkIjogImRvY2tlcl9zZW5kX2F0dGFjaG1lbnRfdG9fZG9ja2VyX2NvbnRhaW5lciIsICJ4\nbWwiOiAiPD94bWwgdmVyc2lvbj1cIjEuMFwiIGVuY29kaW5nPVwiVVRGLThcIj8+PGRlZmluaXRp\nb25zIHhtbG5zPVwiaHR0cDovL3d3dy5vbWcub3JnL3NwZWMvQlBNTi8yMDEwMDUyNC9NT0RFTFwi\nIHhtbG5zOmJwbW5kaT1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0JQTU4vMjAxMDA1MjQvRElc\nIiB4bWxuczpvbWdkYz1cImh0dHA6Ly93d3cub21nLm9yZy9zcGVjL0RELzIwMTAwNTI0L0RDXCIg\neG1sbnM6b21nZGk9XCJodHRwOi8vd3d3Lm9tZy5vcmcvc3BlYy9ERC8yMDEwMDUyNC9ESVwiIHht\nbG5zOnJlc2lsaWVudD1cImh0dHA6Ly9yZXNpbGllbnQuaWJtLmNvbS9icG1uXCIgeG1sbnM6eHNk\nPVwiaHR0cDovL3d3dy53My5vcmcvMjAwMS9YTUxTY2hlbWF
cIiB4bWxuczp4c2k9XCJodHRwOi8v\nd3d3LnczLm9yZy8yMDAxL1hNTFNjaGVtYS1pbnN0YW5jZVwiIHRhcmdldE5hbWVzcGFjZT1cImh0\ndHA6Ly93d3cuY2FtdW5kYS5vcmcvdGVzdFwiPjxwcm9jZXNzIGlkPVwiZG9ja2VyX3NlbmRfYXR0\nYWNobWVudF90b19kb2NrZXJfY29udGFpbmVyXCIgaXNFeGVjdXRhYmxlPVwidHJ1ZVwiIG5hbWU9\nXCJFeGFtcGxlOiBEb2NrZXI6U2VuZCBBdHRhY2htZW50IFRvIERvY2tlciBDb250YWluZXIgKFZv\nbGF0aWxpdHkpXCI+PGRvY3VtZW50YXRpb24+QW4gZXhhbXBsZSB3b3JrZmxvdyBzY29wZWQgZm9y\nIEF0dGFjaG1lbnRzIHdoaWNoIHdpbGwsIHdoZW4gaW52b2tlZCwgc2VuZCB0aGUgYXR0YWNobWVu\ndCB0byBhIERvY2tlciBjb250YWluZXIsIHBlcmZvcm0gc29tZSBvcGVyYXRpb24gb24gdGhlIGlu\ncHV0IGFuZCByZXR1cm5zIGluZm9ybWF0aW9uIHRvIFJlc2lsaWVudC48L2RvY3VtZW50YXRpb24+\nPHN0YXJ0RXZlbnQgaWQ9XCJTdGFydEV2ZW50XzE1NWFzeG1cIj48b3V0Z29pbmc+U2VxdWVuY2VG\nbG93XzBtbjBzMTU8L291dGdvaW5nPjwvc3RhcnRFdmVudD48c2VydmljZVRhc2sgaWQ9XCJTZXJ2\naWNlVGFza18xM3l6ZHd5XCIgbmFtZT1cIkRvY2tlcjogUnVuIERvY2tlciBDb250YWluZXJcIiBy\nZXNpbGllbnQ6dHlwZT1cImZ1bmN0aW9uXCI+PGV4dGVuc2lvbkVsZW1lbnRzPjxyZXNpbGllbnQ6\nZnVuY3Rpb24gdXVpZD1cIjdhMjIwYmUzLTA1ZjctNGIxNy1hMWE3LTk3YjQwNzZlMTRiZVwiPntc\nImlucHV0c1wiOntcImU2ZDY2YmFjLTg0MWQtNDAzZi04MmZhLTg2MmRjM2NkMjIyZlwiOntcImlu\ncHV0X3R5cGVcIjpcInN0YXRpY1wiLFwic3RhdGljX2lucHV0XCI6e1wibXVsdGlzZWxlY3RfdmFs\ndWVcIjpbXSxcInNlbGVjdF92YWx1ZVwiOlwiN2YzNmEyODUtYjJiMC00MDFiLWEwY2EtYTQ3OGFl\nOTBiZTZiXCJ9fSxcImU4OTNlMDhkLTk0MDgtNDQ0OS04OWFiLTkyOGMxY2UxZTRkNFwiOntcImlu\ncHV0X3R5cGVcIjpcInN0YXRpY1wiLFwic3RhdGljX2lucHV0XCI6e1wibXVsdGlzZWxlY3RfdmFs\ndWVcIjpbXSxcInRleHRfdmFsdWVcIjpcInBzbGlzdFwifX19LFwicG9zdF9wcm9jZXNzaW5nX3Nj\ncmlwdFwiOlwibm90ZV90ZXh0X3N0YXJ0ID0gdVxcXCJcXFwiXFxcIiZsdDtiJmd0O0RvY2tlciBJ\nbnRlZ3JhdGlvbiZsdDsvYiZndDtcXG4gICAgICAgICAgICAgICZsdDticiZndDsmbHQ7YnImZ3Q7\nQSBjb250YWluZXIgd2FzIHJhbiB1c2luZyB0aGUgaW1hZ2UgJmx0O2ImZ3Q7ezB9Jmx0Oy9iJmd0\nO1xcXCJcXFwiXFxcIi5mb3JtYXQodVxcXCI6XFxcIi5qb2luKFtyZXN1bHRzLmlucHV0c1tcXFwi\nZG9ja2VyX2ltYWdlXFxcIl1bXFxcIm5hbWVcXFwiXSwgcmVzdWx0cy5pbnB1dHNbXFxcImRvY2tl\ncl9vcGVyYXRpb25cXFwiXV0pKVxcbiAgICAgICAgICAgICAgXFxuIyBJZiB0aGUgQXR0YWNobWVu\ndCBhdHRyaWJ1dGUgb2YgdGhlIGNvbnRlbnQgcGF5bG9hZCBpcyBzZXQ7IHdlIGFyZSBkZWFsaW5n\nIHdpdGggYW4gYXR0YWNobWVudFxcbmlmIHJlc3VsdHMuY29udGVudFtcXFwiYXR0YWNobWVudF9u\nYW1lXFxcIl0gIT0gTm9uZTpcXG4gIG5vdGVfdGV4dF9hdHRhY2htZW50ID0gdVxcXCJcXFwiXFxc\nIiZsdDticiZndDsgT24gYW4gQXR0YWNobWVudCB3aXRoIG5hbWUgJmx0O2ImZ3Q7ezB9Jmx0Oy9i\nJmd0O1xcXCJcXFwiXFxcIi5mb3JtYXQocmVzdWx0cy5jb250ZW50W1xcXCJhdHRhY2htZW50X25h\nbWVcXFwiXSlcXG4gIG5vdGVfdGV4dF9zdGFydCArPSBub3RlX3RleHRfYXR0YWNobWVudFxcblxc\nbiMgT3RoZXJ3aXNlIHdlIGFyZSBkZWFsaW5nIHdpdGggYW4gYXJ0aWZhY3RcXG5lbHNlOlxcbiAg\nbm90ZV90ZXh0X2FydGlmYWN0ID0gdVxcXCJcXFwiXFxcIiZsdDticiZndDsgT24gYW4gQXJ0aWZh\nY3Qgb2YgVHlwZTogJmx0O2ImZ3Q7ezB9Jmx0Oy9iJmd0O1xcbiAgICAgICAgICAgICAgICAgICAg\nICAgICAgJmx0O2JyJmd0OyBBcnRpZmFjdCBWYWx1ZTogJmx0O2ImZ3Q7ezF9Jmx0Oy9iJmd0O1xc\nXCJcXFwiXFxcIi5mb3JtYXQocmVzdWx0cy5pbnB1dHNbXFxcImRvY2tlcl9hcnRpZmFjdF90eXBl\nXFxcIl0sIHJlc3VsdHMuaW5wdXRzW1xcXCJkb2NrZXJfaW5wdXRcXFwiXSlcXG4gIG5vdGVfdGV4\ndF9zdGFydCArPSBub3RlX3RleHRfYXJ0aWZhY3RcXG4gICAgICAgICAgICAgIFxcbm5vdGVfdGV4\ndF9lbmQgPSBcXFwiXFxcIlxcXCImbHQ7YnImZ3Q7Q29udGFpbmVyIElEIDogJmx0O2ImZ3Q7ezB9\nJmx0Oy9iJmd0O1xcbiAgICAgICAgICAgICAgJmx0O2JyJmd0O0NvbnRhaW5lciBleGl0IGNvZGUg\nOiAmbHQ7YiZndDt7MX0mbHQ7L2ImZ3Q7XFxuICAgICAgICAgICAgICAmbHQ7YnImZ3Q7Jmx0O2Jy\nJmd0OyBDb250YWluZXIgTG9ncyBoYXZlIGJlZW4gc2F2ZWQgYXMgYW4gYXR0YWNobWVudC5cXG4g\nICAgICAgICAgICAgIENvbnRhaW5lciBTdGF0cywgTG9ncywgRnVuY3Rpb24gSW5wdXRzIG9yIFJ1\nbiBUaW1lIE1ldHJpY3MgYXJlIGFsc28gYXZhaWxhYmxlIGFzIHBhcnQgb2YgdGhlIHJlc3VsdCBw\nYXlsb2FkXFxcIlxcXCJcXFwiLmZvcm1hdChcXG4gICAgICAgICAgICAgICAgcmVzdWx0cy5jb250\nZW50W1xcXCJjb2
50YWluZXJfaWRcXFwiXSwgcmVzdWx0cy5jb250ZW50W1xcXCJjb250YWluZXJf\nZXhpdF9zdGF0dXNcXFwiXSlcXG5cXG5ub3RlX3RleHQgPSBub3RlX3RleHRfc3RhcnQrbm90ZV90\nZXh0X2VuZFxcblxcbiMgSWYgd2UgYXJlIGRlYWxpbmcgd2l0aCBhIHRhc2sgbGV2ZWwgYXR0YWNo\nbWVudCwgdGhlbiBhZGQgYSBub3RlIHRvIHRoZSB0YXNrIG5vdCB0aGUgaW5jaWRlbnRcXG5pZiB0\nYXNrOlxcbiAgdGFzay5hZGROb3RlKGhlbHBlci5jcmVhdGVSaWNoVGV4dChub3RlX3RleHQpKVxc\nbmVsc2U6XFxuICBpbmNpZGVudC5hZGROb3RlKGhlbHBlci5jcmVhdGVSaWNoVGV4dChub3RlX3Rl\neHQpKVxcblxcbiMgQWRkIGFuIGVudHJ5IHRvIHRoZSBkb2NrZXJfaW50ZWdyYXRpb25faW52b2Nh\ndGlvbnMgRGF0YXRhYmxlXFxucm93ID0gaW5jaWRlbnQuYWRkUm93KFxcXCJkb2NrZXJfaW50ZWdy\nYXRpb25faW52b2NhdGlvbnNcXFwiKVxcblxcbmlmIFxcXCJ0YXNrXFxcIiBpbiByZXN1bHRzLmNv\nbnRlbnRbXFxcInJlc19saW5rc1xcXCJdW1xcXCJyZXNfb2JqZWN0XFxcIl06XFxuICByb3dbXFxc\nImRvY2tlcl9saW5rc1xcXCJdID0gdVxcXCJcXFwiXFxcIiZsdDthIGhyZWY9XFxcInt9XFxcIiZn\ndDt7fSZsdDsvYSZndDtcXFwiXFxcIlxcXCIuZm9ybWF0KHJlc3VsdHMuY29udGVudFtcXFwicmVz\nX2xpbmtzXFxcIl1bXFxcInJlc19vYmplY3RcXFwiXSwgXFxcIlRhc2sgTGlua1xcXCIpXFxuXFxu\ncm93W1xcXCJkb2NrZXJfdGltZXN0YW1wXFxcIl0gPSByZXN1bHRzW1xcXCJtZXRyaWNzXFxcIl1b\nXFxcInRpbWVzdGFtcF9lcG9jaFxcXCJdIG9yIDBcXG5yb3dbXFxcImRvY2tlcl9jb250YWluZXJf\naWRcXFwiXSA9IHJlc3VsdHMuY29udGVudFtcXFwiY29udGFpbmVyX2lkXFxcIl1cXG5yb3dbXFxc\nImRvY2tlcl9pbWFnZVxcXCJdID0gdVxcXCI6XFxcIi5qb2luKFtyZXN1bHRzLmlucHV0c1tcXFwi\nZG9ja2VyX2ltYWdlXFxcIl1bXFxcIm5hbWVcXFwiXSwgcmVzdWx0cy5pbnB1dHNbXFxcImRvY2tl\ncl9vcGVyYXRpb25cXFwiXV0pXFxucm93W1xcXCJkb2NrZXJfYXR0YWNobWVudF9uYW1lXFxcIl0g\nPSByZXN1bHRzLmNvbnRlbnRbXFxcImF0dGFjaG1lbnRfbmFtZVxcXCJdXFxuXCIsXCJwcmVfcHJv\nY2Vzc2luZ19zY3JpcHRcIjpcImlucHV0cy5pbmNpZGVudF9pZCA9IGluY2lkZW50LmlkIFxcblxc\nbiMgSWYgdGhpcyB3b3JrZmxvdyBoYXMgdGhlIHRhc2tfaWQgYXZhaWxhYmxlLCBnYXRoZXIgaXQg\naW5jYXNlIHdlIG5lZWQgaXQuXFxuaWYgdGFzazpcXG4gIGlucHV0cy50YXNrX2lkID0gdGFzay5p\nZFxcbiMgSWYgdGhpcyB3b3JrZmxvdyBoYXMgdGhlIGF0dGFjaG1lbnRfaWQgYXZhaWxhYmxlLCBn\nYXRoZXIgaXQgaW5jYXNlIHdlIG5lZWQgaXQuXFxuaWYgYXR0YWNobWVudDpcXG4gIGlucHV0cy5h\ndHRhY2htZW50X2lkID0gYXR0YWNobWVudC5pZFxcblxcbiMgSWYgdGhpcyB3b3JrZmxvdyBoYXMg\ndGhlIGFydGlmYWN0X2lkIGF2YWlsYWJsZSwgZ2F0aGVyIGl0IGluY2FzZSB3ZSBuZWVkIGl0Llxc\nbnRyeTogXFxuICBpZiBhcnRpZmFjdDpcXG4gICAgaW5wdXRzLmFydGlmYWN0X2lkID0gYXJ0aWZh\nY3QuaWRcXG5leGNlcHQ6XFxuICBwYXNzXCIsXCJyZXN1bHRfbmFtZVwiOlwiXCJ9PC9yZXNpbGll\nbnQ6ZnVuY3Rpb24+PC9leHRlbnNpb25FbGVtZW50cz48aW5jb21pbmc+U2VxdWVuY2VGbG93XzBt\nbjBzMTU8L2luY29taW5nPjxvdXRnb2luZz5TZXF1ZW5jZUZsb3dfMWZoa3ZiMDwvb3V0Z29pbmc+\nPC9zZXJ2aWNlVGFzaz48ZW5kRXZlbnQgaWQ9XCJFbmRFdmVudF8weWNoeGhwXCI+PGluY29taW5n\nPlNlcXVlbmNlRmxvd18xZmhrdmIwPC9pbmNvbWluZz48L2VuZEV2ZW50PjxzZXF1ZW5jZUZsb3cg\naWQ9XCJTZXF1ZW5jZUZsb3dfMWZoa3ZiMFwiIHNvdXJjZVJlZj1cIlNlcnZpY2VUYXNrXzEzeXpk\nd3lcIiB0YXJnZXRSZWY9XCJFbmRFdmVudF8weWNoeGhwXCIvPjxzZXF1ZW5jZUZsb3cgaWQ9XCJT\nZXF1ZW5jZUZsb3dfMG1uMHMxNVwiIHNvdXJjZVJlZj1cIlN0YXJ0RXZlbnRfMTU1YXN4bVwiIHRh\ncmdldFJlZj1cIlNlcnZpY2VUYXNrXzEzeXpkd3lcIi8+PHRleHRBbm5vdGF0aW9uIGlkPVwiVGV4\ndEFubm90YXRpb25fMWt4eGl5dFwiPjx0ZXh0PlN0YXJ0IHlvdXIgd29ya2Zsb3cgaGVyZTwvdGV4\ndD48L3RleHRBbm5vdGF0aW9uPjxhc3NvY2lhdGlvbiBpZD1cIkFzc29jaWF0aW9uXzFzZXVqNDhc\nIiBzb3VyY2VSZWY9XCJTdGFydEV2ZW50XzE1NWFzeG1cIiB0YXJnZXRSZWY9XCJUZXh0QW5ub3Rh\ndGlvbl8xa3h4aXl0XCIvPjwvcHJvY2Vzcz48YnBtbmRpOkJQTU5EaWFncmFtIGlkPVwiQlBNTkRp\nYWdyYW1fMVwiPjxicG1uZGk6QlBNTlBsYW5lIGJwbW5FbGVtZW50PVwidW5kZWZpbmVkXCIgaWQ9\nXCJCUE1OUGxhbmVfMVwiPjxicG1uZGk6QlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiU3RhcnRFdmVu\ndF8xNTVhc3htXCIgaWQ9XCJTdGFydEV2ZW50XzE1NWFzeG1fZGlcIj48b21nZGM6Qm91bmRzIGhl\naWdodD1cIjM2XCIgd2lkdGg9XCIzNlwiIHg9XCIxNjJcIiB5PVwiMTg4XCIvPjxicG1uZGk6QlBN\nTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMFwiIHdpZHRoPVwiOTB
cIiB4PVwiMTU3XCIg\neT1cIjIyM1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6\nQlBNTlNoYXBlIGJwbW5FbGVtZW50PVwiVGV4dEFubm90YXRpb25fMWt4eGl5dFwiIGlkPVwiVGV4\ndEFubm90YXRpb25fMWt4eGl5dF9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMzBcIiB3aWR0\naD1cIjEwMFwiIHg9XCI5OVwiIHk9XCIyNTRcIi8+PC9icG1uZGk6QlBNTlNoYXBlPjxicG1uZGk6\nQlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJBc3NvY2lhdGlvbl8xc2V1ajQ4XCIgaWQ9XCJBc3NvY2lh\ndGlvbl8xc2V1ajQ4X2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCIxNjlcIiB4c2k6dHlwZT1cIm9t\nZ2RjOlBvaW50XCIgeT1cIjIyMFwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjE1M1wiIHhzaTp0eXBl\nPVwib21nZGM6UG9pbnRcIiB5PVwiMjU0XCIvPjwvYnBtbmRpOkJQTU5FZGdlPjxicG1uZGk6QlBN\nTlNoYXBlIGJwbW5FbGVtZW50PVwiU2VydmljZVRhc2tfMTN5emR3eVwiIGlkPVwiU2VydmljZVRh\nc2tfMTN5emR3eV9kaVwiPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiODBcIiB3aWR0aD1cIjEwMFwi\nIHg9XCIzNzdcIiB5PVwiMTY2XCIvPjwvYnBtbmRpOkJQTU5TaGFwZT48YnBtbmRpOkJQTU5TaGFw\nZSBicG1uRWxlbWVudD1cIkVuZEV2ZW50XzB5Y2h4aHBcIiBpZD1cIkVuZEV2ZW50XzB5Y2h4aHBf\nZGlcIj48b21nZGM6Qm91bmRzIGhlaWdodD1cIjM2XCIgd2lkdGg9XCIzNlwiIHg9XCI2MjdcIiB5\nPVwiMTg4XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxvbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3\naWR0aD1cIjBcIiB4PVwiNjQ1XCIgeT1cIjIyN1wiLz48L2JwbW5kaTpCUE1OTGFiZWw+PC9icG1u\nZGk6QlBNTlNoYXBlPjxicG1uZGk6QlBNTkVkZ2UgYnBtbkVsZW1lbnQ9XCJTZXF1ZW5jZUZsb3df\nMWZoa3ZiMFwiIGlkPVwiU2VxdWVuY2VGbG93XzFmaGt2YjBfZGlcIj48b21nZGk6d2F5cG9pbnQg\neD1cIjQ3N1wiIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxvbWdkaTp3YXlw\nb2ludCB4PVwiNjI3XCIgeHNpOnR5cGU9XCJvbWdkYzpQb2ludFwiIHk9XCIyMDZcIi8+PGJwbW5k\naTpCUE1OTGFiZWw+PG9tZ2RjOkJvdW5kcyBoZWlnaHQ9XCIxM1wiIHdpZHRoPVwiOTBcIiB4PVwi\nNTA3XCIgeT1cIjE4NC41XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48\nYnBtbmRpOkJQTU5FZGdlIGJwbW5FbGVtZW50PVwiU2VxdWVuY2VGbG93XzBtbjBzMTVcIiBpZD1c\nIlNlcXVlbmNlRmxvd18wbW4wczE1X2RpXCI+PG9tZ2RpOndheXBvaW50IHg9XCIxOThcIiB4c2k6\ndHlwZT1cIm9tZ2RjOlBvaW50XCIgeT1cIjIwNlwiLz48b21nZGk6d2F5cG9pbnQgeD1cIjM3N1wi\nIHhzaTp0eXBlPVwib21nZGM6UG9pbnRcIiB5PVwiMjA2XCIvPjxicG1uZGk6QlBNTkxhYmVsPjxv\nbWdkYzpCb3VuZHMgaGVpZ2h0PVwiMTNcIiB3aWR0aD1cIjkwXCIgeD1cIjI0Mi41XCIgeT1cIjE4\nNC41XCIvPjwvYnBtbmRpOkJQTU5MYWJlbD48L2JwbW5kaTpCUE1ORWRnZT48L2JwbW5kaTpCUE1O\nUGxhbmU+PC9icG1uZGk6QlBNTkRpYWdyYW0+PC9kZWZpbml0aW9ucz4iLCAidmVyc2lvbiI6IDUz\nfSwgImFjdGlvbnMiOiBbXX1dLCAicm9sZXMiOiBbXSwgIndvcmtzcGFjZXMiOiBbXSwgImZ1bmN0\naW9ucyI6IFt7ImlkIjogMzQsICJuYW1lIjogImRvY2tlcl9ydW5fZG9ja2VyX2NvbnRhaW5lciIs\nICJkaXNwbGF5X25hbWUiOiAiRG9ja2VyOiBSdW4gRG9ja2VyIENvbnRhaW5lciIsICJkZXNjcmlw\ndGlvbiI6IHsiZm9ybWF0IjogInRleHQiLCAiY29udGVudCI6ICJBIGZ1bmN0aW9uIGludGVuZGVk\nIHRvIGJlIHVzZWQgdG8gY3JlYXRlIGEgRG9ja2VyIENvbnRhaW5lciBmcm9tIGFuIGltYWdlLCBm\nZWVkIGFuIGlucHV0IHRvIHRoZSBjb250YWluZXIgYW5kIHRoZW4gcmV0dXJuIHRoZSByZXN1bHRz\nLiJ9LCAiZGVzdGluYXRpb25faGFuZGxlIjogImZuX2RvY2tlciIsICJleHBvcnRfa2V5IjogImRv\nY2tlcl9ydW5fZG9ja2VyX2NvbnRhaW5lciIsICJ1dWlkIjogIjdhMjIwYmUzLTA1ZjctNGIxNy1h\nMWE3LTk3YjQwNzZlMTRiZSIsICJ2ZXJzaW9uIjogMTEsICJjcmVhdG9yIjogeyJpZCI6IDM5LCAi\ndHlwZSI6ICJ1c2VyIiwgIm5hbWUiOiAiYWxmcmVkQHdheW5lY29ycC5jb20iLCAiZGlzcGxheV9u\nYW1lIjogIkFsZnJlZCBQZW5ueXdvcnRoIn0sICJsYXN0X21vZGlmaWVkX2J5IjogeyJpZCI6IDM5\nLCAidHlwZSI6ICJ1c2VyIiwgIm5hbWUiOiAiYWxmcmVkQHdheW5lY29ycC5jb20iLCAiZGlzcGxh\neV9uYW1lIjogIkFsZnJlZCBQZW5ueXdvcnRoIn0sICJsYXN0X21vZGlmaWVkX3RpbWUiOiAxNTUx\nOTUzNDYxMDc4LCAidmlld19pdGVtcyI6IFt7InN0ZXBfbGFiZWwiOiBudWxsLCAic2hvd19pZiI6\nIG51bGwsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9u\nIiwgImNvbnRlbnQiOiAiNjJkOTMxMDUtNzA1ZC00ODc2LTk4MTMtZTYwZWU0M2UxOWVkIiwgInNo\nb3dfbGlua19oZWFkZXIiOiBmYWxzZX0sIHsic3RlcF9sYWJlbCI6IG51bGwsICJzaG93X2lmIjog\nbnVsbCwgImVsZW1lbnQiOiAiZm
llbGRfdXVpZCIsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24i\nLCAiY29udGVudCI6ICIxNjc3NzE2YS1hOTVlLTRmNTUtOGUzZS01Mzk5ZTZkM2JkOTYiLCAic2hv\nd19saW5rX2hlYWRlciI6IGZhbHNlfSwgeyJzdGVwX2xhYmVsIjogbnVsbCwgInNob3dfaWYiOiBu\ndWxsLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIs\nICJjb250ZW50IjogIjgxMWU5OWQ3LWQxOTQtNGNlOC04NmNjLWFmZjVlMDFhYjg1YyIsICJzaG93\nX2xpbmtfaGVhZGVyIjogZmFsc2V9LCB7InN0ZXBfbGFiZWwiOiBudWxsLCAic2hvd19pZiI6IG51\nbGwsICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwg\nImNvbnRlbnQiOiAiYmEzMTgyNjEtZWQ2YS00YTM4LWExODctOWUwYjY4ZDE2MDRmIiwgInNob3df\nbGlua19oZWFkZXIiOiBmYWxzZX0sIHsic3RlcF9sYWJlbCI6IG51bGwsICJzaG93X2lmIjogbnVs\nbCwgImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAi\nY29udGVudCI6ICJlNmQ2NmJhYy04NDFkLTQwM2YtODJmYS04NjJkYzNjZDIyMmYiLCAic2hvd19s\naW5rX2hlYWRlciI6IGZhbHNlfSwgeyJzdGVwX2xhYmVsIjogbnVsbCwgInNob3dfaWYiOiBudWxs\nLCAiZWxlbWVudCI6ICJmaWVsZF91dWlkIiwgImZpZWxkX3R5cGUiOiAiX19mdW5jdGlvbiIsICJj\nb250ZW50IjogIjRmYzMwOWYxLTM0MDYtNDY0Zi1iZTZkLWQzNzlkYzIzZDQxMSIsICJzaG93X2xp\nbmtfaGVhZGVyIjogZmFsc2V9LCB7InN0ZXBfbGFiZWwiOiBudWxsLCAic2hvd19pZiI6IG51bGws\nICJlbGVtZW50IjogImZpZWxkX3V1aWQiLCAiZmllbGRfdHlwZSI6ICJfX2Z1bmN0aW9uIiwgImNv\nbnRlbnQiOiAiMjBiNWY2NjEtYjU2Mi00YTc4LThhNDAtM2Q5YzlmMjRjYjY4IiwgInNob3dfbGlu\na19oZWFkZXIiOiBmYWxzZX0sIHsic3RlcF9sYWJlbCI6IG51bGwsICJzaG93X2lmIjogbnVsbCwg\nImVsZW1lbnQiOiAiZmllbGRfdXVpZCIsICJmaWVsZF90eXBlIjogIl9fZnVuY3Rpb24iLCAiY29u\ndGVudCI6ICJlODkzZTA4ZC05NDA4LTQ0NDktODlhYi05MjhjMWNlMWU0ZDQiLCAic2hvd19saW5r\nX2hlYWRlciI6IGZhbHNlfV0sICJ3b3JrZmxvd3MiOiBbeyJ3b3JrZmxvd19pZCI6IDU2LCAibmFt\nZSI6ICJFeGFtcGxlOiBEb2NrZXI6U2VuZCBBcnRpZmFjdCBUbyBEb2NrZXIgQ29udGFpbmVyIChB\nbWFzcykiLCAicHJvZ3JhbW1hdGljX25hbWUiOiAiZG9ja2VyX2FuYWx5emVfYXJ0aWZhY3Rfd2l0\naF9kb2NrZXJfY29udGFpbmVyX2FtYXNzIiwgIm9iamVjdF90eXBlIjogImFydGlmYWN0IiwgImRl\nc2NyaXB0aW9uIjogbnVsbCwgInV1aWQiOiBudWxsLCAiYWN0aW9ucyI6IFtdfSwgeyJ3b3JrZmxv\nd19pZCI6IDU1LCAibmFtZSI6ICJFeGFtcGxlOiBEb2NrZXI6U2VuZCBBcnRpZmFjdCBUbyBEb2Nr\nZXIgQ29udGFpbmVyIChOU1JMKSIsICJwcm9ncmFtbWF0aWNfbmFtZSI6ICJkb2NrZXJfYW5hbHl6\nZV9hcnRpZmFjdF93aXRoX2RvY2tlcl9jb250YWluZXJfbnNybCIsICJvYmplY3RfdHlwZSI6ICJh\ncnRpZmFjdCIsICJkZXNjcmlwdGlvbiI6IG51bGwsICJ1dWlkIjogbnVsbCwgImFjdGlvbnMiOiBb\nXX0sIHsid29ya2Zsb3dfaWQiOiA1MywgIm5hbWUiOiAiRXhhbXBsZTogRG9ja2VyOlNlbmQgQXR0\nYWNobWVudCBUbyBEb2NrZXIgQ29udGFpbmVyIChWb2xhdGlsaXR5KSIsICJwcm9ncmFtbWF0aWNf\nbmFtZSI6ICJkb2NrZXJfc2VuZF9hdHRhY2htZW50X3RvX2RvY2tlcl9jb250YWluZXIiLCAib2Jq\nZWN0X3R5cGUiOiAiYXR0YWNobWVudCIsICJkZXNjcmlwdGlvbiI6IG51bGwsICJ1dWlkIjogbnVs\nbCwgImFjdGlvbnMiOiBbXX1dfV19\n\"\"\"\n )",
"def _process_createContainer(self, data):\r\n try:\r\n self._avatar.createContainer(data['containerTag'],\r\n data.get('containerData', {}))\r\n except KeyError as e:\r\n raise InvalidRequest(\"Can not process 'CreateContainer' request. \"\r\n 'Missing key: {0}'.format(e))",
"def container_factory(self, name):",
"def container_factory(self, name):",
"def container_factory(self, name):",
"def container_factory(self, name):",
"def container_factory(self, name):",
"def main(\n *,\n component: list[str],\n no_cache: bool,\n pull: bool,\n quiet: bool,\n release: str,\n sp_osi: str | None,\n tag_suffix: str | None,\n) -> None:\n\n def build_component(component: str) -> None:\n \"\"\"Rebuild the container for a single component.\"\"\"\n parts: Final = component.split(\"-\", maxsplit=1)\n if len(parts) != 2: # noqa: PLR2004 # this will go away with match/case\n sys.exit(f\"Internal error: build_component() invoked with {component=!r}\")\n kolla_component, kolla_service = parts\n build: Final = prepare.build_dockerfile(cfg, files, kolla_component, kolla_service)\n\n with tempfile.NamedTemporaryFile(\n mode=\"wt\", encoding=\"UTF-8\", prefix=\"Dockerfile.\"\n ) as dockerfile:\n dockerfile.write(build.dockerfile)\n dockerfile.flush()\n subprocess.check_call([\"ls\", \"-l\", \"--\", dockerfile.name])\n subprocess.check_call([\"cat\", \"--\", dockerfile.name])\n\n cmd: Final[list[str | pathlib.Path]] = [\n \"docker\",\n \"build\",\n \"-t\",\n f\"storpool/{build.container_name}{cfg.tag_suffix}\",\n \"--rm\",\n *([\"--no-cache\"] if no_cache else []),\n *([\"--pull\"] if pull else []),\n \"-f\",\n dockerfile.name,\n \"--\",\n datadir,\n ]\n cmd_str: Final = shlex.join(str(word) for word in cmd)\n cfg.diag(lambda: f\"Running `{cmd_str}`\")\n try:\n subprocess.run(cmd, check=True)\n except (OSError, subprocess.CalledProcessError) as err:\n sys.exit(f\"Could not run `{cmd_str}`: {err}\")\n\n if release not in prepare.ALL_RELEASES:\n sys.exit(\n f\"Unsupported release {release!r}, must be one of {' '.join(prepare.ALL_RELEASES)}\"\n )\n if any(comp for comp in component if comp not in ALL_COMPONENTS):\n sys.exit(f\"Unrecognized components, must be one or more of {' '.join(ALL_COMPONENTS)}\")\n cfg: Final = build_config(quiet=quiet, release=release, sp_osi=sp_osi, tag_suffix=tag_suffix)\n\n datadir: Final = cfg.topdir / defs.DATA_DIR\n files: Final = prepare.prepare_data_files(cfg, datadir)\n\n for comp in component:\n build_component(comp)",
"def _build_container(\n self, target_image, odcs, repo_type, repo_list, terminate_event,\n scratch, record):\n self.logger.info(\"Building image: %s\" % target_image)\n cmd_list = [\"rhpkg\", \"--path=%s\" % self.distgit_dir]\n\n if self.runtime.user is not None:\n cmd_list.append(\"--user=%s\" % self.runtime.user)\n\n cmd_list += (\n \"container-build\",\n \"--nowait\",\n )\n\n if odcs:\n if odcs == 'signed':\n odcs = 'release' # convenience option for those used to the old types\n cmd_list.append('--signing-intent')\n cmd_list.append(odcs)\n else:\n if repo_type:\n repo_list = list(repo_list) # In case we get a tuple\n repo_list.append(self.metadata.cgit_url(\".oit/\" + repo_type + \".repo\"))\n\n if repo_list:\n # rhpkg supports --repo-url [URL [URL ...]]\n cmd_list.append(\"--repo-url\")\n cmd_list.extend(repo_list)\n\n if scratch:\n cmd_list.append(\"--scratch\")\n\n # Run the build with --nowait so that we can immediately get information about the brew task\n rc, out, err = exectools.cmd_gather(cmd_list)\n\n if rc != 0:\n # Probably no point in continuing.. can't contact brew?\n self.logger.info(\"Unable to create brew task: out={} ; err={}\".format(out, err))\n return False\n\n # Otherwise, we should have a brew task we can monitor listed in the stdout.\n out_lines = out.splitlines()\n\n # Look for a line like: \"Created task: 13949050\" . Extract the identifier.\n task_id = next((created_line.split(\":\")[1]).strip() for created_line in out_lines if\n created_line.startswith(\"Created task:\"))\n\n record[\"task_id\"] = task_id\n\n # Look for a line like: \"Task info: https://brewweb.engineering.redhat.com/brew/taskinfo?taskID=13948942\"\n task_url = next((info_line.split(\":\", 1)[1]).strip() for info_line in out_lines if\n info_line.startswith(\"Task info:\"))\n\n self.logger.info(\"Build running: {}\".format(task_url))\n\n record[\"task_url\"] = task_url\n\n # Now that we have the basics about the task, wait for it to complete\n error = watch_task(self.logger.info, task_id, terminate_event)\n\n # Looking for something like the following to conclude the image has already been built:\n # BuildError: Build for openshift-enterprise-base-v3.7.0-0.117.0.0 already exists, id 588961\n if error is not None and \"already exists\" in error:\n self.logger.info(\"Image already built against this dist-git commit (or version-release tag): {}\".format(target_image))\n error = None\n\n # Gather brew-logs\n logs_dir = \"%s/%s\" % (self.runtime.brew_logs_dir, self.metadata.name)\n logs_rc, _, logs_err = exectools.cmd_gather([\"brew\", \"download-logs\", \"-d\", logs_dir, task_id])\n\n if logs_rc != 0:\n self.logger.info(\"Error downloading build logs from brew for task %s: %s\" % (task_id, logs_err))\n\n if error is not None:\n # An error occurred. We don't have a viable build.\n self.logger.info(\"Error building image: {}, {}\".format(task_url, error))\n return False\n\n self.logger.info(\"Successfully built image: {} ; {}\".format(target_image, task_url))\n return True",
"def test_get_container_assets_expanded(self):\n pass",
"def Run(self, args):\n\n with RecoverFromDiagnosticException(args.image_name):\n img_name = util.GetDigestFromName(args.image_name)\n return util.TransformContainerAnalysisData(img_name,\n args.occurrence_filter)",
"def setup(self):\n\n folder_name, file_name, url, md5 = self.resource\n dataset_folder = os.path.join(self.data_root, folder_name)\n if not os.path.exists(dataset_folder):\n sh_utils.download_and_extract_archive(url, dataset_folder, md5, file_name)\n\n test_transform = tv_transforms.Compose(\n [\n tv_transforms.ToTensor(),\n tv_transforms.Lambda(lambda x: x.permute(1, 2, 0)),\n ]\n )\n\n dataset_out = tv_datasets.ImageFolder(\n root=dataset_folder, transform=test_transform\n )\n self.images_only_dataset_out = sh_data_torch.IndexedTorchDataset(\n sh_data_torch.ImagesOnlyTorchDataset(dataset_out)\n )",
"def find_artifacts(self, artifact_type: str, match_condition: Dict[str, Any],\n return_documents_only=False) -> List[Dict[str, Any]]:\n raw_documents = Storage.locked_call(\n lambda: self._find_meta(artifact_type, match_condition),\n self._get_lock_path(artifact_type),\n self.lock_timeout,\n )\n\n documents = []\n for document in raw_documents:\n document_id = document.doc_id\n document = dict(document)\n document['id'] = document_id\n\n if not return_documents_only:\n document['artifact'] = torch.load(self._build_artifact_path(artifact_type, document_id))\n documents.append(document)\n return documents"
] | [
"0.62061214",
"0.57416934",
"0.573482",
"0.573116",
"0.56448776",
"0.5568765",
"0.55421925",
"0.55335236",
"0.54728556",
"0.5449445",
"0.54188776",
"0.53918684",
"0.53792435",
"0.5363252",
"0.5348222",
"0.5341792",
"0.52852637",
"0.52717286",
"0.5253826",
"0.52310616",
"0.52310616",
"0.52310616",
"0.52310616",
"0.52310616",
"0.5218492",
"0.51785004",
"0.51544166",
"0.514985",
"0.51282907",
"0.51230794"
] | 0.73780525 | 0 |
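Editorial note (not part of the dataset rows): the long base64 block inside the negative example above appears to be an encoded IBM Resilient/SOAR customization export — the decodable fragments are JSON describing fields, workflows and functions for a Docker integration. Below is a minimal Python sketch of how such a blob could be decoded for inspection. It is an assumption-laden illustration: `decode_export` and `blob_lines` are hypothetical names, and it presumes the complete blob (not just the portion shown in this excerpt) reassembles into valid base64-encoded JSON once the escaped "\n" markers from the dump are stripped.

# Minimal sketch, assuming `blob_lines` holds the full base64 text from the
# negative example above, including the literal "\n" escape markers left by the dump.
import base64
import json

def decode_export(blob_lines: str) -> dict:
    # Remove escaped and real line breaks introduced by the dump, then decode.
    b64_text = blob_lines.replace("\\n", "").replace("\n", "").strip()
    raw = base64.b64decode(b64_text)  # bytes of the original JSON export
    return json.loads(raw)            # top-level keys seen in the fragments: "functions", "workflows", ...

# Hypothetical usage:
# export = decode_export(blob_lines)
# print([w["name"] for w in export.get("workflows", [])])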
Return collected metadata of a dataproduct. | def dataproduct(self, identity, dataproduct_id):
metadata = {}
permissions = self.permission.dataproduct_permissions(
dataproduct_id, identity
) or {}
session = self.config_models.session()
# find Group or Data layer object
OWSLayer = self.config_models.model('ows_layer')
query = session.query(OWSLayer).filter_by(name=dataproduct_id)
ows_layer = query.first()
if ows_layer is not None:
metadata, searchterms = self.dataproduct_metadata(
ows_layer, permissions, session
)
else:
# find DataSetView for basic DataSet
DataSetView = self.config_models.model('data_set_view')
query = session.query(DataSetView).filter_by(name=dataproduct_id)
data_set_view = query.first()
if data_set_view is not None:
if data_set_view.name in permissions.get('basic_datasets', []):
# basic DataSet permitted
metadata = self.basic_dataset_metadata(
data_set_view, session
)
session.close()
return metadata | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_metadata(self):\n return self.client._perform_json(\n \"GET\", \"/projects/%s/recipes/%s/metadata\" % (self.project_key, self.recipe_name))",
"def summarize_metadata(self):\n meta_dict = {}\n for comp in self.dataset.data_vars:\n for mkey, mvalue in self.dataset[comp].attrs.items():\n meta_dict[f\"{comp}.{mkey}\"] = mvalue\n\n return meta_dict",
"def metadata(self) -> 'outputs.DataCollectionEndpointResponseMetadata':\n return pulumi.get(self, \"metadata\")",
"def metadata(self):\n return self.meta.metadata",
"def GetMetadata(self):\n return self.dict['meta']",
"def _get_dsmeta(self, bids):\n # STEP 1: Extract metadata from `dataset_description.json`\n metadata = self._get_bids_dsdescription(bids)\n # STEP 2: Extract README text\n metadata[\"description\"] = self._get_bids_readme()\n # STEP 3: Extract information about entities and add to metadata\n metadata[\"entities\"] = self._get_bids_entities(bids)\n # STEP 4: Extract variable collection information on multiple levels\n metadata[\"variables\"] = self._get_bids_variables(bids)\n # STEP 5: Add context to metadata output\n metadata[\"@context\"] = BIDSCONTEXT\n return metadata",
"def metadata(self, ds_name):\n return self.runinfos.get(self._infer_fqn(ds_name)).metadata._metadata",
"def get_metadata(self):\n return self._metadata",
"def get_metadata(self, attribute):\n return self.metadata.get(attribute, None)",
"def get_dataset_metadata(fields=[]):\n return get_dict_from_db(key='metadata', fields=fields)",
"def get_metadata(self):\n return self.manager.get_metadata(self)",
"def basic_dataset_metadata(self, data_set_view, session):\n metadata = {}\n\n contacts = self.basic_dataset_contacts(data_set_view, session)\n\n metadata = {\n 'identifier': data_set_view.name,\n 'display': data_set_view.data_set.data_set_name,\n 'type': 'datasetview',\n 'description': data_set_view.description,\n 'contacts': contacts,\n 'datatype': 'table'\n }\n\n if data_set_view.facet:\n metadata.update({\n 'searchterms': [data_set_view.facet]\n })\n\n return metadata",
"def metadata(self):\r\n return self._metadata",
"def dataproduct() -> None:\n pass",
"def metadata(self) -> Mapping[str, str]:\n return pulumi.get(self, \"metadata\")",
"def _metadata(self) -> Dict[str, Any]:\n return self.__metadata",
"def metadata(self):\n return self._metadata",
"def metadata(self):\n return self._metadata",
"def metadata(self):\n return self._metadata",
"def metadata(self):\n return self._metadata",
"def metadata(self):\n return self._metadata",
"def metadata(self) -> dict:\n meta = {}\n meta['name'] = self.name\n meta['id'] = self.id\n meta['family'] = self.family\n \n meta['ptd_type'] = []\n meta['pos'] = []\n meta['atype'] = []\n meta['db_vect'] = []\n meta['scale'] = []\n for cp in self.parameters:\n meta['ptd_type'].append(cp.get('ptd_type', None))\n meta['pos'].append(cp.get('pos', None))\n meta['atype'].append(cp.get('atype', None))\n meta['db_vect'].append(cp.get('db_vect', None))\n meta['scale'].append(cp.get('scale', None))\n \n return meta",
"def metadata(self) -> global___SummaryMetadata:",
"def metadata(self):\n self.data_as_dict = {}\n for ele in self.data:\n self.data_as_dict[ele.name] = ele.value\n return self.data_as_dict",
"def extract_metadata(self):\n if self.is_generatable_file:\n logger.debug(\"Converting collected details to dict..\")\n if self.metadata_collector:\n self.metadata = MetadataToDict(\n metadata_collector=self.metadata_collector,\n file_import=self.file_import,\n )\n self.metadata.build_integration_dict()",
"def metadata(self) -> Mapping[str, str]:\r\n return self._metadata",
"def metadata(self) -> Mapping[str, str]:\r\n return self._metadata",
"def metadata(self):\n return {\n \"namespace\": self.namespace,\n \"short_name\": f\"{self.namespace}_{self._dataset_metadata['DatasetCode']}\",\n \"name\": f\"{self._dataset_metadata['DatasetName']} - FAO ({self.publication_year})\",\n \"description\": self._dataset_metadata[\"DatasetDescription\"],\n \"source_name\": \"Food and Agriculture Organization of the United Nations\",\n \"publication_year\": int(self.publication_year),\n \"publication_date\": self._dataset_metadata[\"DateUpdate\"],\n \"date_accessed\": str(dt.date.today()),\n \"url\": self.url,\n \"source_data_url\": self.source_data_url,\n \"file_extension\": \"zip\",\n }",
"def db_metadata(self):\n return self.metadata",
"def get_metadata(self):\n metadata = {}\n for k in self.metadata_keys:\n metadata[k] = copy.copy(getattr(self, k))\n return metadata"
] | [
"0.6686516",
"0.6594631",
"0.65593004",
"0.6541971",
"0.6431339",
"0.6426287",
"0.6409416",
"0.63947934",
"0.638867",
"0.6377498",
"0.63504374",
"0.6335236",
"0.63153744",
"0.6300704",
"0.627984",
"0.6277133",
"0.62743515",
"0.62743515",
"0.62743515",
"0.62743515",
"0.62743515",
"0.6262202",
"0.62358683",
"0.6227598",
"0.62164927",
"0.6213546",
"0.6213546",
"0.6209826",
"0.6203749",
"0.62004197"
] | 0.75031686 | 0 |
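A minimal, self-contained sketch of the lookup-with-fallback pattern shown in the `dataproduct` snippet above: query one model by name and, if nothing matches, fall back to a second model before giving up. The OWSLayer/DataSetView classes and the in-memory SQLite database here are simplified stand-ins, not the project's real config models.

from sqlalchemy import Column, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, sessionmaker

Base = declarative_base()

class OWSLayer(Base):
    __tablename__ = 'ows_layer'
    gdi_oid = Column(Integer, primary_key=True)
    name = Column(String, unique=True)

class DataSetView(Base):
    __tablename__ = 'data_set_view'
    gdi_oid = Column(Integer, primary_key=True)
    name = Column(String, unique=True)

engine = create_engine('sqlite:///:memory:')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(DataSetView(name='my_dataset'))
session.commit()

def lookup(dataproduct_id):
    # try the layer table first, then fall back to the basic dataset table
    ows_layer = session.query(OWSLayer).filter_by(name=dataproduct_id).first()
    if ows_layer is not None:
        return {'identifier': ows_layer.name, 'type': 'layer'}
    data_set_view = session.query(DataSetView).filter_by(name=dataproduct_id).first()
    if data_set_view is not None:
        return {'identifier': data_set_view.name, 'type': 'datasetview'}
    return {}

print(lookup('my_dataset'))  # -> {'identifier': 'my_dataset', 'type': 'datasetview'}
session.close()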
Collect metadata of a basic DataSet dataproduct. | def basic_dataset_metadata(self, data_set_view, session):
metadata = {}
contacts = self.basic_dataset_contacts(data_set_view, session)
metadata = {
'identifier': data_set_view.name,
'display': data_set_view.data_set.data_set_name,
'type': 'datasetview',
'description': data_set_view.description,
'contacts': contacts,
'datatype': 'table'
}
if data_set_view.facet:
metadata.update({
'searchterms': [data_set_view.facet]
})
return metadata | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dataproduct(self, identity, dataproduct_id):\n metadata = {}\n\n permissions = self.permission.dataproduct_permissions(\n dataproduct_id, identity\n ) or {}\n\n session = self.config_models.session()\n\n # find Group or Data layer object\n OWSLayer = self.config_models.model('ows_layer')\n query = session.query(OWSLayer).filter_by(name=dataproduct_id)\n ows_layer = query.first()\n if ows_layer is not None:\n metadata, searchterms = self.dataproduct_metadata(\n ows_layer, permissions, session\n )\n else:\n # find DataSetView for basic DataSet\n DataSetView = self.config_models.model('data_set_view')\n query = session.query(DataSetView).filter_by(name=dataproduct_id)\n data_set_view = query.first()\n if data_set_view is not None:\n if data_set_view.name in permissions.get('basic_datasets', []):\n # basic DataSet permitted\n metadata = self.basic_dataset_metadata(\n data_set_view, session\n )\n\n session.close()\n\n return metadata",
"def summarize_metadata(self):\n meta_dict = {}\n for comp in self.dataset.data_vars:\n for mkey, mvalue in self.dataset[comp].attrs.items():\n meta_dict[f\"{comp}.{mkey}\"] = mvalue\n\n return meta_dict",
"def get_dataset_meta(self, output_name, dataset_id):\n return {}",
"def dataproduct() -> None:\n pass",
"def _info(self) -> tfds.core.DatasetInfo:\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n # These are the features of your dataset like images, labels ...\n 'image': tfds.features.Image(shape=(None, None, 1)),\n 'bboxes': tfds.features.Sequence({'bbox': tfds.features.BBoxFeature()}),\n 'image_id': tfds.features.Text(),\n 'series_id': tfds.features.Text(),\n 'study_id': tfds.features.Text(),\n 'category': tfds.features.ClassLabel(names=['negative', 'typical', 'atypical', 'indeterminate'])\n }),\n supervised_keys=('image', 'category'),\n homepage='https://dataset-homepage/',\n citation=_CITATION,\n )",
"def dataset_statistics(dataset):\n print (dataset.describe())",
"def get_dataset_metadata(fields=[]):\n return get_dict_from_db(key='metadata', fields=fields)",
"def _get_dsmeta(self, bids):\n # STEP 1: Extract metadata from `dataset_description.json`\n metadata = self._get_bids_dsdescription(bids)\n # STEP 2: Extract README text\n metadata[\"description\"] = self._get_bids_readme()\n # STEP 3: Extract information about entities and add to metadata\n metadata[\"entities\"] = self._get_bids_entities(bids)\n # STEP 4: Extract variable collection information on multiple levels\n metadata[\"variables\"] = self._get_bids_variables(bids)\n # STEP 5: Add context to metadata output\n metadata[\"@context\"] = BIDSCONTEXT\n return metadata",
"def dataset_statistics(dataset):\n print(dataset.describe())",
"def dataset_statistics(dataset):\n print(dataset.describe())",
"def _info(self) -> tfds.core.DatasetInfo:\n # TODO(kappatng): Specifies the tfds.core.DatasetInfo object\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n # These are the features of your dataset like images, labels ...\n \"image\": tfds.features.Tensor(shape=[41, 41], dtype=tf.float32),\n \"psf\": tfds.features.Tensor(shape=[41, 41], dtype=tf.float32),\n \"variance\": tfds.features.Tensor(shape=[41, 41], dtype=tf.float32),\n \"mask\": tfds.features.Tensor(shape=[41, 41], dtype=tf.int32),\n\t}),\n # If there's a common (input, target) tuple from the\n # features, specify them here. They'll be used if\n # `as_supervised=True` in `builder.as_dataset`.\n supervised_keys=(\"image\", \"image\"),\n homepage='https://dataset-homepage/',\n citation=_CITATION,\n )",
"def _info(self) -> tfds.core.DatasetInfo:\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n 'id': tfds.features.Text(),\n 'program': tfds.features.Text(),\n 'date': tfds.features.Text(),\n 'url': tfds.features.Text(),\n 'summary': tfds.features.Text(),\n 'utt': tfds.features.Sequence(tfds.features.Text()),\n 'speaker': tfds.features.Sequence(tfds.features.Text()),\n }),\n supervised_keys=('utt', 'summary'),\n homepage='https://github.com/zcgzcgzcg1/MediaSum',\n citation=_CITATION,\n )",
"def _add_metadata_as_attrs(data, units, description, dtype_out_vert):\n if isinstance(data, xr.DataArray):\n return _add_metadata_as_attrs_da(data, units, description,\n dtype_out_vert)\n else:\n for name, arr in data.data_vars.items():\n _add_metadata_as_attrs_da(arr, units, description,\n dtype_out_vert)\n return data",
"def metadata(self, run_id, data_type):\n if not data_type in self.provides:\n raise RuntimeError(f\"{data_type} not in {self.provides}?\")\n return dict(\n run_id=run_id,\n data_type=data_type,\n data_kind=self.data_kind_for(data_type),\n dtype=self.dtype_for(data_type),\n lineage_hash=strax.DataKey(\n run_id, data_type, self.lineage).lineage_hash,\n compressor=self.compressor,\n lineage=self.lineage)",
"def metadata(self) -> global___SummaryMetadata:",
"def test_dataset_details():\n with new_test_dataset(2) as test_ds:\n args = build_register_args(test_ds.copy_to_s3())\n ds_name = args['name']\n URLs.run(url_info=URLs.register_url(), json_body=args)\n\n ds_parts = URLs.run(url_info=URLs.dataset_parts_url(ds_name)).json\n assert ds_parts['filenames'] == test_ds.expected_parts.filenames\n expected_columns = json.loads(datafile_schema().to_json())['columns']\n\n ds_short_schema = URLs.run(url_info=URLs.dataset_schema_url(ds_name, full=False)).json\n assert ds_short_schema['columns'] == expected_columns\n\n ds_full_schema = URLs.run(url_info=URLs.dataset_schema_url(ds_name, full=True)).json\n assert ds_full_schema['columns'][DEFAULT_TIMESTAMP_COLUMN]['colattrs']['numericMin'] == BASE_TIME\n\n URLs.run(url_info=URLs.unregister_url(ds_name))",
"def metadata(self, ds_name):\n return self.runinfos.get(self._infer_fqn(ds_name)).metadata._metadata",
"def get_dataset_meta(dataset=None):\n data_meta = {}\n\n data_meta['unlabeled_count'] = len(dataset.unlabel)\n data_meta['labeled_count'] = \\\n len(dataset.train.X) + len(dataset.test.X)\n\n data_meta['train_data'] = {}\n data_meta['test_data'] = {}\n\n data_meta['train_data']['spam_count'] = int(sum(dataset.train.y))\n data_meta['train_data']['ham_count'] = \\\n int(len(dataset.train.y) - sum(dataset.train.y))\n data_meta['train_data']['total_count'] = \\\n data_meta['train_data']['spam_count'] + \\\n data_meta['train_data']['ham_count']\n\n data_meta['test_data']['spam_count'] = int(sum(dataset.test.y))\n data_meta['test_data']['ham_count'] = \\\n int(len(dataset.test.y) - sum(dataset.test.y))\n data_meta['test_data']['total_count'] = \\\n data_meta['test_data']['spam_count'] + \\\n data_meta['test_data']['ham_count']\n\n return data_meta",
"def _info(self) -> tfds.core.DatasetInfo:\n # TODO(mtnt): Specifies the tfds.core.DatasetInfo object\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n # These are the features of your dataset like images, labels ...\n 'src': tfds.features.Text(),\n 'dst': tfds.features.Text(),\n }),\n # If there's a common (input, target) tuple from the\n # features, specify them here. They'll be used if\n # `as_supervised=True` in `builder.as_dataset`.\n supervised_keys=('src', 'dst'), # Set to `None` to disable\n homepage='https://pmichel31415.github.io/mtnt/index.html',\n citation=_CITATION,\n )",
"def _info(self) -> tfds.core.DatasetInfo:\n # TODO(a2o): Specifies the tfds.core.DatasetInfo object\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n features=tfds.features.FeaturesDict({\n # These are the features of your dataset like images, labels ...\n 'image': tfds.features.Image(shape=(None, None, 3)),\n 'label': tfds.features.ClassLabel(names=label_name),\n }),\n # If there's a common (input, target) tuple from the\n # features, specify them here. They'll be used if\n # `as_supervised=True` in `builder.as_dataset`.\n supervised_keys=('image', 'label'), # Set to `None` to disable\n homepage='https://dataset-homepage/',\n citation=_CITATION,\n )",
"def _info(self) -> tfds.core.DatasetInfo:\n return tfds.core.DatasetInfo(\n builder=self,\n description=_DESCRIPTION,\n citation=_CITATION,\n features=tfds.features.FeaturesDict({\n 'image': tfds.features.Image(shape=(None, None, 3)),\n 'label': tfds.features.ClassLabel(names=_CLASS_NAMES),\n }),\n homepage=_HOMEPAGE,\n supervised_keys=('image', 'label'),\n )",
"def metadata(self, df):\n raise NotImplementedError(\"missing metadata() method\")",
"def metadata(self): # -> None:\n ...",
"def dataset_info(self, data_source_id, table_name):\n # NOTE: form field returns 'None' as string if not set\n if not table_name or table_name == 'None':\n # empty table name\n return None\n\n # parse schema and table name\n parts = table_name.split('.')\n if len(parts) > 1:\n schema = parts[0]\n table_name = parts[1]\n else:\n schema = 'public'\n\n return self.postgis_metadata(data_source_id, schema, table_name)",
"def __metadata__(self):\n raise NotImplementedError",
"def getInternalMetadata(self, **kwargs):\n result = JSONDict({})\n with self._getDatasetLock:\n result['driverShortName'] = self.dataset.GetDriver().ShortName\n result['driverLongName'] = self.dataset.GetDriver().LongName\n result['fileList'] = self.dataset.GetFileList()\n result['RasterXSize'] = self.dataset.RasterXSize\n result['RasterYSize'] = self.dataset.RasterYSize\n result['GeoTransform'] = self._getGeoTransform()\n result['Projection'] = self.dataset.GetProjection()\n result['proj4Projection'] = self.getProj4String()\n result['GCPProjection'] = self.dataset.GetGCPProjection()\n if self.dataset.GetGCPs():\n result['GCPs'] = [{\n 'id': gcp.Id, 'line': gcp.GCPLine, 'pixel': gcp.GCPPixel,\n 'x': gcp.GCPX, 'y': gcp.GCPY, 'z': gcp.GCPZ}\n for gcp in self.dataset.GetGCPs()]\n result['Metadata'] = self.dataset.GetMetadata_List()\n for key in ['IMAGE_STRUCTURE', 'SUBDATASETS', 'GEOLOCATION', 'RPC']:\n metadatalist = self.dataset.GetMetadata_List(key)\n if metadatalist:\n result['Metadata_' + key] = metadatalist\n return result",
"def dataset_meta(self, dataset_meta: dict) -> None:\n self._dataset_meta = dataset_meta",
"def metadata(self):\n return {\n \"namespace\": self.namespace,\n \"short_name\": f\"{self.namespace}_{self._dataset_metadata['DatasetCode']}\",\n \"name\": f\"{self._dataset_metadata['DatasetName']} - FAO ({self.publication_year})\",\n \"description\": self._dataset_metadata[\"DatasetDescription\"],\n \"source_name\": \"Food and Agriculture Organization of the United Nations\",\n \"publication_year\": int(self.publication_year),\n \"publication_date\": self._dataset_metadata[\"DateUpdate\"],\n \"date_accessed\": str(dt.date.today()),\n \"url\": self.url,\n \"source_data_url\": self.source_data_url,\n \"file_extension\": \"zip\",\n }",
"def metadata(self, run_id):\n return dict(\n run_id=run_id,\n data_type=self.provides,\n data_kind=self.data_kind,\n dtype=self.dtype,\n compressor=self.compressor,\n lineage=self.lineage)",
"def understand_the_data(self,dataset):\n shape = dataset.shape\n description = dataset.describe\n print(shape)\n print(description)"
] | [
"0.67701876",
"0.6577738",
"0.6516718",
"0.6471045",
"0.62507623",
"0.62314963",
"0.62190706",
"0.6218088",
"0.62123156",
"0.62123156",
"0.61886233",
"0.6174235",
"0.6168924",
"0.6141858",
"0.612278",
"0.6120039",
"0.60999846",
"0.60504276",
"0.6031817",
"0.59935963",
"0.5977308",
"0.5971374",
"0.59614295",
"0.5960434",
"0.5946467",
"0.5943077",
"0.59330446",
"0.5930753",
"0.59134865",
"0.58633316"
] | 0.7206746 | 0 |
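A small, runnable sketch of the metadata-dict pattern in the `basic_dataset_metadata` snippet above: assemble the fixed entries first and attach the optional 'searchterms' key only when a facet is set. The dataclass stands in for the real data_set_view ORM object.

from dataclasses import dataclass

@dataclass
class FakeDataSetView:
    name: str
    display_name: str
    description: str = ''
    facet: str = ''

def build_metadata(view, contacts):
    metadata = {
        'identifier': view.name,
        'display': view.display_name,
        'type': 'datasetview',
        'description': view.description,
        'contacts': contacts,
        'datatype': 'table',
    }
    if view.facet:
        # optional search facet becomes a single-element searchterms list
        metadata['searchterms'] = [view.facet]
    return metadata

print(build_metadata(FakeDataSetView('roads', 'Road network', facet='transport'), contacts=[]))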
Return contacts metadata for a basic DataSet dataproduct. | def basic_dataset_contacts(self, data_set_view, session):
# collect contacts for basic DataSet and related GDI resources
gdi_oids = [
data_set_view.gdi_oid, data_set_view.data_set.gdi_oid_data_source
]
return self.contacts(gdi_oids, session) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def basic_dataset_metadata(self, data_set_view, session):\n metadata = {}\n\n contacts = self.basic_dataset_contacts(data_set_view, session)\n\n metadata = {\n 'identifier': data_set_view.name,\n 'display': data_set_view.data_set.data_set_name,\n 'type': 'datasetview',\n 'description': data_set_view.description,\n 'contacts': contacts,\n 'datatype': 'table'\n }\n\n if data_set_view.facet:\n metadata.update({\n 'searchterms': [data_set_view.facet]\n })\n\n return metadata",
"def contact_info(self, sensitive=True):\n account_id = self.account_id()\n retry_count = 5\n\n req_url = self.get(\"/accounts/{}/contacts\".format(account_id))['ResultUrl']\n resp = self.get(req_url)\n tries = 0\n while 'Contacts' not in resp and tries < retry_count:\n resp = self.get(req_url)\n tries += 1\n time.sleep(1)\n contacts = resp['Contacts']\n\n contact_data = list()\n for contact in contacts:\n row_data = {\n 'ContactId': contact['Id'],\n 'Email': \"*****@****.***\" if sensitive else contact['Email'],\n 'FirstName': \"*****\" if sensitive else contact['FirstName'],\n 'LastName': \"*****\" if sensitive else contact['LastName'],\n 'Status': contact.get('Status'),\n 'MembeshipEnabled': contact.get('MembershipEnabled'),\n 'TermsOfUseAccepted': contact['TermsOfUseAccepted'],\n }\n\n if 'MembershipLevel' in contact:\n row_data['MembershipLevel'] = contact['MembershipLevel']['Name']\n\n # Map all field values into a dict for convenience\n field_values = {val['FieldName']: val['Value']\n for val in contact['FieldValues']}\n\n # Get list of authorizations\n if 'Managed Authorizations' in field_values:\n authorizations = [i['Label']\n for i in field_values['Managed Authorizations']]\n row_data['Authorizations'] = authorizations\n\n contact_data.append(row_data)\n self.__contact_df = pd.DataFrame(contact_data).set_index('ContactId')\n return self.__contact_df",
"def dataproduct_contacts(self, ows_layer, session):\n # collect contacts for layer and related GDI resources\n gdi_oids = [ows_layer.gdi_oid]\n if ows_layer.type == 'data':\n # include data source\n gdi_oids.append(\n ows_layer.data_set_view.data_set.gdi_oid_data_source\n )\n\n return self.contacts(gdi_oids, session)",
"def getContactsData(service, groupResourceName, maxMembers):\n # get the ids of the contacts inside the specified group\n contactsIDs = service.contactGroups().get(\n resourceName=groupResourceName, \n maxMembers=maxMembers).execute()[\"memberResourceNames\"]\n\n # get data of the contacts that correspond to the ids obtained\n contactsData = service.people().getBatchGet(\n resourceNames=contactsIDs,\n personFields='names,emailAddresses').execute()[\"responses\"]\n\n # extract the names and the emailAddresses of the contacts\n namessList = [] \n mailsList = []\n for contact in contactsData:\n try:\n namessList.append(contact[\"person\"][\"names\"][0][\"displayName\"])\n except:\n raise Exception(\"All contacts must have a name associated\")\n mailsList.append(contact[\"person\"][\"emailAddresses\"][0][\"value\"])\n return namessList, mailsList",
"def dataproduct(self, identity, dataproduct_id):\n metadata = {}\n\n permissions = self.permission.dataproduct_permissions(\n dataproduct_id, identity\n ) or {}\n\n session = self.config_models.session()\n\n # find Group or Data layer object\n OWSLayer = self.config_models.model('ows_layer')\n query = session.query(OWSLayer).filter_by(name=dataproduct_id)\n ows_layer = query.first()\n if ows_layer is not None:\n metadata, searchterms = self.dataproduct_metadata(\n ows_layer, permissions, session\n )\n else:\n # find DataSetView for basic DataSet\n DataSetView = self.config_models.model('data_set_view')\n query = session.query(DataSetView).filter_by(name=dataproduct_id)\n data_set_view = query.first()\n if data_set_view is not None:\n if data_set_view.name in permissions.get('basic_datasets', []):\n # basic DataSet permitted\n metadata = self.basic_dataset_metadata(\n data_set_view, session\n )\n\n session.close()\n\n return metadata",
"def contact_info(self):\n return [\n {\n 'contact_info': c.get('contactInfo'),\n 'type': c.get('type'),\n 'primary': c.get('primary'),\n 'verified': c.get('verified'),\n }\n for c in self.entity_payload.get('contactInfo')]",
"def get_contacts(self):\n contacts = Membership.objects.filter(entity = self, key_contact = True).order_by('importance_to_entity')\n return contacts",
"def get_organisation_metadata() -> pd.DataFrame:\n return GETTER.organisationmetadata",
"def getcontacts():\n contacts = {}\n\n try:\n #get list of contact ids\n contactids = r.smembers(\"contacts\")\n\n #for each contact id get data\n for contactid in contactids:\n contacts.update(_getcontact(str(contactid)))\n return contacts\n except:\n print \"Unexpected error:\", sys.exc_info()[0]\n raise",
"def dataset_info(self, data_source_id, table_name):\n # NOTE: form field returns 'None' as string if not set\n if not table_name or table_name == 'None':\n # empty table name\n return None\n\n # parse schema and table name\n parts = table_name.split('.')\n if len(parts) > 1:\n schema = parts[0]\n table_name = parts[1]\n else:\n schema = 'public'\n\n return self.postgis_metadata(data_source_id, schema, table_name)",
"def _get_dsmeta(self, bids):\n # STEP 1: Extract metadata from `dataset_description.json`\n metadata = self._get_bids_dsdescription(bids)\n # STEP 2: Extract README text\n metadata[\"description\"] = self._get_bids_readme()\n # STEP 3: Extract information about entities and add to metadata\n metadata[\"entities\"] = self._get_bids_entities(bids)\n # STEP 4: Extract variable collection information on multiple levels\n metadata[\"variables\"] = self._get_bids_variables(bids)\n # STEP 5: Add context to metadata output\n metadata[\"@context\"] = BIDSCONTEXT\n return metadata",
"def metadata(self):\n return {\n \"namespace\": self.namespace,\n \"short_name\": f\"{self.namespace}_{self._dataset_metadata['DatasetCode']}\",\n \"name\": f\"{self._dataset_metadata['DatasetName']} - FAO ({self.publication_year})\",\n \"description\": self._dataset_metadata[\"DatasetDescription\"],\n \"source_name\": \"Food and Agriculture Organization of the United Nations\",\n \"publication_year\": int(self.publication_year),\n \"publication_date\": self._dataset_metadata[\"DateUpdate\"],\n \"date_accessed\": str(dt.date.today()),\n \"url\": self.url,\n \"source_data_url\": self.source_data_url,\n \"file_extension\": \"zip\",\n }",
"def getData(self):\r\n return personData(\r\n self.title.getVal(),\r\n self.first.getVal(),\r\n self.middle.getVal(),\r\n self.last.getVal(),\r\n self.suffix.getVal(),\r\n self.phone.getVal(),\r\n self.ext.getVal(),\r\n self.email.getVal(),\r\n self.affiliation.getVal())",
"def get_dataset_metadata(fields=[]):\n return get_dict_from_db(key='metadata', fields=fields)",
"def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")",
"def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")",
"def contact_details(self) -> 'outputs.ContactDetailsResponse':\n return pulumi.get(self, \"contact_details\")",
"def contact_info(self):\n return self._contact_info",
"def summarize_metadata(self):\n meta_dict = {}\n for comp in self.dataset.data_vars:\n for mkey, mvalue in self.dataset[comp].attrs.items():\n meta_dict[f\"{comp}.{mkey}\"] = mvalue\n\n return meta_dict",
"def get_description(self):\n return self['contact_name']",
"def contact_details(self):\n return self.data.get(\"contactDetails\")",
"def get_all(self):\n total_contacts = []\n get_count = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': 'RECORDNO'\n },\n 'pagesize': '1'\n }\n }\n\n response = self.format_and_send_request(get_count)\n count = int(response['data']['@totalcount'])\n pagesize = 2000\n offset = 0\n for i in range(0, count, pagesize):\n data = {\n 'query': {\n 'object': 'CONTACT',\n 'select': {\n 'field': [\n 'RECORDNO',\n 'CONTACTNAME',\n 'COMPANYNAME',\n 'FIRSTNAME',\n 'LASTNAME',\n 'INITIAL',\n 'PRINTAS',\n 'TAXABLE',\n 'MAILADDRESS.ADDRESS1'\n ]\n },\n 'pagesize': pagesize,\n 'offset': offset\n }\n }\n contacts = self.format_and_send_request(data)['data']['CONTACT']\n total_contacts = total_contacts + contacts\n offset = offset + pagesize\n return total_contacts",
"def info(dataset, indent, meta_member, verbose, quiet):\n verbosity = verbose - quiet\n configure_logging(verbosity)\n table = bcdata.validate_name(dataset)\n wfs = WebFeatureService(url=bcdata.OWS_URL, version=\"2.0.0\")\n info = {}\n info[\"name\"] = table\n info[\"count\"] = bcdata.get_count(table)\n info[\"schema\"] = wfs.get_schema(\"pub:\" + table)\n if meta_member:\n click.echo(info[meta_member])\n else:\n click.echo(json.dumps(info, indent=indent))",
"def get_dataset_meta(self, output_name, dataset_id):\n return {}",
"def present_data(self, data=None):\n print('--------------------------------------------------------------------------')\n print('{:<10}{:<10}{:<15}{:<17}{:<17}'.\n format(\n 'index',\n 'name',\n 'surname',\n 'email',\n 'phone'\n )\n )\n print('--------------------------------------------------------------------------')\n\n data = data if data else self.contacts\n for contact in data:\n print('{:<10}{:<10}{:<15}{:<17}{:<17}'.\n format(\n contact[0],\n contact[1],\n contact[2],\n contact[3],\n contact[4]\n )\n )",
"def data_and_metadata(self):\n data = self.data\n if self._metadata is not None and not self._metadata.empty:\n data = [self._metadata, data]\n data = pd.concat(data, axis=1)\n return data",
"def get_contacts(self):\n\n\t\treturn self.__contacts",
"def get_contacts_data(self) -> ContactsData:\n if self.contacts_data.should_update():\n ok = self._update_contacts_repo()\n if ok:\n self.contacts_data.update(contacts_reader.get_contacts_data(self.contacts_file))\n else:\n self.contacts_data.try_again()\n\n return self.contacts_data.data",
"def _getMetadataName(self):\n return \"%s_processCoadd_metadata\" % (self.config.coaddName,)",
"def make_crossref_metadata(dataset: ObservatoryDataset) -> List[Dict]:\n\n records = []\n\n for paper in dataset.papers:\n # Create funders\n funders = []\n for funder in paper.funders:\n funders.append({\"name\": funder.name, \"DOI\": funder.doi, \"award\": None, \"doi_asserted_by\": None})\n\n # Add Crossref record\n records.append(\n {\n \"type\": paper.type,\n \"title\": [paper.title],\n \"DOI\": paper.doi,\n \"is_referenced_by_count\": len(paper.cited_by),\n \"issued\": {\n \"date_parts\": [paper.published_date.year, paper.published_date.month, paper.published_date.day]\n },\n \"funder\": funders,\n \"publisher\": paper.publisher.name,\n }\n )\n\n return records"
] | [
"0.70957017",
"0.6029492",
"0.60244423",
"0.5971579",
"0.5823789",
"0.5810419",
"0.5684084",
"0.5576861",
"0.55693126",
"0.5546394",
"0.554111",
"0.5482603",
"0.54492116",
"0.54473823",
"0.5446493",
"0.5446493",
"0.5446493",
"0.54323864",
"0.5393266",
"0.538147",
"0.5356055",
"0.53452325",
"0.5331861",
"0.5328119",
"0.5297685",
"0.52781934",
"0.5259632",
"0.52386767",
"0.5227865",
"0.5225508"
] | 0.72183543 | 0 |
Return primary key, geometry columns, types and srids from a PostGIS table. | def postgis_metadata(self, data_source_id, schema, table_name):
metadata = {}
try:
engine = self.engine_for_data_source(data_source_id)
if engine is None:
return {
'error': "FEHLER: DataSource nicht gefunden"
}
# connect to data_source
conn = engine.connect()
# get primary key
# build query SQL
sql = sql_text("""
SELECT a.attname
FROM pg_index i
JOIN pg_attribute a ON a.attrelid = i.indrelid
AND a.attnum = ANY(i.indkey)
WHERE i.indrelid = '{schema}.{table}'::regclass
AND i.indisprimary;
""".format(schema=schema, table=table_name))
# execute query
primary_key = None
result = conn.execute(sql)
for row in result:
primary_key = row['attname']
# get geometry column and srid
# build query SQL
sql = sql_text("""
SELECT f_geometry_column, srid, type
FROM geometry_columns
WHERE f_table_schema = '{schema}' AND f_table_name = '{table}';
""".format(schema=schema, table=table_name))
# execute query
geometry_columns = []
result = conn.execute(sql)
for row in result:
geometry_columns.append({
'geometry_column': row['f_geometry_column'],
'geometry_type': row['type'],
'srid': row['srid']
})
# close database connection
conn.close()
metadata = {
'schema': schema,
'table': table_name,
'primary_key': primary_key,
'geometry_columns': geometry_columns
}
except OperationalError as e:
self.logger.error(e.orig)
return {
'error': "OperationalError: %s" % e.orig
}
except ProgrammingError as e:
self.logger.error(e.orig)
return {
'error': "ProgrammingError: %s" % e.orig
}
return metadata | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_table_info(self):\n epsg = None\n meta = MetaData()\n table_obj = Table(self._table, meta,\n autoload=True, autoload_with=self._engine)\n if not self._columns:\n self._columns = table_obj.columns.keys()\n geo_cols = [(col.name, col.type) for col in table_obj.columns\n if hasattr(col.type, 'srid')]\n if geo_cols:\n geo_col = geo_cols[0]\n self._geom_column = geo_col[0]\n geo_obj = geo_col[1]\n if self._geom_column not in self._columns:\n self._columns.append(self._geom_column)\n if hasattr(geo_obj, 'srid'):\n epsg = geo_obj.srid\n if epsg == -1:\n epsg = 4326\n if hasattr(geo_obj, 'geometry_type'):\n self._geometry_type = geo_obj.geometry_type\n\n self._epsg = epsg\n self._table_obj = table_obj\n self._meta = meta",
"def get_table_info(self):\n epsg = None\n meta = MetaData()\n table_obj = Table(self.table, meta,\n autoload=True, autoload_with=self.engine)\n if not self.columns:\n self.columns = table_obj.columns.keys()\n geo_cols = [(col.name, col.type) for col in table_obj.columns\n if hasattr(col.type, 'srid')]\n if geo_cols:\n geo_col = geo_cols[0]\n self.geom_column = geo_col[0]\n geo_obj = geo_col[1]\n if self.geom_column not in self.columns:\n self.columns.append(self.geom_column)\n if hasattr(geo_obj, 'srid'):\n epsg = geo_obj.srid\n if epsg == -1:\n epsg = 4326\n if hasattr(geo_obj, 'geometry_type'):\n self.geometry_type = geo_obj.geometry_type\n\n self.epsg = epsg\n self.table_obj = table_obj\n self.meta = meta",
"def get_geometries ( self, object_class_table, spatial_column, select_column, select_id ) :\n stmt = 'select sdo_util.to_wktgeometry(' + str(spatial_column) + ') from ' + str(object_class_table) + ' where ' + str(select_column) + ' = ' + str(select_id)\n self.oracle_cursor.execute( stmt )\n resultset = self.oracle_cursor.fetchall()\n return resultset",
"def primary_key(table_name: str) -> str:\n\n return f\"\"\"\n SELECT\n a.attname AS column_name,\n format_type(a.atttypid, a.atttypmod) AS data_type\n FROM\n pg_index i\n JOIN\n pg_attribute a\n ON\n a.attrelid = i.indrelid AND\n a.attnum = ANY(i.indkey)\n WHERE\n i.indrelid = '{table_name}'::regclass AND\n i.indisprimary\n \"\"\"",
"def get_geometry_type(self, table_name, description):\n with self.connection.cursor() as cursor:\n cursor.execute(\n \"\"\"\n SELECT t.coord_dimension, t.srid, t.type FROM (\n SELECT * FROM geometry_columns\n UNION ALL\n SELECT * FROM geography_columns\n ) AS t WHERE t.f_table_name = %s AND t.f_geometry_column = %s\n \"\"\",\n (table_name, description.name),\n )\n row = cursor.fetchone()\n if not row:\n raise Exception(\n 'Could not find a geometry or geography column for \"%s\".\"%s\"'\n % (table_name, description.name)\n )\n dim, srid, field_type = row\n # OGRGeomType does not require GDAL and makes it easy to convert\n # from OGC geom type name to Django field.\n field_type = OGRGeomType(field_type).django\n # Getting any GeometryField keyword arguments that are not the default.\n field_params = {}\n if self.postgis_oid_lookup.get(description.type_code) == \"geography\":\n field_params[\"geography\"] = True\n if srid != 4326:\n field_params[\"srid\"] = srid\n if dim != 2:\n field_params[\"dim\"] = dim\n return field_type, field_params",
"def _primary_key_columns(cls):\n return [col for col in cls._columns() if getattr(cls, col).primary_key]",
"def get_primary_keys(schema_name, table_name):\n sql = \"\"\"\n SELECT\n DISTINCT kcu.TABLE_SCHEMA, kcu.TABLE_NAME, tc.CONSTRAINT_NAME, kcu.COLUMN_NAME, kcu.ORDINAL_POSITION\n FROM INFORMATION_SCHEMA.TABLE_CONSTRAINTS AS tc\n INNER JOIN INFORMATION_SCHEMA.KEY_COLUMN_USAGE AS kcu ON kcu.CONSTRAINT_NAME = tc.CONSTRAINT_NAME\n WHERE tc.CONSTRAINT_TYPE = 'PRIMARY KEY'\n AND kcu.TABLE_SCHEMA = '{0}'\n AND kcu.TABLE_NAME = '{1}'\n ORDER BY kcu.ORDINAL_POSITION ASC\n \"\"\"\n\n results = fetch_rows(sql.format(schema_name, table_name))\n\n columns = []\n for row in results:\n columns.append({\n 'column_name': row['column_name'],\n 'column_sort_order': 'ASC'\n })\n\n return columns",
"def primary_keys(class_):\n for column in class_.__table__.c:\n if column.primary_key:\n yield column",
"def plot_postgres_db(postgres_engine: Engine):\n # Table level SQL, schema name, table name, row count\n table_sql = pd.read_sql(\n \"\"\"SELECT s.schemaname, tablename AS table_name, hasindexes, n_live_tup AS row_count\n FROM pg_stat_user_tables s\n JOIN pg_tables t ON t.tablename = s.relname AND t.schemaname = s.schemaname ORDER BY 1,2;\"\"\",\n postgres_engine,\n )\n version_sql = pd.read_sql(\"\"\"SELECT version();\"\"\", postgres_engine)\n # View level SQL\n view_sql = pd.read_sql(\n \"\"\"SELECT schemaname, v.viewname AS view_name, definition FROM pg_class c\nJOIN pg_views v on v.viewname = c.relname AND c.relnamespace = v.schemaname::regnamespace::oid\nWHERE v.schemaname != 'pg_catalog' AND v.schemaname != 'information_schema' AND relkind = 'v' ORDER BY 1,2\"\"\",\n postgres_engine,\n )\n # PK/FK constraints\n pk_fk = pd.read_sql(\n \"\"\"SELECT conname as constraint_name,\n CASE\n WHEN contype = 'p' THEN 'primary key'\n WHEN contype = 'f' THEN 'foreign key'\n WHEN contype = 'u' THEN 'unique key'\n END AS constraint_type\n , conrelid::regclass AS \"table_name\"\n , CASE WHEN pg_get_constraintdef(c.oid) LIKE 'FOREIGN KEY %%' THEN substring(pg_get_constraintdef(c.oid), 14, position(')' in pg_get_constraintdef(c.oid))-14) WHEN pg_get_constraintdef(c.oid) LIKE 'PRIMARY KEY %%' THEN substring(pg_get_constraintdef(c.oid), 14, position(')' in pg_get_constraintdef(c.oid))-14) END AS \"col_name\"\n , CASE WHEN pg_get_constraintdef(c.oid) LIKE 'FOREIGN KEY %%' THEN substring(pg_get_constraintdef(c.oid), position(' REFERENCES ' in pg_get_constraintdef(c.oid))+12, position('(' in substring(pg_get_constraintdef(c.oid), 14))-position(' REFERENCES ' in pg_get_constraintdef(c.oid))+1) END AS \"ref_table\"\n , CASE WHEN pg_get_constraintdef(c.oid) LIKE 'FOREIGN KEY %%' THEN substring(pg_get_constraintdef(c.oid), position('(' in substring(pg_get_constraintdef(c.oid), 14))+14, position(')' in substring(pg_get_constraintdef(c.oid), position('(' in substring(pg_get_constraintdef(c.oid), 14))+14))-1) END AS \"ref_col\"\n , pg_get_constraintdef(c.oid) as constraint_def,\n CASE\n WHEN confupdtype = 'a' THEN 'NO ACTION'\n WHEN confupdtype = 'r' THEN 'RESTRICT'\n WHEN confupdtype = 'c' THEN 'CASCADE'\n WHEN confupdtype = 'n' THEN 'SET NULL'\n WHEN confupdtype = 'd' THEN 'SET DEFAULT'\n END AS update_rule,\n CASE\n WHEN confdeltype = 'a' THEN 'NO ACTION'\n WHEN confdeltype = 'r' THEN 'RESTRICT'\n WHEN confdeltype = 'c' THEN 'CASCADE'\n WHEN confdeltype = 'n' THEN 'SET NULL'\n WHEN confdeltype = 'd' THEN 'SET DEFAULT'\n END AS delete_rule\n FROM pg_constraint c\n JOIN pg_namespace n ON n.oid = c.connamespace\n WHERE contype IN ('f', 'p', 'u')\n ORDER BY conrelid::regclass::text, contype DESC;\"\"\",\n postgres_engine,\n )\n # List of schemas and tables\n schema_list = list(table_sql[\"schemaname\"])\n schema_str = \",\".join(set(schema_list))\n table_list = list(table_sql[\"table_name\"])\n view_list = list(view_sql[\"view_name\"])\n overview_dict = {}\n # Show the stats for schemas, tables and PK/FK\n overview_dict[\"num_of_schemas\"] = len(set(schema_list))\n overview_dict[\"schema_names\"] = list(set(schema_list))\n overview_dict[\"table_schema\"] = dict(zip(table_sql[\"table_name\"], table_sql[\"schemaname\"]))\n overview_dict[\"num_of_tables\"] = len(table_list)\n overview_dict[\"table_names\"] = table_list\n overview_dict[\"num_of_views\"] = len(view_list)\n overview_dict[\"view_names\"] = view_list\n overview_dict[\"connection_url\"] = postgres_engine.url\n overview_dict[\"tables_no_index\"] = list(\n 
table_sql[table_sql[\"hasindexes\"] == False][\"table_name\"]\n )\n overview_dict[\"num_of_pk\"] = len(pk_fk[pk_fk[\"constraint_type\"] == \"primary key\"])\n overview_dict[\"num_of_fk\"] = len(pk_fk[pk_fk[\"constraint_type\"] == \"foreign key\"])\n overview_dict[\"num_of_uk\"] = len(pk_fk[pk_fk[\"constraint_type\"] == \"unique key\"])\n overview_dict[\"view_schema\"] = dict(zip(view_sql[\"view_name\"], view_sql[\"schemaname\"]))\n overview_dict[\"product_version\"] = re.findall(\"[\\d\\.]+\\d+\", version_sql.values[0][0])[0]\n\n # Stats for column level stats\n all_cols = pd.read_sql(\n \"\"\"select attrelid::regclass AS table_name, f.attname AS col_name,\n pg_catalog.format_type(f.atttypid,f.atttypmod) AS type, attnotnull,\n CASE\n WHEN f.atthasdef = 't' THEN pg_get_expr(d.adbin, d.adrelid)\n END AS default, description,\n CASE\n WHEN pg_get_expr(d.adbin, d.adrelid) LIKE 'nextval%%' THEN True\n ELSE False\n END AS auto_increment, null_frac * c.reltuples AS num_null, null_frac AS perc_of_null,\n CASE WHEN s.n_distinct < 0\n THEN -s.n_distinct * c.reltuples\n ELSE s.n_distinct\n END AS num_of_distinct,\n CASE WHEN s.n_distinct < 0\n THEN round((-s.n_distinct * 100)::numeric, 2)\n ELSE round((s.n_distinct / c.reltuples * 100)::numeric, 2)\n END AS perc_of_distinct\n FROM pg_attribute f\n JOIN pg_class c ON c.oid = f.attrelid\n --JOIN pg_type t ON t.oid = f.atttypid\n LEFT JOIN pg_namespace n ON n.oid = c.relnamespace\n LEFT JOIN pg_attrdef d ON d.adrelid = c.oid AND d.adnum = f.attnum\n LEFT JOIN pg_description de on de.objoid = c.oid\n LEFT JOIN pg_stats s on s.schemaname::regnamespace::oid = c.relnamespace AND s.tablename = c.relname AND s.attname = f.attname\n WHERE (c.relkind = 'v'::char or c.relkind = 'r'::char)\n AND f.attnum > 0\n AND attisdropped is False\n AND n.nspname in ('{}');\"\"\".format(\n schema_str\n ),\n postgres_engine,\n )\n # Split into intermediate result dictionary form - table\n table_dict = {}\n for i in table_list:\n indices = {}\n index = pd.read_sql(\n \"SELECT * FROM pg_indexes WHERE tablename= \" + \"'\" + str(i) + \"'\" + \";\",\n postgres_engine,\n )\n for (idx, row) in index.iterrows():\n current_index = row.loc[\"indexname\"]\n indices[current_index] = {}\n index_type, col_name = (row.loc[\"indexdef\"].split(\"USING \", 1)[1]).split(\" \", 1)\n col_name = col_name.replace(\"(\", \"\")\n col_name = col_name.replace(\")\", \"\")\n indices[current_index][\"Column_name\"] = col_name\n indices[current_index][\"Index_type\"] = index_type\n temp = {}\n temp_cols = (\n all_cols[all_cols[\"table_name\"] == i]\n .drop(columns=[\"table_name\"])\n .to_dict(orient=\"records\")\n )\n for j in temp_cols:\n temp[j[\"col_name\"]] = {}\n element = j.pop(\"col_name\")\n temp[element] = j\n temp[element][\"children\"] = list(\n pk_fk[(pk_fk[\"ref_table\"] == i) & (pk_fk[\"ref_col\"] == element)][\"table_name\"]\n )\n temp[element][\"parents\"] = list(\n pk_fk[\n (pk_fk[\"table_name\"] == i)\n & (pk_fk[\"col_name\"] == element)\n & (pk_fk[\"constraint_type\"] == \"foreign key\")\n ][\"ref_table\"]\n )\n temp[\"num_of_parents\"] = int(\n len(pk_fk[(pk_fk[\"table_name\"] == i) & (pk_fk[\"constraint_type\"] == \"foreign key\")])\n )\n temp[\"num_of_children\"] = int(len(pk_fk[(pk_fk[\"ref_table\"] == i)]))\n temp[\"num_of_rows\"] = int(table_sql[table_sql[\"table_name\"] == i][\"row_count\"].values[0])\n temp[\"num_of_cols\"] = int(len(all_cols[all_cols[\"table_name\"] == i]))\n temp[\"constraints\"] = {}\n temp_pk_fk = (\n pk_fk[pk_fk[\"table_name\"] == 
i].drop(columns=[\"table_name\"]).to_dict(orient=\"records\")\n )\n for j in temp_pk_fk:\n temp[\"constraints\"][j[\"constraint_name\"]] = {}\n element = j.pop(\"constraint_name\")\n temp[\"constraints\"][element] = j\n temp[\"indices\"] = indices\n table_dict[i] = temp\n # Split into intermediate result dictionary form - view\n view_dict = {}\n for i in view_list:\n temp = {}\n temp_cols = (\n all_cols[all_cols[\"table_name\"] == i]\n .drop(columns=[\"table_name\"])\n .to_dict(orient=\"records\")\n )\n for j in temp_cols:\n temp[j[\"col_name\"]] = {}\n element = j.pop(\"col_name\")\n temp[element] = j\n temp[\"num_of_cols\"] = len(all_cols[all_cols[\"table_name\"] == i])\n temp[\"definition\"] = view_sql[view_sql[\"view_name\"] == i][\"definition\"].values[0]\n view_dict[i] = temp\n\n return overview_dict, table_dict, view_dict",
"def PostgresTableToDict(sqldef, conn):\n df = pd.read_sql(sqldef, con=conn)\n return dict(zip(df.iloc[:, 0], df.iloc[:, 1]))",
"def zip_geom():\r\n engine = get_sql_engine()\r\n zipgeom = text(\r\n \"\"\"\r\n SELECT zip_code, geom\r\n FROM philly_zipcode\r\n \"\"\"\r\n )\r\n zipgeom = gpd.read_postgis(zipgeom, con=engine)\r\n return zipgeom",
"def df_from_postgis(engine, query, params, geocolumn, epsg):\n data = geopandas.GeoDataFrame.from_postgis(\n query,\n engine,\n geom_col=geocolumn,\n crs={'init': 'epsg:{}'.format(epsg)},\n params=params)\n return data",
"def transform_schema(pgschema):\n datatypes = {}\n for field in pgschema:\n if 'cartodb_id' in field:\n continue\n datatypes[field] = map_dtypes(pgschema[field]['type'])\n return datatypes",
"def getGeometryColumnDef(self, schema, table, column):\r\n defs = self.fetchSqlRecords(\r\n \"select type, srid from geometry_columns where f_table_schema='{}' and f_table_name='{}' and f_geometry_column='{}'\".format(schema, table, column))\r\n if not len(defs) == 1:\r\n return None\r\n\r\n return 'geometry({},{})'.format(defs[0][0], defs[0][1])",
"def get_pk_fields(self):\n\n return QueryBuilder.columns_to_dict(self, self.pk_columns, filtered=False)",
"def get_geom_type(carto_sql_client, tablename):\n geomtypes = {'ST_Point': 'point',\n 'ST_MultiPoint': 'point',\n 'ST_LineString': 'line',\n 'ST_MultiLineString': 'line',\n 'ST_Polygon': 'polygon',\n 'ST_MultiPolygon': 'polygon'}\n\n # NOTE: assumes one geometry type per table\n result = carto_sql_client.send('''\n SELECT ST_GeometryType(the_geom) As geomtype\n FROM \"{tablename}\"\n WHERE the_geom IS NOT NULL\n LIMIT 1'''.format(tablename=tablename))\n try:\n return geomtypes[result['rows'][0]['geomtype']]\n except (KeyError, IndexError):\n print((\"Cannot create a map from `{tablename}` because this table \"\n \"does not have geometries ({geomreported})\").format(\n tablename=tablename,\n geomreported=None))\n return None\n except Exception as err:\n print(\"ERROR: {}\".format(err))\n return None",
"def searchColPrimaryKey(self, table: Table) -> Column:\n columns = []\n if table:\n for col in table.columns:\n if col.primaryKey == True:\n columns.append(col)\n return columns",
"def get_table_primary_key(table_name):\n return table_spec[table_name]['primary_key_column']",
"def get_srid_info(srid):\n # SRID=-1 is a common convention for indicating the geometry has no\n # spatial reference information associated with it. Thus, we will\n # return all None values without raising an exception.\n if srid == -1: return None, None, None\n\n # Getting the spatial reference WKT associated with the SRID from the\n # `spatial_ref_sys` (or equivalent) spatial database table. This query\n # cannot be executed using the ORM because this information is needed\n # when the ORM cannot be used (e.g., during the initialization of \n # `GeometryField`).\n from django.db import connection\n cur = connection.cursor()\n qn = connection.ops.quote_name\n stmt = 'SELECT %(table)s.%(wkt_col)s FROM %(table)s WHERE (%(table)s.%(srid_col)s = %(srid)s)'\n stmt = stmt % {'table' : qn(SpatialRefSys._meta.db_table),\n 'wkt_col' : qn(SpatialRefSys.wkt_col()),\n 'srid_col' : qn('srid'),\n 'srid' : srid,\n }\n cur.execute(stmt)\n \n # Fetching the WKT from the cursor; if the query failed raise an Exception.\n fetched = cur.fetchone()\n if not fetched:\n raise ValueError('Failed to find spatial reference entry in \"%s\" corresponding to SRID=%s.' % \n (SpatialRefSys._meta.db_table, srid))\n srs_wkt = fetched[0]\n\n # Getting metadata associated with the spatial reference system identifier.\n # Specifically, getting the unit information and spheroid information \n # (both required for distance queries).\n unit, unit_name = SpatialRefSys.get_units(srs_wkt)\n spheroid = SpatialRefSys.get_spheroid(srs_wkt)\n return unit, unit_name, spheroid",
"def GetSqlData2(select,bycolumn=True):\n #connect to database and execute sql and retrieve data\n conn,cur = ConnectDb()\n cur.execute(select)\n fields = [d.name for d in cur.description]\n\n data = cur.fetchall()\n if len(data)==0:return None\n\n #print N.c_[fields,data[0]]\n\n if bycolumn:\n data = zip(*data)\n #print fields, len(data),len(data[0]),data[0][0] \n dic = {}\n while fields:\n field = fields.pop(0)\n \n #IF DATA IS GEOM OR GEOG\n if re.search('geog|geom',field,re.IGNORECASE):\n #print field, len(data),len(data[0]),data[0][0]\n geoms = data.pop(0)\n dic[field] = [ppygis.Geometry.read_ewkb(poly) for poly in geoms]\n if hasattr(dic[field][0], 'polygons'):\n #print dir()\n outerring = dic[field][0].polygons[0].rings.pop(0)\n dic['outer'] = [[point.x,point.y] for point in outerring.points]\n dic['inner'] = [[[point.x,point.y] for point in ring.points] for ring in dic[field][0].polygons[0].rings]\n #dic[field][0].polygons[0].rings[0].points]\n elif hasattr(dic[field][0], 'x'):\n dic['x'] = [item.x for item in dic[field]]\n dic['y'] = [item.y for item in dic[field]]\n else:dic[field] = N.array(data.pop(0))\n \n return dic\n else:\n lst = [] \n while data:\n dic = {}\n row = data.pop(0)\n \n for i,field in enumerate(fields):\n \n #IF DATA IS GEOM OR GEOG\n if re.search('geog|geom',field,re.IGNORECASE):\n #print 'here'\n dic[field] = ppygis.Geometry.read_ewkb(row[i])\n #if hasattr(dic[field], 'polygons'):\n outerring = dic[field].polygons[0].rings.pop(0)\n dic['outer'] = [[point.x,point.y] for point in outerring.points]\n dic['inner'] = [[[point.x,point.y] for point in ring.points] for ring in dic[field].polygons[0].rings]\n #elif hasattr(dic[field], 'x'):\n # dic['x'] = [item.x for item in dic[field]]\n # dic['y'] = [item.y for item in dic[field]]\n\n elif type(row[i]) == list or type(row[i]) == tuple:\n dic[field] = N.array(row[i])\n else:\n dic[field] = row[i]\n lst.append(dic)\n return lst",
"def _get_geometry(self):\r\n if self._geometry_column_name not in self.columns:\r\n raise AttributeError(\"Geometry Column Not Present: %s\" % self._geometry_column_name)\r\n return self[self._geometry_column_name]",
"def run(self):\n command = (\"shp2pgsql -I -s 4326 -d {} {}.{}|psql\").format(self.shpname(),\n self.schema,\n self.tablename())\n\n self.pgw.shell(command)",
"def _to_gisdb(self):\n self._ways.to_postgis(name=\"ways\", con=self._gisdb, if_exists=\"append\")\n self._nodes.to_sql(name=\"nodes\", con=self._gisdb, if_exists=\"append\")\n gdf_nodes, gdf_edges = osmnx.utils_graph.graph_to_gdfs(self._graph, node_geometry=False,\n fill_edge_geometry=False)\n gdf_edges[['id', 'length', 'u', 'v', 'key']].to_postgis(name=\"graph_edges\", con=self._gisdb, if_exists=\"append\")\n gdf_nodes[['id']].to_postgis(name=\"graph_nodes\", con=self._gisdb, if_exists=\"append\")\n self._nodes.to_sql(name=\"nodes\", con=self._gisdb, if_exists=\"append\")",
"def queryToPDTable(postgreSql_selectQuery):\n\n import os\n import psycopg2\n import pandas as pd\n\n #basic query function to the database using environmental variables for\n #the user name and password\n conn=psycopg2.connect(host=\"postgis1\",\n dbname=\"sdad\",\n user=os.environ.get('UVA_uname'),\n password=os.environ.get('UVA_pass'))\n\n #convert it to a pandas dataframe\n dataOut=pd.read_sql_query(postgreSql_selectQuery,conn)\n\n return dataOut",
"def columns(self, table):\n cur = self.connection.cursor()\n res = cur.execute(\"PRAGMA TABLE_INFO(%s)\" % table)\n columns = {}\n for row in res:\n columns[row[1]] = row[2]\n return columns",
"def point_coords(geom):\n # Return a tuple with the x/y point coordinate for a GeoDataFrame geometry\n return list(geom.coords)[0] # Just get first tuple in list, since it's a point",
"def _get_primary_keys(self, table_name, num_rows):\n primary_key = self.metadata.get_primary_key(table_name)\n primary_key_values = None\n\n if primary_key:\n field = self.metadata.get_fields(table_name)[primary_key]\n\n generator = self.primary_key.get(table_name)\n\n if generator is None:\n if field['type'] != 'id':\n raise ValueError('Only columns with type `id` can be primary keys')\n\n subtype = field.get('subtype', 'integer')\n if subtype == 'integer':\n generator = itertools.count()\n remaining = np.inf\n elif subtype == 'string':\n regex = field.get('regex', r'^[a-zA-Z]+$')\n generator = exrex.generate(regex)\n remaining = exrex.count(regex)\n elif subtype == 'datetime':\n raise NotImplementedError('Datetime ids are not yet supported')\n else:\n raise ValueError('Only `integer` or `string` id columns are supported.')\n\n self.primary_key[table_name] = generator\n self.remaining_primary_key[table_name] = remaining\n\n else:\n remaining = self.remaining_primary_key[table_name]\n\n if remaining < num_rows:\n raise ValueError(\n 'Not enough unique values for primary key of table {}'\n ' to generate {} samples.'.format(table_name, num_rows)\n )\n\n self.remaining_primary_key[table_name] -= num_rows\n primary_key_values = pd.Series([x for i, x in zip(range(num_rows), generator)])\n\n return primary_key, primary_key_values",
"def geojson2postgis(self, filepath, table_name, geo_type):\n map_data = gpd.GeoDataFrame.from_file(filepath)\n # Maybe you want to change link address\n link = \"postgresql://{0}:{1}@{3}:5432/{2}\".format(self.username, self.password, self.dbname, self.host)\n engine = create_engine(link, encoding='utf-8')\n map_data = self.dict_to_json(map_data)\n map_data['geometry'] = map_data['geometry'].apply(lambda x: WKTElement(x.wkt, 4326))\n # Maybe you want to change 'replace' to 'append' in the future\n map_data.to_sql(\n name=table_name,\n con=engine,\n if_exists='replace',\n dtype={'geometry': Geometry(geometry_type=geo_type, srid=4326)}\n )",
"def get_geometry(self):\n rows, cols = self.get_gridspec().get_geometry()\n return rows, cols, self.num1, self.num2",
"def create_postgis_template():\n\n require('environment', provided_by=env.environments)\n share_dir = run('pg_config --sharedir').strip()\n env.postgis_path = '%s/contrib' % share_dir\n sudo('createdb -E UTF8 %(template_db)s' % env, user='postgres')\n sudo('createlang -d %(template_db)s plpgsql' % env, user='postgres')\n # Allows non-superusers the ability to create from this template\n sudo('psql -d postgres -c \"UPDATE pg_database SET datistemplate=\\'true\\' WHERE datname=\\'%(template_db)s\\';\"' % env, user='postgres')\n # Loading the PostGIS SQL routines\n sudo('psql -d %(template_db)s -f %(postgis_path)s/postgis.sql' % env, user='postgres')\n sudo('psql -d %(template_db)s -f %(postgis_path)s/spatial_ref_sys.sql' % env, user='postgres')\n # Enabling users to alter spatial tables.\n sudo('psql -d %(template_db)s -c \"GRANT ALL ON geometry_columns TO PUBLIC;\"' % env, user='postgres')\n #sudo('psql -d %(template_db)s -c \"GRANT ALL ON geography_columns TO PUBLIC;\"' % env, user='postgres')\n sudo('psql -d %(template_db)s -c \"GRANT ALL ON spatial_ref_sys TO PUBLIC;\"' % env, user='postgres')"
] | [
"0.67692256",
"0.675476",
"0.62519324",
"0.60473233",
"0.5883852",
"0.58504605",
"0.5701166",
"0.5491535",
"0.5480299",
"0.5475951",
"0.5452421",
"0.53603435",
"0.5338191",
"0.5314427",
"0.52559847",
"0.52456456",
"0.5243877",
"0.5240255",
"0.52377796",
"0.5215144",
"0.521342",
"0.5204098",
"0.5203814",
"0.5194389",
"0.5186381",
"0.5128195",
"0.5115994",
"0.50956637",
"0.5084397",
"0.5065147"
] | 0.696644 | 0 |
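A hedged, minimal sketch of the raw-SQL lookups in the `postgis_metadata` snippet above, written with bound parameters instead of string formatting; the connection URL is a placeholder, and the query only returns rows against a real PostGIS database with a populated geometry_columns view.

from sqlalchemy import create_engine, text

# placeholder DSN -- replace with a reachable PostGIS database before running
engine = create_engine('postgresql+psycopg2://user:secret@localhost:5432/gisdb')

sql = text(
    "SELECT f_geometry_column, srid, type "
    "FROM geometry_columns "
    "WHERE f_table_schema = :schema AND f_table_name = :table"
)

with engine.connect() as conn:
    result = conn.execute(sql, {'schema': 'public', 'table': 'roads'})
    geometry_columns = [
        {
            'geometry_column': row.f_geometry_column,
            'geometry_type': row.type,
            'srid': row.srid,
        }
        for row in result
    ]

print(geometry_columns)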
Return SQLAlchemy engine for a data_source. | def engine_for_data_source(self, data_source_id):
engine = None
# find data_source
DataSource = self.config_models.model('data_source')
session = self.config_models.session()
query = session.query(DataSource) \
.filter_by(gdi_oid=data_source_id)
data_source = query.first()
session.close()
if data_source is not None:
engine = self.db_engine.db_engine(data_source.connection)
return engine | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_database_engine() -> Engine:\n return engine",
"def get_db_engine():\n # get database connection url\n connection_url = get_db_connection_url()\n\n # Create engine from connection url\n engine = create_engine(connection_url)\n\n return engine",
"def get_engine(self, db_name):\n pass",
"def get_sql_engine(cls, db_uri: str) -> Engine:\n return create_engine(db_uri)",
"def get_engine(db_params: Dict[str, str]) -> sa.engine:\r\n db_uri = get_uri(db_params)\r\n return sa.create_engine(db_uri)",
"def _get_engine(**kwargs):\n engine_name = 'MySQL'\n return engine_name",
"def get_engine(db_url):\n check_db_url(db_url)\n return create_engine(db_url)",
"def get_engine(self, connection_string):\n if connection_string not in sqlengines:\n sqlengines[connection_string] = create_engine(\n self.get_connection_string())\n return sqlengines[connection_string]",
"def get_engine(self, connection_string):\n if connection_string not in sqlengines:\n sqlengines[connection_string] = create_engine(\n self.get_connection_string())\n return sqlengines[connection_string]",
"def get_engine(self):\n\t\treturn self.__engine",
"def shared_db_engine_with_source_data(shared_db_engine):\n populate_source_data(shared_db_engine)\n yield shared_db_engine",
"def get_engine(username, password, ipaddress, database):\n #TODO(rnirmal):Based on permissions issues being resolved we may revert\n #url = URL(drivername='mysql', host='localhost',\n # query={'read_default_file': '/etc/mysql/my.cnf'})\n global ENGINE\n if ENGINE:\n return ENGINE\n if database:\n ENGINE = sqlalchemy.create_engine(\"mysql://%s:%s@%s:3306/%s\" %\n (username, password, ipaddress,database),\n pool_recycle=7200,\n listeners=[KeepAliveConnection()])\n else:\n ENGINE = sqlalchemy.create_engine(\"mysql://%s:%s@%s:3306\" %\n (username, password, ipaddress),\n pool_recycle=7200,\n listeners=[KeepAliveConnection()])\n return ENGINE",
"def engine(db_url=None):\n db_url = db_url or os.getenv(\"DB_URL\")\n if not db_url:\n raise ValueError(\"database URL is required\")\n print(f\"Returning an engine for {db_url}\")\n return create_engine(db_url)",
"def get_engine(settings: dict) -> sqlalchemy.engine.base.Engine:\n engine = create_engine(settings['sqlalchemy.url'], pool_recycle=3600)\n return engine",
"def create_engine(self):\n connection_string = f'postgresql://{self.user}:{self.password}@{self.host}/{self.database_name}'\n return create_engine(connection_string)",
"def get_engine(self):\n return self._engine",
"def engine(self):\n return self._engine",
"def get_engine(db_credentials):\n\n url = 'postgresql://{user}:{passwd}@{host}:{port}/{db}'.format(\n user=db_credentials['user'], passwd=db_credentials['pwd'], host=db_credentials['host'], \n port=db_credentials['port'], db=db_credentials['db'])\n engine = create_engine(url, pool_size = 50)\n \n return engine",
"def sql_alch_engine(tunnel):\n\n port = str(tunnel.local_bind_port)\n\n # Create a database connection using sqlalchemy\n connection_addr = ('postgresql://'\n + config.dbreddit['user']\n + ':'\n + config.dbreddit['password']\n + '@localhost:'\n + port\n + '/'\n + config.dbreddit['dbname'])\n try:\n engine = create_engine(connection_addr)\n return engine\n except Exception as e:\n print(e)",
"def get_engine(prefix=KEY_PREFIX, db=REDIS_DB, engine_class=None):\n if engine_class is None:\n engine_class = redis_completion.RedisEngine\n engine = engine_class(prefix='netdevices', db=1)\n return engine",
"def logic_db_engine(self):\n try:\n boto_session = boto3.Session(profile_name='loidsig')\n except:\n boto_session = boto3.Session()\n sm_client = boto_session.client(\n service_name='secretsmanager',\n region_name='us-east-1',\n endpoint_url='https://secretsmanager.us-east-1.amazonaws.com'\n )\n get_secret_value_response = sm_client.get_secret_value(SecretId='Loidsig_DB')\n cred_dict = ast.literal_eval(get_secret_value_response['SecretString'])\n db_user, db_pass = cred_dict['username'], cred_dict['password']\n db_host, db_port, db_name = cred_dict['host'], cred_dict['port'], cred_dict['dbname']\n try:\n postgres_engine = create_engine(f'postgresql://{db_user}:{db_pass}@{db_host}:{db_port}/{db_name}')\n except Exception as e:\n print(\"Unable to connect to postgres! Error: {}\".format(e))\n raise\n return postgres_engine",
"def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))",
"def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))",
"def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))",
"def db_connect():\n return create_engine(get_project_settings().get(\"CONNECTION_STRING\"))",
"def create_engine(self):\n return create_engine('sqlite:///' + self.database_name, echo=True)",
"def register_engine(self, poolclass=NullPool):\r\n engine = create_engine('{conn}{db}'.format(db=common.TEST_DATABASE,\r\n conn=common.DB_CONNECTION),\r\n poolclass=poolclass)\r\n BASE.metadata.bind = engine\r\n return engine",
"def get_engine(self):\n return str(self.engine)",
"def db_connect():\n return create_engine(URL(**product_crawlers.settings.DATABASE))",
"def engine(self) -> str:\n return self._engine"
] | [
"0.7026651",
"0.6842614",
"0.67209196",
"0.6679287",
"0.6633262",
"0.6460196",
"0.6328281",
"0.6285054",
"0.6285054",
"0.62150884",
"0.61740327",
"0.61366487",
"0.61350334",
"0.6116982",
"0.60951006",
"0.60835916",
"0.6050043",
"0.5965481",
"0.5956415",
"0.59335893",
"0.5884907",
"0.58635217",
"0.58635217",
"0.58635217",
"0.58635217",
"0.5830086",
"0.5793456",
"0.5791313",
"0.5707873",
"0.56947744"
] | 0.87228316 | 0 |
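The retrieved implementation above resolves a data source id through config models before handing its connection string to a db-engine helper. As a rough, self-contained sketch of the same idea (not the code above), the lookup can be reduced to a plain dictionary plus a cached `sqlalchemy.create_engine` call; the `DATA_SOURCES` mapping and the cache are assumptions made only for this illustration.

```python
from typing import Dict, Optional

from sqlalchemy import create_engine
from sqlalchemy.engine import Engine

# Hypothetical registry of data sources keyed by id (assumption for this sketch only).
DATA_SOURCES: Dict[int, str] = {
    1: "postgresql://user:secret@localhost:5432/gis",
}

_engine_cache: Dict[int, Engine] = {}


def engine_for_data_source(data_source_id: int) -> Optional[Engine]:
    """Return a cached SQLAlchemy engine for a data source id, or None if unknown."""
    connection = DATA_SOURCES.get(data_source_id)
    if connection is None:
        return None
    if data_source_id not in _engine_cache:
        # pool_pre_ping guards against stale connections in long-lived services
        _engine_cache[data_source_id] = create_engine(connection, pool_pre_ping=True)
    return _engine_cache[data_source_id]
```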
Recursively check if layer is a WMS layer. | def layer_in_ows(self, ows_layer, root_layer):
if root_layer is None:
# no WMS root layer
return False
in_wms = False
# get parent groups
parents = [p.group for p in ows_layer.parents]
for parent in parents:
if parent.gdi_oid == root_layer.gdi_oid:
# parent is WMS root layer
in_wms = True
else:
# check if parent group is a WMS layer
in_wms = in_wms or self.layer_in_ows(parent, root_layer)
if in_wms:
break
return in_wms | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_layer(obj):\n # TODO(b/110718070): Replace with isinstance(obj, base_layer.Layer).\n return hasattr(obj, \"_is_layer\") and not isinstance(obj, type)",
"def is_feature_layer(layer):\n return getattr(layer, '_is_feature_layer', False)",
"def IsLayer(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_IsLayer(self, *args)",
"def test_ww_layer_iterator(self):\n\t\t\n\t\texpected_num_layers = 21 # I think 16 is the flattened layer\n\t\tlayer_iterator = ww.WeightWatcher().make_layer_iterator(self.model)\n\t\t\n\t\tself.assertTrue(layer_iterator is not None)\n\t\tnum_layers = 0\n\t\tfor ww_layer in layer_iterator:\n\t\t\tnum_layers += 1\n\t\tself.assertEqual(expected_num_layers, num_layers)\n\t\t\n\t\t\n\t\texpected_type = \"<class 'weightwatcher.weightwatcher.WWLayer'>\"\n\t\tactual_type = str(type(ww_layer))\n\t\tself.assertEqual(expected_type, actual_type)",
"def test_ww_layer_attributes(self):\n\t\t\n\t\tww_layer = self._get_resnet_fc_layer()\n\t\t\t\t\t\n\t\texpected_type = \"<class 'weightwatcher.weightwatcher.WWLayer'>\"\n\t\tactual_type = str(type(ww_layer))\n\t\tself.assertEqual(expected_type, actual_type)\n\t\t\n\t\t# RESET FOR WW_FLATFILES vs PYSTATEDICT vs ...\n\t\texpected_name = self.fc_layer_name \n\t\tactual_name = ww_layer.name\n\t\tself.assertEqual(expected_name, actual_name)\n\t\t\n\t\tframework_layer = ww_layer.framework_layer\n\t\tself.assertTrue(framework_layer is not None)\n\t\t\n\t\t# RESET FOR WW_FLATFILES vs PYSTATEDICT vs ...\n\t\texpected_type = self.fc_layer_type \n\t\tactual_type = str(type(framework_layer))\n\t\tself.assertEqual(expected_type, actual_type)\n\t\n\t\tself.assertEqual(ww_layer.name, framework_layer.name)\n\t\t\n\t\t\n\t\thas_weights, weights, has_biases, biases = ww_layer.get_weights_and_biases()\n\t\tself.assertTrue(has_weights)\n\t\tself.assertTrue(has_biases)\n\t\tself.assertTrue(weights is not None)\n\t\tself.assertTrue(biases is not None)\n\t\t\n\t\texpected_W_shape = (1000, 512)\n\t\texpected_B_shape = (1000,)\n\t\tactual_W_shape = weights.shape\n\t\tactual_B_shape = biases.shape\n\t\t\n\t\tself.assertEqual(expected_W_shape, actual_W_shape)\n\t\tself.assertEqual(expected_B_shape, actual_B_shape)\n\t\t\n\t\treturn",
"def check_layer(self, service: Service):\n wms_helper = WmsHelper(service)\n urls_to_check = [\n (wms_helper.get_get_map_url(), True),\n (wms_helper.get_get_styles_url(), False),\n (wms_helper.get_get_feature_info_url(), False),\n (wms_helper.get_describe_layer_url(), False),\n ]\n for url in urls_to_check:\n if url[0] is None:\n continue\n self.check_service(url[0], check_image=url[1])",
"def test_ww_layer_iterator(self):\n\n\t\texpected_num_layers = 21 # I think 16 is the flattened layer\n\t\tlayer_iterator = ww.WeightWatcher().make_layer_iterator(self.model)\n\t\tself.assertTrue(layer_iterator is not None)\n\t\tnum_layers = 0\n\t\tfor ww_layer in layer_iterator:\n\t\t\tnum_layers += 1\n\t\tself.assertEqual(expected_num_layers, num_layers)\n\t\t\n\t\t\n\t\texpected_type = \"<class 'weightwatcher.weightwatcher.WWLayer'>\"\n\t\tactual_type = str(type(ww_layer))\n\t\tself.assertEqual(expected_type, actual_type)",
"def _check_layer_exists(self) -> None:\n layer_exists = (\n self.viewer.layer_dict[self.layer_type][self.layer_name][\n self.layer_subtype\n ][\"layer\"]\n is not None\n )\n # hide button if layer doesn't exist\n if layer_exists:\n self.layout.display = \"block\"\n else:\n self.layout.display = \"none\"\n self.logger.debug(\n (\n \"LayerButtonWidget hidden for %s of %s. \"\n \"(type: %s). Layer doesn't exist.\"\n ),\n self.layer_subtype,\n self.layer_name,\n self.layer_type,\n )",
"def test_ww_layer_iterator(self):\n\t\t\n\t\t# this wont work for Resnet models because we dont support lazy loading of Conv2D yet\n\t\t\t\t\n\t\tlogger = logging.getLogger(ww.__name__)\n\t\tlogger.setLevel(logging.DEBUG)\n\t\t\n\t\texpected_num_layers = 21 # I think 16 is the flattened layer\n\n\t\tlayer_iterator = ww.WeightWatcher().make_layer_iterator(self.model)\n\t\t\n\t\tself.assertTrue(layer_iterator is not None)\n\t\tnum_layers = 0\n\t\tfor ww_layer in layer_iterator:\n\t\t\tnum_layers += 1\n\t\tself.assertEqual(expected_num_layers, num_layers)\n\t\tprint(num_layers)\n\t\t\n\t\t\n\t\texpected_type = \"<class 'weightwatcher.weightwatcher.WWLayer'>\"\n\t\tactual_type = str(type(ww_layer))\n\t\tself.assertEqual(expected_type, actual_type)\n\n\t\t\n\t\treturn",
"def has_wcs(self):\n return self.wcs is not None",
"def test_addon_layer(self):\n layers = [l.getName() for l in registered_layers()]\n self.assertIn('IBriefyPloneLayer', layers)",
"def check_layer_name(field):\n \n hygienize = field.replace(\"\\\"\", \"\")\n layer_name = (hygienize.split(\".\"))[0]\n \n if layer_name in layer_names:\n return True\n return False",
"def check_layers(self, layer_param, params, permitted_layers, mandatory):\n exception = None\n\n requested_layers = params.get(layer_param)\n if requested_layers:\n requested_layers = requested_layers.split(',')\n for layer in requested_layers:\n # allow only permitted layers\n if layer and not layer.startswith('EXTERNAL_WMS:') and layer not in permitted_layers:\n exception = {\n 'code': \"LayerNotDefined\",\n 'message': (\n 'Layer \"%s\" does not exist or is not permitted'\n % layer\n )\n }\n break\n elif mandatory:\n # mandatory layers param is missing or blank\n exception = {\n 'code': \"MissingParameterValue\",\n 'message': (\n '%s is mandatory for %s operation'\n % (layer_param, params.get('REQUEST'))\n )\n }\n\n return exception",
"def IsRenderLayersOn(self):\n\n renderLayers = pm.ls(exactType=\"renderLayer\")\n referenceLayers = pm.ls(exactType=\"renderLayer\", rn=1)\n return ((len(renderLayers) - len(referenceLayers)) > 1)",
"def test_ww_layer_iterator(self):\n\t\t\n\t\texpected_num_layers = 16\n\t\tlayer_iterator = ww.WeightWatcher().make_layer_iterator(self.model)\n\t\tself.assertTrue(layer_iterator is not None)\n\t\tnum_layers = 0\n\t\tfor ww_layer in layer_iterator:\n\t\t\tnum_layers += 1\n\t\t\tprint(ww_layer)\n\t\tself.assertEqual(expected_num_layers, num_layers)\n\t\t\n\t\t\n\t\texpected_type = \"<class 'weightwatcher.weightwatcher.WWLayer'>\"\n\t\tactual_type = str(type(ww_layer))\n\t\tself.assertEqual(expected_type, actual_type)",
"def has_weights(obj):\n # TODO(b/110718070): Replace with isinstance(obj, base_layer.Layer).\n has_weight = (hasattr(type(obj), \"trainable_weights\")\n and hasattr(type(obj), \"non_trainable_weights\"))\n\n return has_weight and not isinstance(obj, type)",
"def check_recursive(self, summary_list: List[\"LayerInfo\"]) -> None:\n if list(self.module.named_parameters()):\n for other_layer in summary_list:\n if self.layer_id == other_layer.layer_id:\n self.is_recursive = True",
"def test_model_layer_types_ww2x(self):\n \n\t\tdetails = self.watcher.describe(pool=False, min_evals=1)\n\t\t\n\t\tdenseLayers = details[details.layer_type==str(LAYER_TYPE.DENSE)]\n\t\tdenseCount = len(denseLayers)\n\t\tself.assertEqual(denseCount, 3, \"3 dense layers, but {} found\".format(denseCount))\n \t\t\t\n\t\n\t\tconv2DLayers = details[details.layer_type==str(LAYER_TYPE.CONV2D)]\n\t\tconv2DCount = len(conv2DLayers)\n\t\tself.assertEqual(conv2DCount, 8*9, \"8*9 conv2D layers, but {} found\".format(denseCount))",
"def check_conv(extract):\n call = extract\n clip_found = False\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n elif call.op.name == \"nn.relu\":\n call = call.args[0]\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n elif call.op.name == \"clip\":\n clip_found = True\n if call.attrs[\"a_min\"] != 0.0 or call.attrs[\"a_max\"] != 6.0:\n return False\n call = call.args[0]\n if isinstance(call, tvm.relay.expr.TupleGetItem):\n call = call.tuple_value\n\n while call.op.name != \"nn.conv2d\":\n call = call.args[0]\n\n attrs, args = call.attrs, call.args\n if attrs.data_layout != \"NCHW\":\n return False\n\n if (\n (not clip_found)\n and (attrs.kernel_size[0] == 3)\n and (attrs.dilation[0] != 1)\n and (attrs.groups != 1)\n and (attrs.channels == attrs.groups)\n ):\n return False\n\n data_typ = args[0].checked_type\n kernel_typ = args[1].checked_type\n is_depthwise = is_depthwise_conv2d(\n data_typ.shape,\n attrs[\"data_layout\"],\n kernel_typ.shape,\n attrs[\"kernel_layout\"],\n attrs[\"groups\"],\n )\n if attrs.groups != 1 and not is_depthwise:\n return False\n return True",
"def is_layering(layering):\n def filt(item):\n return filter_by_layering(item, layering)\n return filt",
"def is_norm(layer, exclude=None):\r\n if exclude is not None:\r\n if not isinstance(exclude, tuple):\r\n exclude = (exclude, )\r\n if not isinstance(exclude, tuple):\r\n raise TypeError(\r\n f'\"exclude\" must be either None or type or a tuple of types, '\r\n f'but got {type(exclude)}: {exclude}')\r\n\r\n if exclude and isinstance(layer, exclude):\r\n return False\r\n\r\n all_norm_bases = (_BatchNorm, _InstanceNorm, nn.GroupNorm, nn.LayerNorm)\r\n return isinstance(layer, all_norm_bases)",
"def _check_wcs_structure(self, wcs):\n if wcs is None:\n return False, \"WCS cannot be None.\"\n\n if not wcs.is_celestial:\n return False, \"WCS must be exclusively a celestial WCS.\"\n\n wcs = wcs.deepcopy()\n naxis1, naxis2 = wcs.pixel_shape\n\n # check mapping of corners and CRPIX:\n pts = np.array([[1.0, 1.0], [1.0, naxis2], [naxis1, 1.0],\n [naxis1, naxis2], wcs.wcs.crpix])\n\n sky_all = wcs.all_pix2world(pts, 1)\n foc_all = wcs.pix2foc(pts, 1)\n\n # strip all *known* distortions:\n wcs.cpdis1 = None\n wcs.cpdis2 = None\n wcs.det2im1 = None\n wcs.det2im2 = None\n wcs.sip = None\n\n # check that pix2foc includes no other distortions besides the ones\n # that we have turned off above:\n if not np.allclose(pts, wcs.pix2foc(pts, 1)):\n False, \"'pix2foc' contains unknown distortions\"\n\n wcs.wcs.set()\n\n # check that pix2foc contains all known distortions:\n if not np.allclose(wcs.all_world2pix(sky_all, 1), foc_all, atol=1e-3,\n rtol=0):\n return False, \"'WCS.pix2foc()' does not include all distortions.\"\n\n return True, ''",
"def test_addon_layer(self):\n from spirit.plone.theming.interfaces import ISpiritPloneThemingLayer\n self.assertIn(ISpiritPloneThemingLayer, registered_layers())",
"def check_shape(layer1, layer2, attr):\n attr1 = getattr(layer1, attr, None)\n attr2 = getattr(layer2, attr, None)\n if not attr1:\n return not attr2\n return all(attr1.shape.eval() == attr2.shape.eval())",
"def _match_layer(self, layer, pattern):\n\n if self.candidate_layers and \\\n layer['config']['name'] not in self.candidate_layers:\n return False\n\n if not self._match_pattern(layer['class_name'], pattern.class_name):\n return False\n\n layer_config = layer['config']\n for key, value in pattern.config.items():\n # Either the provided value should equal the config value, or\n # be a regex match to str(value).\n if not (self._match_pattern(str(layer_config.get(key)), str(value)) or \\\n layer_config.get(key) == value):\n return False\n\n return True",
"def is_wcsaxes(axes):\n return isinstance(axes, wcsaxes.WCSAxes)",
"def test_all_layer_types(self):\n\n\t\tdetails = self.watcher.describe()\n\t\t\n\t\tdenseLayers = details[details.layer_type==str(LAYER_TYPE.DENSE)]\n\t\tdenseCount = len(denseLayers)\n\t\tself.assertEqual(denseCount, 3, \"3 dense layers, but {} found\".format(denseCount))\t\t\n\t\n\t\tconv2DLayers = details[details.layer_type==str(LAYER_TYPE.CONV2D)]\n\t\tconv2DCount = len(conv2DLayers)\n\t\tself.assertEqual(conv2DCount, 8, \"8 conv2D layers, but {} found\".format(denseCount))",
"def is_wdl(schema_obj):\n\n if isinstance(schema_obj, schema.Field):\n if schema_obj.data_type == schema.Field.DataType.ENUM:\n return is_wdl(schema_obj.enum_type)\n elif schema_obj.data_type == schema.Field.DataType.STRUCT:\n return is_wdl(schema_obj.struct_type)\n else:\n wdl_prefixes = (\n 'wdl.',\n 'weave.common.',\n )\n return schema_obj.full_name.startswith(wdl_prefixes)",
"def find_content_layer(psd):\n for layer in psd.descendants():\n if layer.kind == 'smartobject':\n return layer\n\n return None",
"def is_under_main_root(self, workunit):\r\n return workunit.root() == self._main_root_workunit"
] | [
"0.69694364",
"0.6235193",
"0.6218354",
"0.593918",
"0.5923161",
"0.58760947",
"0.5860823",
"0.5807114",
"0.5681459",
"0.55938524",
"0.5574623",
"0.5512457",
"0.5449315",
"0.54430646",
"0.53812546",
"0.53716415",
"0.53391325",
"0.5313924",
"0.5287635",
"0.52501976",
"0.52482057",
"0.5234495",
"0.52324563",
"0.5230119",
"0.52080685",
"0.51624733",
"0.5129059",
"0.51270884",
"0.51226974",
"0.5075568"
] | 0.76703054 | 0 |
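The positive document for this record walks parent groups recursively until it reaches the WMS root layer. A minimal sketch of that traversal, with a small dataclass standing in for the ORM objects (the `Layer` type and the sample hierarchy are assumptions for the example):

```python
from dataclasses import dataclass, field
from typing import List, Optional


@dataclass
class Layer:
    """Tiny stand-in for the ORM layer/group objects used in the record above."""
    name: str
    parents: List["Layer"] = field(default_factory=list)


def layer_in_ows(layer: Layer, root_layer: Optional[Layer]) -> bool:
    """Return True if `layer` is reachable from the WMS root via its parent groups."""
    if root_layer is None:
        return False
    for parent in layer.parents:
        if parent is root_layer or layer_in_ows(parent, root_layer):
            return True
    return False


root = Layer("wms_root")
group = Layer("background", parents=[root])
leaf = Layer("hillshade", parents=[group])
assert layer_in_ows(leaf, root)       # reachable through the parent group
assert not layer_in_ows(root, group)  # the root itself has no parents
```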
Return ows_metadata for a layer. | def ows_metadata(self, layer):
ows_metadata = {}
if layer.ows_metadata:
try:
# load JSON from ows_metadata
ows_metadata = json.loads(layer.ows_metadata)
except ValueError as e:
self.logger.warning(
"Invalid JSON in ows_metadata of layer %s: %s" %
(layer.name, e)
)
return ows_metadata | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def dataproduct_metadata(self, ows_layer, permissions, session):\n metadata = {}\n\n # type\n sublayers = None\n data_set_view = None\n searchterms = []\n if ows_layer.type == 'group':\n if ows_layer.name not in permissions.get('group_layers', []):\n # group layer not permitted\n return (metadata, searchterms)\n\n if ows_layer.facade:\n dataproduct_type = 'facadelayer'\n else:\n dataproduct_type = 'layergroup'\n\n # collect sub layers\n sublayers = []\n for group_layer in ows_layer.sub_layers:\n sub_layer = group_layer.sub_layer\n submetadata, subsearchterms = self.dataproduct_metadata(\n sub_layer, permissions, session\n )\n if submetadata:\n sublayers.append(submetadata)\n searchterms += subsearchterms\n\n if not sublayers:\n # sub layers not permitted, remove empty group\n return (metadata, searchterms)\n else:\n if ows_layer.name not in permissions.get('data_layers', []):\n # data layer not permitted\n return (metadata, searchterms)\n\n dataproduct_type = 'datasetview'\n # find matching DataSetView\n DataSetView = self.config_models.model('data_set_view')\n query = session.query(DataSetView).filter_by(name=ows_layer.name)\n data_set_view = query.first()\n\n contacts = self.dataproduct_contacts(ows_layer, session)\n datasource = self.dataproduct_datasource(ows_layer, session)\n wms_datasource = self.dataproduct_wms(ows_layer, session)\n ows_metadata = self.ows_metadata(ows_layer)\n description = ows_metadata.get('abstract')\n\n # qml\n qml = None\n if ows_layer.type == 'data':\n qml = ows_layer.client_qgs_style or ows_layer.qgs_style\n # embed any uploaded symbols in QML\n qml = self.update_qml(qml)\n\n metadata = {\n 'identifier': ows_layer.name,\n 'display': ows_layer.title,\n 'type': dataproduct_type,\n 'synonyms': self.split_values(ows_layer.synonyms),\n 'keywords': self.split_values(ows_layer.keywords),\n 'description': description,\n 'contacts': contacts,\n 'wms_datasource': wms_datasource,\n 'qml': qml,\n 'sublayers': sublayers\n }\n if data_set_view:\n if data_set_view.facet:\n metadata.update({\n 'searchterms': [data_set_view.facet]\n })\n searchterms.append(data_set_view.facet)\n elif len(searchterms) > 0:\n metadata.update({\n 'searchterms': searchterms\n })\n metadata.update(datasource)\n\n return (metadata, searchterms)",
"def get_metadata(self):\n return gdal.Open(self.filename).GetMetadata()",
"def get_metadata_for(layer_index):\n try:\n layer = CatalogLayer.objects.get(id=layer_index)\n meta = layer.metadata\n except CatalogLayer.DoesNotExist:\n return {'success': 'false', 'message':\n '{0} is not a valid index for CatalogLayer'.format(layer_index)}\n except LayerMeta.DoesNotExist:\n return {'success': 'false', 'message':\n 'No metadata found for CatalogLayer {0}'.format(layer_index)}\n # fixme: is 'requested' actually useful?\n return {'success': 'true', 'requested': layer.serialize(),\n 'data': meta.serialize()}",
"def get_metadata(self):\n return self._metadata",
"def dataproduct_wms(self, ows_layer, session):\n wms_datasource = None\n\n # get WMS root layer\n root_layer = None\n WmsWfs = self.config_models.model('wms_wfs')\n query = session.query(WmsWfs).filter_by(ows_type='WMS')\n # eager load relation\n query = query.options(\n joinedload(WmsWfs.root_layer)\n )\n wms_wfs = query.first()\n if wms_wfs is not None:\n root_layer = wms_wfs.root_layer\n\n if self.layer_in_ows(ows_layer, root_layer):\n wms_datasource = {\n 'service_url': WMS_SERVICE_URL,\n 'name': ows_layer.name\n }\n\n return wms_datasource",
"def get_metadata(self):\n self.metadata = Metadata()\n document = openxmllib.openXmlDocument(path=self.path)\n self.metadata.add(document.allProperties, \"ooxml\")\n return self.metadata",
"def get_metadata(self) -> DeepDict:\n metadata = get_default_nwbfile_metadata()\n for interface in self.data_interface_objects.values():\n interface_metadata = interface.get_metadata()\n metadata = dict_deep_update(metadata, interface_metadata)\n return metadata",
"def get_metadata(self, scope, name, *, session: \"Session\"):\n if not json_implemented(session=session):\n raise NotImplementedError\n\n try:\n row = session.query(models.DidMeta).filter_by(scope=scope, name=name).one()\n meta = getattr(row, 'meta')\n return json_lib.loads(meta) if session.bind.dialect.name in ['oracle', 'sqlite'] else meta\n except NoResultFound:\n return {}",
"def get_metadata(self):\n return {}",
"def dataproduct_datasource(self, ows_layer, session):\n metadata = {}\n\n if ows_layer.type == 'group':\n # group layer\n return metadata\n\n data_set = ows_layer.data_set_view.data_set\n data_source = data_set.data_source\n if data_source.connection_type == 'database':\n # vector DataSet\n\n # get table metadata\n postgis_datasource = None\n pg_metadata = self.dataset_info(\n data_source.gdi_oid, data_set.data_set_name\n )\n if 'error' not in pg_metadata:\n data_set_name = \"%s.%s\" % (\n pg_metadata.get('schema'), pg_metadata.get('table')\n )\n\n primary_key = pg_metadata.get('primary_key')\n if primary_key is None:\n # get primary key if view\n primary_key = data_set.primary_key\n\n geom = {}\n if len(pg_metadata.get('geometry_columns')) > 1:\n used_col = ows_layer.data_set_view.geometry_column\n for geom_col in pg_metadata.get('geometry_columns'):\n # get used geometry column if multiple\n if geom_col.get('geometry_column') == used_col:\n geom = geom_col\n break\n elif len(pg_metadata.get('geometry_columns')) == 1:\n # use sole geometry column\n geom = pg_metadata.get('geometry_columns')[0]\n\n postgis_datasource = {\n 'dbconnection': data_source.connection,\n 'data_set_name': data_set_name,\n 'primary_key': primary_key,\n 'geometry_field': geom.get('geometry_column'),\n 'geometry_type': geom.get('geometry_type'),\n 'srid': geom.get('srid')\n }\n else:\n # show error message\n postgis_datasource = {\n 'error': pg_metadata.get('error')\n }\n\n metadata = {\n 'bbox': DEFAULT_EXTENT,\n 'crs': 'EPSG:2056',\n 'datatype': 'vector',\n 'postgis_datasource': postgis_datasource\n }\n else:\n # raster DataSet\n\n # modify connection dir\n connection = re.sub(\n RASTER_DATASOURCE_PATTERN, RASTER_DATASOURCE_REPL,\n data_source.connection\n )\n # TODO: get srid\n srid = 'EPSG:2056'\n metadata = {\n 'datatype': 'raster',\n 'raster_datasource': {\n 'datasource': connection + data_set.data_set_name,\n 'srid': srid\n }\n }\n\n return metadata",
"def metadata(self):\n if self._open is not None:\n self._init_metadata()\n return self._metadata[self._metadata_root]\n else:\n return None",
"def get_metadata(self):\n session_path = Path(self.source_data['folder_path'])\n session_id = session_path.stem\n metadata = NeuroscopeRecordingInterface.get_ecephys_metadata(\n xml_file_path=str((session_path / f\"{session_id}.xml\").absolute())\n )\n metadata.update(UnitProperties=[])\n return metadata",
"def get_metadata(self):\n return self.client._perform_json(\n \"GET\", \"/projects/%s/recipes/%s/metadata\" % (self.project_key, self.recipe_name))",
"def get_metadata(self):\n return self.manager.get_metadata(self)",
"def getMetadata(self):\n metadata = {}\n document_properties = self.document_loaded.getDocumentProperties()\n user_defined_properties = document_properties.getUserDefinedProperties()\n for container in [document_properties, user_defined_properties]:\n for property_name in dir(container):\n if property_name in ('SupportedServiceNames',):\n continue\n property_value = getattr(container, property_name, '')\n if property_value:\n if isinstance(property_value, basestring):\n metadata[property_name] = property_value\n elif isinstance(property_value, tuple) and isinstance(property_value[0], basestring):\n metadata[property_name] = property_value\n else:\n try:\n if property_value.typeName == 'com.sun.star.util.DateTime':\n # It is a local time and we have no timezone information.\n datetime = \"%02d/%02d/%04d %02d:%02d:%02d\" % (property_value.Day, property_value.Month,\n property_value.Year, property_value.Hours, property_value.Minutes, property_value.Seconds)\n metadata[property_name] = datetime\n except AttributeError:\n pass\n\n service_manager = helper_util.getServiceManager(self.hostname, self.port,\n self.uno_path,\n self.office_binary_path)\n type_detection = service_manager.createInstance(\"com.sun.star.document.TypeDetection\")\n uno_file_access = service_manager.createInstance(\"com.sun.star.ucb.SimpleFileAccess\")\n doc = uno_file_access.openFileRead(self.systemPathToFileUrl(self.document_url))\n input_stream = self._createProperty(\"InputStream\", doc)\n open_new_view = self._createProperty(\"OpenNewView\", True)\n filter_name = type_detection.queryTypeByDescriptor((input_stream,\n open_new_view), True)[0]\n doc.closeInput()\n metadata['MIMEType'] = mimemapper[\"mimetype_by_filter_type\"].get(filter_name)\n return metadata",
"def metadata(self):\r\n return self._metadata",
"def _metadata(self) -> Dict[str, Any]:\n return self.__metadata",
"def metadata(self):\n raise NotImplementedError('yet')\n return OntGraphMetadata(self)",
"def metadata(self) -> global___SummaryMetadata:",
"def metadata(self) -> dict:\n return self._metadata",
"def metadata(self) -> 'outputs.DataCollectionEndpointResponseMetadata':\n return pulumi.get(self, \"metadata\")",
"def metadata(self):\n return self._metadata",
"def metadata(self):\n return self._metadata",
"def metadata(self):\n return self._metadata",
"def metadata(self):\n return self._metadata",
"def metadata(self):\n return self._metadata",
"def get_metadata(self,\n params: typing.Optional[typing.Mapping[str, str]] = None):\n raise NotImplementedError('This data connector does not provide metadata')",
"def _get_obs_metadata(self, cube):\n time_coord = cube.coord(self.time_coord)\n dates = [cube_time_converter(time, time_coord.units) \n for time in time_coord.points]\n \n area_bounds = self._area_inst.get_cube_area_bounds(cube, \n self.xy_coords)\n x_bounds = [area_bounds[self._area_inst.x_min], \n area_bounds[self._area_inst.x_max]]\n y_bounds = [area_bounds[self._area_inst.y_min], \n area_bounds[self._area_inst.y_max]]\n \n metadata = {} \n metadata['VARIABLE'] = cube.name()\n metadata['UNITS'] = str(cube.units)\n metadata['DATES'] = dates\n metadata[self.xy_coords[0].upper()+'_BOUNDS'] = x_bounds\n metadata[self.xy_coords[-1].upper()+'_BOUNDS'] = y_bounds\n \n # Find additional coordinates in cube and add them to metadata.\n for coord in cube.coords():\n if coord.name() not in self.unwanted_coords and \\\n coord.name() not in self._required_coords and \\\n coord.name() not in self.xy_coords:\n metadata[coord.name().upper()] = coord.points\n \n return metadata",
"def get_metadata(self):\n return copy.copy(self.metadata)",
"def _get_metadata(conn):\n metadata_sql = \"SELECT * FROM metadata\"\n metadata = conn.execute(metadata_sql).fetchall()\n return {\n row[0]: {\n \"name\": row[0],\n \"field_name\": row[1],\n \"field_expr\": row[1],\n \"type\": row[2],\n \"minimum\": row[3],\n \"maximum\": row[4],\n \"average\": row[5],\n \"variance\": row[6],\n \"total\": row[7],\n \"stddev\": row[8],\n \"other\": json.loads(row[9]) if row[9] else None,\n }\n for row in metadata\n }"
] | [
"0.6738899",
"0.62456894",
"0.59765214",
"0.56073684",
"0.5586877",
"0.5581771",
"0.5532094",
"0.5492781",
"0.54767776",
"0.54743665",
"0.5456338",
"0.54506356",
"0.54184407",
"0.53791165",
"0.5372126",
"0.53449667",
"0.53406215",
"0.5337875",
"0.5326182",
"0.5314207",
"0.5306864",
"0.5289555",
"0.5289555",
"0.5289555",
"0.5289555",
"0.5289555",
"0.5259639",
"0.5255813",
"0.5234465",
"0.5232396"
] | 0.81569564 | 0 |
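The `ows_metadata` helper above is essentially defensive JSON parsing with a logged warning. A standalone version of that pattern (function name and log wording are chosen for the example, not taken from the source project):

```python
import json
import logging

logger = logging.getLogger(__name__)


def parse_ows_metadata(raw: str, layer_name: str = "<unknown>") -> dict:
    """Parse a layer's ows_metadata JSON string, returning {} on empty or invalid input."""
    if not raw:
        return {}
    try:
        return json.loads(raw)
    except ValueError as e:  # json.JSONDecodeError subclasses ValueError
        logger.warning("Invalid JSON in ows_metadata of layer %s: %s", layer_name, e)
        return {}


print(parse_ows_metadata('{"abstract": "Cadastral parcels"}'))  # {'abstract': 'Cadastral parcels'}
print(parse_ows_metadata("{not valid json", "parcels"))         # {}
```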
Split comma separated values into list. | def split_values(self, value):
if value:
return [s.strip() for s in value.split(',')]
else:
return [] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def split_by_comma(s):\n return s.strip().split(\",\")",
"def separate_comma(s):\n return s.split(',')",
"def to_python(self, value):\n # Return an empty list if no input was given.\n if not value:\n return []\n return value.split(',')",
"def pure_list(comma_list):\n pure_items = []\n for comma_item in comma_list:\n for item in comma_item.split(','):\n pure_items.append(item)\n return pure_items",
"def split(self, text):\n\n return [x.strip() for x in text.split(\",\")]",
"def stringInputToList(x):\n return list(filter(None, [y.strip() for y in x.split(',')]))",
"def split_value(string):\n split = string.split(',')\n result = []\n\n level = 0\n buf = []\n for entry in split:\n level += entry.count('(')\n level -= entry.count(')')\n\n buf.append(entry)\n if level == 0:\n result.append(','.join(buf))\n buf = []\n return result",
"def split_values(value):\n try:\n result = dtype([conv(x) for x in value.split(',')])\n except:\n raise argparse.ArgumentTypeError('Expect comma-separated tuple')\n\n if num_items and len(result) != num_items:\n raise argparse.ArgumentTypeError('Expected {} items'.format(num_items))\n\n return result",
"def _providers_string_to_list(val):\n # Use a set to remove duplicates\n if type(val) == str:\n return list(set(val.replace(' ', '').split(',')))\n return list(set(val))",
"def getlist(x, y):\n return get(x, y).split(',')",
"def values(line):\n return [v.strip() or None for v in text(line).split(',')]",
"def split_device_list(devices: str) -> list:\n return devices.split(\",\")",
"def _convert_str_to_list(cls, v: Union[List[str], str]) -> List[str]:\n if isinstance(v, str):\n return v.split(\",\")\n return v # cov: ignore",
"def strToList(x):\n if type(x)==str:\n return [int(i) for i in x[1:-1].split(\", \")]",
"def convert_comma_separated_str_to_list(input_str: str, trim: bool = True) -> List[str]:\n comma_separated_str = input_str.strip() if trim else input_str\n if not comma_separated_str:\n return []\n\n result = []\n for part_str in comma_separated_str.split(\",\"):\n value = part_str\n if trim:\n value = value.strip()\n if not value:\n continue\n result.append(value)\n return result",
"def from_csv_line(line):\r\n return line.strip().split(',')",
"def comma_list(s):\n\n return tuple(int(v) for v in s.split(\",\"))",
"def _split_parameters(self, parameters):\n if not parameters:\n return []\n return [parameter.strip() for parameter in parameters.split(', ')]",
"def split_choices(choices_string):\n return [x.strip() for x in choices_string.split(\",\") if x.strip()]",
"def split_name_values(param_items):\n return_list = list()\n for single_item in param_items:\n temp_list = [single_item[1]]\n temp_list.extend(clear_useless_end(single_item[2]).split(\",\"))\n return_list.append(temp_list)\n\n return return_list",
"def split_line(line: str) -> [str]:\n return line.strip().split(',')",
"def split(a):\r\n compos = [-1] # compos stores the positions of the relevant commas in the argument string\r\n compos.extend(t[2][1] for t in generate_tokens(StringIO(a).readline) if t[1] == ',')\r\n compos.append(len(a))\r\n return [ a[compos[i]+1:compos[i+1]] for i in xrange(len(compos)-1)]",
"def parse_comma_separated_list(\n value: str, regexp: Pattern[str] = COMMA_SEPARATED_LIST_RE\n) -> list[str]:\n assert isinstance(value, str), value\n\n separated = regexp.split(value)\n item_gen = (item.strip() for item in separated)\n return [item for item in item_gen if item]",
"def strToList(x):\n if type(x)==str:\n return x[2:-2].split(\"', '\")",
"def parse_csv_option(option):\n if option:\n return option.split(',')\n else:\n return []",
"def parse_csv_option(option):\n if option:\n return option.split(',')\n else:\n return []",
"def SplitValue(self, value):\n\n # Be lenient about an empty string.\n if value == \"\":\n return (\"\", \"\")\n # Break it in half.\n elements = string.split(value, \";\", 1)\n # Unescape semicolons in both halves.\n elements = map(lambda e: string.replace(e, r\"\\;\", \";\"), elements) \n return elements",
"def SplitValue(self, value):\n\n # Be lenient about an empty string.\n if value == \"\":\n return (\"\", \"\")\n # Break it in half.\n elements = string.split(value, \";\", 1)\n # Unescape semicolons in both halves.\n elements = map(lambda e: string.replace(e, r\"\\;\", \";\"), elements) \n return elements",
"def split_line(line):\n if ',' in line:\n return [a.strip() for a in line.split(',')]\n return line.split()",
"def convert_string_to_list(string_val):\n result_list = []\n\n list_string = string_val.split(',')\n for val in list_string:\n val = str(val.strip())\n val = val.replace(\"(\", \"\")\n val = val.replace(\")\", \"\")\n val = val.replace(\"L\", \"\")\n val = val.replace(\"[\", \"\")\n val = val.replace(\"]\", \"\")\n if val not in (\"\", \"None\"):\n result_list.append(int(val))\n\n return result_list"
] | [
"0.78025526",
"0.76226556",
"0.7475591",
"0.72795135",
"0.7144818",
"0.69944984",
"0.69677824",
"0.6933669",
"0.6908274",
"0.68442553",
"0.6830242",
"0.6824286",
"0.67987007",
"0.67562747",
"0.6737418",
"0.6710399",
"0.67035455",
"0.66600406",
"0.6642529",
"0.6638843",
"0.66069144",
"0.6580775",
"0.65747386",
"0.6553147",
"0.6523621",
"0.6523621",
"0.64756656",
"0.64756656",
"0.64709073",
"0.646057"
] | 0.8507184 | 0 |
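For the comma-splitting record, the behaviour is easiest to see from a short usage run; the sketch below mirrors the positive document and simply demonstrates the empty/None handling (the sample inputs are invented):

```python
def split_values(value):
    """Split a comma separated string into a list of trimmed parts; [] for empty or None."""
    if value:
        return [s.strip() for s in value.split(",")]
    return []


print(split_values("roads, parcels ,buildings"))  # ['roads', 'parcels', 'buildings']
print(split_values(""))                           # []
print(split_values(None))                         # []
```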
Update QML with embedded symbols. | def update_qml(self, qml):
if qml is None:
return qml
try:
# parse XML
root = ElementTree.fromstring(qml)
# embed symbols
self.embed_qml_symbols(root, 'SvgMarker', 'name')
self.embed_qml_symbols(root, 'SVGFill', 'svgFile')
self.embed_qml_symbols(root, 'RasterFill', 'imageFile')
# return updated QML
qml = ElementTree.tostring(
root, encoding='utf-8', method='xml'
)
return qml.decode()
except Exception as e:
self.logger.warning(
"Could not embed QML symbols:\n%s" % e
)
return qml | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def embed_qml_symbols(self, root, layer_class, prop_key):\n for svgprop in root.findall(\".//layer[@class='%s']/prop[@k='%s']\" %\n (layer_class, prop_key)):\n symbol_path = svgprop.get('v')\n path = os.path.abspath(\n os.path.join(QGS_RESOURCES_DIR, symbol_path)\n )\n\n # NOTE: assume symbols not included in ZIP are default symbols\n if os.path.exists(path):\n try:\n # read symbol data and convert to base64\n with open(path, 'rb') as f:\n symbol_data = base64.b64encode(f.read())\n\n # embed symbol in QML\n svgprop.set('v', \"base64:%s\" % symbol_data.decode())\n self.logger.info(\"Embed symbol in QML: %s\" % symbol_path)\n except Exception as e:\n self.logger.warning(\n \"Could not embed QML symbol %s:\\n%s\" % (symbol_path, e)\n )",
"def updateGraph(self, symbol=None):\n if symbol is None:\n return\n\n # Get all stock data back for the given symbol\n self.stock_data = self.db.queryAllData(table_name=symbol)\n\n # Create a list of prices and a list of dates\n self.prices = [x[1].strip('$') for x in self.stock_data]\n self.dates = [x[0] for x in self.stock_data]\n date_string = [x.strftime(\"%m/%d/%Y\") for x in self.dates]\n self.x = [datetime.datetime.strptime(d, '%m/%d/%Y').date()\n for d in date_string]\n\n # Create an instance of QtMpl\n self.mpl = self.central.mpl\n self.mpl.addLine(x=self.x, y=self.prices, title=symbol)",
"def setup_ui(self):\n\n self.setWindowTitle(\"PyDM Symbol Widget Editor\")\n vlayout = QtWidgets.QVBoxLayout()\n vlayout.setContentsMargins(5, 5, 5, 5)\n vlayout.setSpacing(5)\n self.setLayout(vlayout)\n\n hlayout = QtWidgets.QHBoxLayout()\n hlayout.setContentsMargins(0, 0, 0, 0)\n hlayout.setSpacing(5)\n vlayout.addLayout(hlayout)\n\n # Creating the widgets for the buttons to add and\n # remove symbols\n list_frame = QtWidgets.QFrame(parent=self)\n list_frame.setMinimumHeight(300)\n list_frame.setMinimumWidth(300)\n list_frame.setLineWidth(1)\n list_frame.setFrameShadow(QtWidgets.QFrame.Raised)\n list_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\n lf_layout = QtWidgets.QVBoxLayout()\n list_frame.setLayout(lf_layout)\n\n lf_btn_layout = QtWidgets.QHBoxLayout()\n lf_btn_layout.setContentsMargins(0, 0, 0, 0)\n lf_btn_layout.setSpacing(5)\n\n self.btn_add_symbol = QtWidgets.QPushButton(parent=self)\n self.btn_add_symbol.setAutoDefault(False)\n self.btn_add_symbol.setDefault(False)\n self.btn_add_symbol.setText(\"Add Symbol\")\n self.btn_add_symbol.clicked.connect(self.add_symbol)\n\n self.btn_del_symbol = QtWidgets.QPushButton(parent=self)\n self.btn_del_symbol.setAutoDefault(False)\n self.btn_del_symbol.setDefault(False)\n self.btn_del_symbol.setText(\"Remove Symbol\")\n self.btn_del_symbol.clicked.connect(self.del_symbol)\n\n lf_btn_layout.addWidget(self.btn_add_symbol)\n lf_btn_layout.addWidget(self.btn_del_symbol)\n\n lf_layout.addLayout(lf_btn_layout)\n\n # Table containing the state/filename pairs which\n # will display the different symbols\n self.tbl_symbols = QtWidgets.QTableWidget()\n self.tbl_symbols.setShowGrid(True)\n self.tbl_symbols.setCornerButtonEnabled(False)\n headers = [\"State\", \"File\"]\n self.tbl_symbols.setColumnCount(len(headers))\n self.tbl_symbols.setHorizontalHeaderLabels(headers)\n header = self.tbl_symbols.horizontalHeader()\n header.setSectionResizeMode(0, QtWidgets.QHeaderView.ResizeToContents)\n header.setSectionResizeMode(1, QtWidgets.QHeaderView.Stretch)\n self.tbl_symbols.itemSelectionChanged.connect(self.load_from_list)\n self.tbl_symbols.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)\n self.tbl_symbols.setSelectionMode(QtWidgets.QAbstractItemView.SingleSelection)\n self.tbl_symbols.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)\n self.tbl_symbols.verticalHeader().setVisible(False)\n lf_layout.addWidget(self.tbl_symbols)\n\n hlayout.addWidget(list_frame)\n\n # Buttons to save or cancel changes made\n buttons_layout = QtWidgets.QHBoxLayout()\n save_btn = QtWidgets.QPushButton(\"Save\", parent=self)\n save_btn.setAutoDefault(False)\n save_btn.setDefault(False)\n save_btn.clicked.connect(self.saveChanges)\n cancel_btn = QtWidgets.QPushButton(\"Cancel\", parent=self)\n cancel_btn.setAutoDefault(False)\n cancel_btn.setDefault(False)\n cancel_btn.clicked.connect(self.cancelChanges)\n buttons_layout.addStretch()\n buttons_layout.addWidget(cancel_btn)\n buttons_layout.addWidget(save_btn)\n\n vlayout.addLayout(buttons_layout)\n\n # Creating the widgets that we will use to compose the\n # symbol parameters\n self.frm_edit = QtWidgets.QFrame()\n self.frm_edit.setEnabled(False)\n self.frm_edit.setLineWidth(1)\n self.frm_edit.setFrameShadow(QtWidgets.QFrame.Raised)\n self.frm_edit.setFrameShape(QtWidgets.QFrame.StyledPanel)\n\n frm_edit_layout = QtWidgets.QVBoxLayout()\n self.frm_edit.setLayout(frm_edit_layout)\n\n hlayout.addWidget(self.frm_edit)\n\n edit_name_layout = QtWidgets.QFormLayout()\n 
edit_name_layout.setFieldGrowthPolicy(QtWidgets.QFormLayout.ExpandingFieldsGrow)\n lbl_state = QtWidgets.QLabel(\"State:\")\n self.txt_state = QtWidgets.QLineEdit()\n self.txt_state.editingFinished.connect(self.state_changed)\n edit_name_layout.addRow(lbl_state, self.txt_state)\n lbl_file = QtWidgets.QLabel(\"File:\")\n self.txt_file = QtWidgets.QLineEdit()\n self.txt_file.textEdited.connect(self.file_changed)\n self.txt_file.returnPressed.connect(self.file_changed)\n edit_name_layout.addRow(lbl_file, self.txt_file)\n\n self.lbl_image = QtWidgets.QLabel()\n self.lbl_image.setWordWrap(True)\n self.lbl_image.setAlignment(Qt.AlignCenter)\n edit_name_layout.addRow(self.lbl_image)\n\n frm_edit_layout.addLayout(edit_name_layout)",
"def update_Q(self):",
"def Main():\n# print(dir(pyopenjtalk))\n\n# x, sr = pyopenjtalk.tts('なにか喋ります。', weight_f0=0.7)\n# ply = sa.play_buffer(x.astype(numpy.int16), 1, 2, sr)\n# ply.wait_done()\n# pyopenjtalk.tts('おめでとうございます。')\n# pyopenjtalk.synthesize(pyopenjtalk.extract_fullcontext('ありがとうございます。'))\n\n app = QApplication(sys.argv)\n connect = Connect()\n engine = QQmlApplicationEngine()\n ctx = engine.rootContext()\n ctx.setContextProperty(\"Connect\", connect)\n HERE = os.path.dirname(os.path.abspath(__file__))\n UI = os.path.join(HERE, 'talker.qml')\n# engine.load(UI)\n engine.load(QUrl(UI))\n if not engine.rootObjects(): sys.exit(-1)\n sys.exit(app.exec_())\n \"\"\"\n \"\"\"\n\n \"\"\"\n app = QApplication(sys.argv)\n view = QQuickView()\n HERE = os.path.dirname(os.path.abspath(__file__))\n UI = os.path.join(HERE, 'talker.qml')\n url = QUrl(UI)\n view.setSource(url)\n view.show()\n sys.exit(app.exec_())\n \"\"\"",
"def qml(self):\n return self.default_style(\"QML\")",
"def updateUI(self, updateRenderSetup=False):\n\n global rsUtility\n global currentSelection\n global propertyOverridesMode\n\n shaderUtility.update()\n\n q.getQItem(windowID, QtWidgets.QWidget)\n q.widget.setUpdatesEnabled(False) # Pause qt draw temporarily\n\n self.gwCustomRenamer.updateUI(updateWindow=False)\n\n # Update Render layer Setup\n\n if updateRenderSetup is True:\n if rsUtility.activeLayer.needsRefresh():\n rsUtility.activeLayer.apply()\n\n # Housekeeping:\n\n rsUtility.removeMissingSelections()\n\n # Reapply custom QT style:\n\n windowStyle.apply(windowStyle)\n\n # #############################################\n # Active/Visible Render Layer\n\n listItem = []\n currentName = \\\n renderSetup.instance().getVisibleRenderLayer().name()\n for l in renderSetup.instance().getRenderLayers():\n listItem.append(l.name())\n\n q.getQItem('%s_selectVisibleLayer' % windowID,\n QtWidgets.QWidget)\n\n resetOptionMenu(q.fullPath, util.natsort(listItem), rl=True)\n selectOptionMenuItem(q.fullPath, currentName)\n\n # #############################################\n # Active/Visible Render Layer\n\n listItem = []\n currentName = rsUtility.activeLayer.name()\n for l in renderSetup.instance().getRenderLayers():\n listItem.append(l.name())\n\n q.getQItem('%s_selectActiveLayer' % windowID, QtWidgets.QWidget)\n\n resetOptionMenu(q.fullPath, util.natsort(listItem), rl=True)\n selectOptionMenuItem(q.fullPath, currentName)\n\n # #################\n # Button\n\n if cmds.optionMenu(q.fullPath, q=True, value=True) \\\n == rsUtility.defaultName:\n q.getQItem('rsAddCollection', QtWidgets.QWidget)\n cmds.button(q.fullPath, edit=True, enable=False)\n q.getQItem('rsRemoveCollection', QtWidgets.QWidget)\n cmds.button(q.fullPath, edit=True, enable=False)\n else:\n q.getQItem('rsAddCollection', QtWidgets.QWidget)\n cmds.button(q.fullPath, edit=True, enable=True)\n q.getQItem('rsRemoveCollection', QtWidgets.QWidget)\n cmds.button(q.fullPath, edit=True, enable=True)\n\n # #############################################\n # Collections\n\n customStrings = []\n cleanList = []\n q.getQItem('%s_ShaderScrollList' % windowID, QtWidgets.QWidget)\n cmds.textScrollList(q.fullPath, edit=True, removeAll=True)\n\n def _spacer(inString):\n num = int(30 - len(inString))\n if num > 0:\n\n # return util.addChars(' ', num)\n\n return ' '\n else:\n return ' '\n\n # Loop through shader list\n\n for shaderName in shaderUtility.data.keys():\n c = rsUtility.activeLayer.collection(\n shaderName.replace(':', '_'), isQuery=True)\n\n # Mark item as inactive if not in the collections list\n\n if c is None:\n\n # Set the custom string of the shader.\n # The custom string used by the Qt delegate for custom display and to indicate if the item is active or inactive.\n\n shaderUtility.data[shaderName]['customString'] = \\\n '%s%s%s)' % (shaderName, ' ', '('\n + str(len(shaderUtility.data[shaderName]['usedBy'\n ])))\n else:\n\n # Mark item as active if in the collections list\n\n # Get current override values\n\n for (index, item) in \\\n enumerate(rsUtility.overrideAttributes):\n try:\n\n rsUtility.overrideAttributes[index][item['default'\n ]] = c.getOverrideValue(item['long'])\n except:\n print '# Couldn\\'t get attribute value for ' \\\n + item['long'] + '.'\n\n def _get(item):\n val = c.getOverrideValue(item['long'])\n if val is None:\n return ''\n else:\n return item['custom'][1 - val]\n\n # Add warning if usedBy doesn't match collection selection\n\n WARNING = ''\n if c.selection.asList() \\\n != 
list(shaderUtility.data[shaderName]['usedBy']):\n WARNING = '!!'\n SHADER_OVERRIDE = ''\n if _hasOverride(shaderName):\n SHADER_OVERRIDE = '#'\n shaderUtility.data[shaderName]['customString'] = \\\n '%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s%s' % (\n ACTIVEITEM_PREFIX,\n shaderName,\n _spacer(ACTIVEITEM_PREFIX + shaderName),\n _get(rsUtility.overrideAttributes[5]),\n _get(rsUtility.overrideAttributes[0]),\n _get(rsUtility.overrideAttributes[1]),\n _get(rsUtility.overrideAttributes[2]),\n _get(rsUtility.overrideAttributes[3]),\n _get(rsUtility.overrideAttributes[4]),\n _get(rsUtility.overrideAttributes[6]),\n _get(rsUtility.overrideAttributes[7]),\n _get(rsUtility.overrideAttributes[8]),\n _get(rsUtility.overrideAttributes[9]),\n _get(rsUtility.overrideAttributes[10]),\n str(len(shaderUtility.data[shaderName]['usedBy'])),\n WARNING,\n SHADER_OVERRIDE,\n )\n customStrings.append(shaderUtility.data[shaderName]['customString'\n ])\n cleanList.append(shaderName)\n\n q.getQItem('%s_filterShaderList' % windowID, QtWidgets.QWidget)\n filter = cmds.textField(q.fullPath, query=True, text=True)\n filteredList = []\n if filter != '<Lights>' and filter != '<Environment>' \\\n and filter != '<Shaders>' and filter != '<StandIns>':\n filteredList = [s for s in customStrings if filter.lower()\n in s.lower()]\n else:\n if filter == '<Lights>':\n filteredList = [s for s in customStrings\n if shaderUtility.data[shaderUtility.customStringToShaderName(s)]['light'\n ]]\n if filter == '<Environment>':\n filteredList = [s for s in customStrings\n if shaderUtility.data[shaderUtility.customStringToShaderName(s)]['environment'\n ]]\n if filter == '<Shaders>':\n filteredList = [s for s in customStrings\n if shaderUtility.data[shaderUtility.customStringToShaderName(s)]['shader'\n ]]\n if filter == '<StandIns>':\n filteredList = [s for s in customStrings\n if shaderUtility.data[shaderUtility.customStringToShaderName(s)]['standIn'\n ]]\n\n q.getQItem('%s_ShaderScrollList' % windowID, QtWidgets.QWidget)\n\n for item in util.natsort(filteredList, filterOn=True):\n cmds.textScrollList(q.fullPath, edit=True, append=item)\n\n # Re-Set selected items from saved selection.\n\n matches = set([])\n\n if currentSelection is not None:\n matches = set(currentSelection).intersection(set(cleanList))\n for match in matches:\n cmds.textScrollList(q.fullPath, edit=True,\n selectItem=shaderUtility.data[match]['customString'\n ])\n\n # Set height\n\n _setTextScrollListVisibleItemNumber()\n\n # Style scrollist\n\n numItems = len(filteredList)\n windowStyle.apply(windowStyle)\n\n # Checkboxes\n\n propertyOverridesMode = setPropertyOverridesMode()\n\n # Shader Overrides\n\n listItem = []\n menuName = '%s_optionMenu02' % windowID\n for item in SHADER_OVERRIDE_OPTIONS:\n listItem.append(item['ui'])\n resetOptionMenu(menuName, listItem, rl=False)\n setShaderOverrideMode()\n\n # #############################################\n # Filter list\n\n resetOptionMenu('rsShaderGroups',\n util.natsort(shaderUtility.getShaderGroups().keys()),\n rl=False)\n filterListText = cmds.textField('%s_filterShaderList'\n % windowID, query=True, text=True)\n selectOptionMenuItem('rsShaderGroups', filterListText, rl=False)\n\n # ############################################\n # Render output templates\n # Output format\n\n listItem = []\n menuName = '%s_optionMenu03' % windowID\n for item in renderOutput.SIZE_TEMPLATE:\n listItem.append(item['ui'])\n resetOptionMenu(menuName, listItem, rl=False)\n\n # Check current resolution\n\n currentWidth = cmds.getAttr('%s.width'\n % 
renderOutput.RESOLUTION_NODE)\n currentHeight = cmds.getAttr('%s.height'\n % renderOutput.RESOLUTION_NODE)\n\n # Check if the current list corresponds to any of the predefined sizes\n\n current = [w for w in renderOutput.SIZE_TEMPLATE\n if currentWidth == w['width'] and currentHeight\n == w['height']]\n if current:\n selectOptionMenuItem(menuName, current[0]['ui'])\n\n _outputTemplate()\n\n # Playback speed\n # Populate list\n\n listItem = []\n menuName = '%s_optionMenu06' % windowID\n for item in renderOutput.TIME_TEMPLATE:\n listItem.append(item['ui'])\n resetOptionMenu(menuName, listItem, rl=False)\n\n # Get current option\n\n currentTime = cmds.currentUnit(query=True, time=True)\n current = [t for t in renderOutput.TIME_TEMPLATE if currentTime\n == t['name']]\n if current:\n selectOptionMenuItem('%s_optionMenu06' % windowID,\n current[0]['ui'])\n\n # In and out frames:\n\n cmds.textField('%s_setInFrame' % windowID, edit=True,\n text=int(cmds.getAttr('defaultRenderGlobals.startFrame'\n )))\n cmds.textField('%s_setOutFrame' % windowID, edit=True,\n text=int(cmds.getAttr('defaultRenderGlobals.endFrame'\n )))\n\n q.getQItem(windowID, QtWidgets.QWidget)\n q.widget.setUpdatesEnabled(True) # Pause qt draw temporarily",
"def setSymbolProps(self, name, symbol):\r\n self.symbolProps = autosar.base.SymbolProps( str(name), str(symbol))",
"def update_from_tags():\n tags.update_diagrams()\n tags.update_tiles()",
"def _update_repr_dict(self):\n self._remote_call('request_repr_dict', target='Widget')",
"def refresh_all(self):\n\t\t\n\t\tself.symbolsList.set_datasource(self.source)\n\t\tself.symbolsList.refresh()\n\t\t\n\t\tself.plotFrame.set_datasource(self.source)\n\t\tself.plotFrame.refresh()",
"def updateWidget(self):\n pass",
"def set_symbols(self, symboltable: dict):\n\n for index in range(1, self.symbol_layout.rowCount()):\n self.symbol_layout.removeRow(index)\n\n font = QFont('Fira Code', 8, QFont.Medium)\n for entry in symboltable:\n symbol = QLineEdit()\n symbol.setReadOnly(True)\n symbol.setText(entry)\n symbol.setFont(font)\n address = QLineEdit()\n address.setReadOnly(True)\n address.setFont(font)\n address.setText(str(symboltable[entry]))\n self.symbol_layout.addRow(address, symbol)",
"def refresh(self):\n\n assets_model = self.data[\"model\"][\"assets\"]\n assets_model.clear()\n\n has = {\"children\": False}\n\n project = io.ObjectId(os.environ[\"MINDBENDER__PROJECT\"])\n assets = io.find({\"type\": \"asset\", \"parent\": project})\n for asset in sorted(assets, key=lambda i: i[\"name\"]):\n item = QtWidgets.QListWidgetItem(asset[\"name\"])\n item.setData(QtCore.Qt.ItemIsEnabled, True)\n item.setData(DocumentRole, asset)\n assets_model.addItem(item)\n has[\"children\"] = True\n\n if not has[\"children\"]:\n item = QtWidgets.QListWidgetItem(\"No assets found\")\n item.setData(QtCore.Qt.ItemIsEnabled, False)\n assets_model.addItem(item)\n\n assets_model.setFocus()\n assets_model.setCurrentRow(0)\n self.data[\"button\"][\"load\"].hide()\n self.data[\"button\"][\"stop\"].hide()",
"def embedded(self, embedded):\n self._embedded = embedded",
"def __init__(self, main_app):\r\n\r\n # run the init of QMainWindow\r\n super().__init__()\r\n\r\n # create list for label elements\r\n self.list_labels = []\r\n\r\n # save a reference to the main_app\r\n self.main_application = main_app\r\n\r\n # load .ui file for window\r\n loadUi('GUI/CodeHighlightWindow.ui', self)\r\n\r\n # set the title\r\n self.setWindowTitle(\"SWEG2 - Code Highlight Window\")\r\n\r\n # define the font used throughout the application\r\n self.master_font = QFont()\r\n self.master_font.setFamily(self.algorithm_name.font().family())\r\n self.master_font.setPointSize(12)\r\n\r\n # add event listener to next button click\r\n self.pushButton.clicked.connect(self.on_next_clicked)\r\n\r\n # set spacing for the line list to none\r\n self.line_list.setContentsMargins(0, 0, 0, 0)\r\n self.line_list.setSpacing(0)\r\n self.line_list.update()",
"def refresh():\n curve_editor.refresh()",
"def update(self) -> None:\n self.all_sprites.update()",
"def update(self):\n self.smd3.update()\n self.logic.update(self.smd3)\n self.header.update(self.smd3)",
"def __init__(self) -> None:\n super().__init__('qt') # Initialize the base class.\n self.active = True\n self.consoleOnly = False # Console is separate from the log.\n self.iconimages: dict[str, Any] = {} # Keys are paths, values are Icons.\n self.globalFindDialog: Widget = None\n self.idleTimeClass = qt_idle_time.IdleTime\n self.insert_char_flag = False # A flag for eventFilter.\n self.mGuiName = 'qt'\n self.plainTextWidget = qt_text.PlainTextWrapper\n self.show_tips_flag = False # #2390: Can't be inited in reload_settings.\n self.styleSheetManagerClass = StyleSheetManager\n # Be aware of the systems native colors, fonts, etc.\n QtWidgets.QApplication.setDesktopSettingsAware(True)\n # Create objects...\n self.qtApp = QtWidgets.QApplication(sys.argv)\n self.reloadSettings()\n self.appIcon = self.getIconImage('leoapp32.png')\n\n # Define various classes key stokes.\n #@+<< define FKeys >>\n #@+node:ekr.20180419110303.1: *4* << define FKeys >>\n self.FKeys = [\n 'F1', 'F2', 'F3', 'F4', 'F5', 'F6', 'F7', 'F8', 'F9', 'F10', 'F11', 'F12']\n # These do not generate keystrokes on MacOs.\n #@-<< define FKeys >>\n #@+<< define ignoreChars >>\n #@+node:ekr.20180419105250.1: *4* << define ignoreChars >>\n # Always ignore these characters\n self.ignoreChars = [\n # These are in ks.special characters.\n # They should *not* be ignored.\n # 'Left', 'Right', 'Up', 'Down',\n # 'Next', 'Prior',\n # 'Home', 'End',\n # 'Delete', 'Escape',\n # 'BackSpace', 'Linefeed', 'Return', 'Tab',\n # F-Keys are also ok.\n # 'F1','F2','F3','F4','F5','F6','F7','F8','F9','F10','F11','F12',\n 'KP_0', 'KP_1', 'KP_2', 'KP_3', 'KP_4', 'KP_5', 'KP_6', 'KP_7', 'KP_8', 'KP_9',\n 'KP_Multiply, KP_Separator,KP_Space, KP_Subtract, KP_Tab',\n 'KP_F1', 'KP_F2', 'KP_F3', 'KP_F4',\n # Keypad chars should be have been converted to other keys.\n # Users should just bind to the corresponding normal keys.\n 'KP_Add', 'KP_Decimal', 'KP_Divide', 'KP_Enter', 'KP_Equal',\n 'CapsLock', 'Caps_Lock',\n 'NumLock', 'Num_Lock',\n 'ScrollLock',\n 'Alt_L', 'Alt_R',\n 'Control_L', 'Control_R',\n 'Meta_L', 'Meta_R',\n 'Shift_L', 'Shift_R',\n 'Win_L', 'Win_R', # Clearly, these should never be generated.\n # These are real keys, but they don't mean anything.\n 'Break', 'Pause', 'Sys_Req',\n 'Begin', 'Clear', # Don't know what these are.\n ]\n #@-<< define ignoreChars >>\n #@+<< define specialChars >>\n #@+node:ekr.20180419081404.1: *4* << define specialChars >>\n # Keys whose names must never be inserted into text.\n self.specialChars = [\n # These are *not* special keys.\n # 'BackSpace', 'Linefeed', 'Return', 'Tab',\n 'Left', 'Right', 'Up', 'Down', # Arrow keys\n 'Next', 'Prior', # Page up/down keys.\n 'Home', 'End', # Home end keys.\n 'Delete', 'Escape', # Others.\n 'Enter', 'Insert', 'Ins', # These should only work if bound.\n 'Menu', # #901.\n 'PgUp', 'PgDn', # #868.\n ]\n #@-<< define specialChars >>\n # Put up the splash screen()\n if (g.app.use_splash_screen and\n not g.app.batchMode and\n not g.app.silentMode and\n not g.unitTesting\n ):\n self.splashScreen = self.createSplashScreen()\n # qtFrame.finishCreate does all the other work.\n self.frameFactory = qt_frame.TabbedFrameFactory()",
"def update_editor ( self ):\n super( SimpleFontEditor, self ).update_editor()\n set_font( self )",
"def update_editor ( self ):\n font = self.factory.to_wx_font( self )\n try:\n self._facename.SetStringSelection( font.GetFaceName() )\n except:\n self._facename.SetSelection( 0 )\n try:\n self._point_size.SetStringSelection( str( font.GetPointSize() ) )\n except:\n self._point_size.SetSelection( 0 )\n font.SetPointSize( min( 10, font.GetPointSize() ) )\n self._font.SetValue( self.str_value )\n self._font.SetFont( font )",
"def __updateEngines(self):\n currentEngine = self.engineComboBox.itemData(\n self.engineComboBox.currentIndex())\n self.engineComboBox.clear()\n for engineName in TranslatorEngines.supportedEngineNames():\n icon = TranslatorEngines.getEngineIcon(engineName)\n self.engineComboBox.addItem(\n icon,\n TranslatorEngines.engineDisplayName(engineName),\n engineName)\n self.engineComboBox.model().sort(0)\n self.engineComboBox.setCurrentIndex(\n self.engineComboBox.findData(currentEngine))",
"def update(self):",
"def update(self):",
"def update(self):",
"def update_listed_eqns(self):\n self.clear_rightside()\n self.lst_eqns.clear()\n self.lst_eqns.addItems(list(self.eqn_data[self.cmb_sections.currentText()]))",
"def update_knowledge(self):\n pass",
"def update_processgraph(self):\n graph = self.processgraphEdit.toPlainText()\n self.processgraph.graph = json.loads(graph)\n self.processgraph.builder.processes = json.loads(graph)\n #widget = self.processgraphWidget\n #self.load_dict_into_widget(widget, self.processgraph.graph)\n #widget.show()",
"def redraw(self):\n bpy.context.scene.objects.active = bpy.context.scene.objects.active"
] | [
"0.67538667",
"0.5128117",
"0.509659",
"0.5053252",
"0.4964151",
"0.49451157",
"0.4761072",
"0.4732717",
"0.47295678",
"0.46906155",
"0.4686383",
"0.46371529",
"0.46117783",
"0.4515449",
"0.4510255",
"0.4473232",
"0.44729185",
"0.44690013",
"0.4440391",
"0.4439548",
"0.443494",
"0.44177774",
"0.4398748",
"0.43926862",
"0.43926862",
"0.43926862",
"0.43919024",
"0.4347138",
"0.43424654",
"0.43388614"
] | 0.76550114 | 0 |
Embed symbol resources as base64 in QML. | def embed_qml_symbols(self, root, layer_class, prop_key):
for svgprop in root.findall(".//layer[@class='%s']/prop[@k='%s']" %
(layer_class, prop_key)):
symbol_path = svgprop.get('v')
path = os.path.abspath(
os.path.join(QGS_RESOURCES_DIR, symbol_path)
)
# NOTE: assume symbols not included in ZIP are default symbols
if os.path.exists(path):
try:
# read symbol data and convert to base64
with open(path, 'rb') as f:
symbol_data = base64.b64encode(f.read())
# embed symbol in QML
svgprop.set('v', "base64:%s" % symbol_data.decode())
self.logger.info("Embed symbol in QML: %s" % symbol_path)
except Exception as e:
self.logger.warning(
"Could not embed QML symbol %s:\n%s" % (symbol_path, e)
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def update_qml(self, qml):\n if qml is None:\n return qml\n\n try:\n # parse XML\n root = ElementTree.fromstring(qml)\n\n # embed symbols\n self.embed_qml_symbols(root, 'SvgMarker', 'name')\n self.embed_qml_symbols(root, 'SVGFill', 'svgFile')\n self.embed_qml_symbols(root, 'RasterFill', 'imageFile')\n\n # return updated QML\n qml = ElementTree.tostring(\n root, encoding='utf-8', method='xml'\n )\n return qml.decode()\n except Exception as e:\n self.logger.warning(\n \"Could not embed QML symbols:\\n%s\" % e\n )\n return qml",
"def rebase_add_encoding_prefix():\n\tfilenames, clippings = load_clippings(inFolder)\n\tfor file, clip in zip(filenames, clippings):\n\t\timg = clip[\"imgEncoding\"]\n\t\tstr = \"data:image/png;base64,\"\n\t\tif str not in img:\n\t\t\tclip[\"imgEncoding\"] = str + img\n\t\t\twith open(file, \"w\") as outfile:\n\t\t\t\tjson.dump(clip, outfile)",
"def base64_string(self) -> global___Expression:",
"def generate_qrc(path, name=\"resources\", prefix=\"icons\"):\n qrc = '<RCC>\\n\\t<qresource prefix=\"{}\">\\n'.format(prefix)\n for each in sorted(os.listdir(path)):\n qrc += \"\\t\\t<file>{0}</file>\\n\".format(each)\n qrc += \"\\t</qresource>\\n</RCC>\\n\"\n\n qrc_file = os.path.join(path, name + \".qrc\")\n with open(qrc_file, \"w\") as stream:\n stream.write(qrc)\n\n return qrc_file",
"def base64(self):\n image = self.png.getvalue()\n return base64.encodestring(image).decode('utf-8')",
"def make_embedded_qr_code(text, qr_code_options=QRCodeOptions()):\n image_format = qr_code_options.image_format\n img = make_qr_code_image(text, SvgEmbeddedInHtmlImage if image_format == SVG_FORMAT_NAME else PilImageOrFallback, qr_code_options=qr_code_options)\n stream = BytesIO()\n if image_format == SVG_FORMAT_NAME:\n img.save(stream, kind=SVG_FORMAT_NAME.upper())\n html_fragment = (str(stream.getvalue(), 'utf-8'))\n else:\n img.save(stream, format=PNG_FORMAT_NAME.upper())\n html_fragment = '<img src=\"data:image/png;base64, %s\" alt=\"%s\">' % (str(base64.b64encode(stream.getvalue()), encoding='ascii'), escape(text))\n return mark_safe(html_fragment)",
"def _binaries_to_symbolize(self):\n raise NotImplementedError()",
"def data_uri(data, filename, encoding=\"utf8\", mime=\"text/plain\"):\n data = base64.b64encode(data)\n return f'data:{mime};charset={encoding};filename={filename};base64,{data.decode(\"utf-8\")}'",
"def encode(self, decoded):",
"def build_assets(self):\n theme = self.theme\n \n # ~ self.assets_dir = cwd + \"/CenterSide_Themes/\" + theme + \"/\"\n \n \n \n \n \n \n # ~ self.blank_langmssg = QPixmap(\"blank_langmssg.svg\")\n # ~ self.blank_thememssg = QPixmap(\"blank_thememssg.svg\")\n \n \n \n \n \n # ~ self.icon_info = QIcon(\"Icons/info.svg\")\n # ~ self.icon_intructions = QIcon(\"Icons/instructions.svg\")\n # ~ self.icon_internet = QIcon(\"Icons/internet.svg\")\n # ~ self.icon_invite = QIcon(\"Icons/invite.svg\")\n # ~ self.icon_languages = QIcon(\"Icons/languages.svg\")\n # ~ self.icon_local = QIcon(\"Icons/local.svg\")\n # ~ self.icon_message = QIcon(\"Icons/message.svg\")\n # ~ self.icon_name = QIcon(\"Icons/name.svg\")\n # ~ self.icon_options = QIcon(\"Icons/options.svg\")\n # ~ self.icon_palettes = QIcon(\"Icons/palettes.svg\")\n \n # ~ self.icon_quit = QIcon(\"Icons/quit.svg\")\n # ~ self.icon_refresh = QIcon(\"Icons/refresh.svg\")\n # ~ self.icon_shop = QIcon(\"Icons/shop.svg\")\n # ~ self.icon_soundon = QIcon(\"Icons/soundon.svg\")\n # ~ self.icon_soundoff = QIcon(\"Icons/soundoff.svg\")\n # ~ self.icon_vsAI = QIcon(\"Icons/vsAI.svg\")",
"def resource_js(self):\n \n portal_url = getSite().absolute_url()\n \n return \"\"\"\n <script type=\"text/javascript\" src=\"%s/++resource++swfobject.js\"></script>\n <script type=\"text/javascript\" src=\"%s/++resource++audio_player.js\"></script> \n <script type=\"text/javascript\"> \n AudioPlayer.setup(\"%s/++resource++audio_player.swf\", { \n width: 300\n }); \n </script>\n \"\"\" % (portal_url, portal_url, portal_url)",
"def generate_code_bitmap(self, bitmap, required=False):\n assert self.tmpl_inline_bitmap\n\n if not bitmap and not required:\n return self.codegen.cn('wxNullBitmap')\n\n preview = self.codegen.preview\n\n if ( preview and ( bitmap.startswith('var:') or bitmap.startswith('code:') ) ) or (not bitmap and required):\n preview_icon = os.path.join(config.icons_path, \"icon.png\")\n return self.tmpl_inline_bitmap % { 'name': self.codegen.cn('wxBitmap'),\n 'bitmap': self.codegen.quote_path(preview_icon),\n 'bitmap_type': self.codegen.cn('wxBITMAP_TYPE_ANY') }\n\n if bitmap.startswith('var:'):\n return self.tmpl_inline_bitmap % { 'name': self.codegen.cn('wxBitmap'),\n 'bitmap': bitmap[4:].strip(),\n 'bitmap_type': self.codegen.cn('wxBITMAP_TYPE_ANY') }\n\n if bitmap.startswith('empty:'): return self.get_inline_stmt_emptybitmap(bitmap)\n if bitmap.startswith('art:'): return self.get_inline_stmt_artprovider(bitmap) \n if bitmap.startswith('code:'): return '%s' % self.codegen.cn(bitmap[5:].strip())\n\n if preview:\n bitmap = misc.get_absolute_path(bitmap, True)\n\n return self.tmpl_inline_bitmap % { 'name': self.codegen.cn('wxBitmap'),\n 'bitmap': self.codegen.quote_path(bitmap),\n 'bitmap_type': self.codegen.cn('wxBITMAP_TYPE_ANY') }",
"def encoded_icon(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"encoded_icon\")",
"def create_qt_mime_data(data):\n from PyQt5.QtCore import QByteArray, QDataStream, QIODevice, QMimeData\n\n item_data = QByteArray()\n data_stream = QDataStream(item_data, QIODevice.WriteOnly)\n\n qgraph_mime = {\n 'version': qmxgraph.constants.QGRAPH_DD_MIME_VERSION,\n }\n qgraph_mime.update(data)\n data_stream.writeString(json.dumps(qgraph_mime).encode('utf8'))\n\n mime_data = QMimeData()\n mime_data.setData(qmxgraph.constants.QGRAPH_DD_MIME_TYPE, item_data)\n\n return mime_data",
"def create_icon_url(cls, name):\n return os.path.join(RESOURCE_FOLDER, name)",
"def adobe_base64_encode(cls, to_encode):\n if isinstance(to_encode, unicode):\n to_encode = to_encode.encode(\"utf8\")\n encoded = base64.encodestring(to_encode)\n return encoded.replace(b\"+\", b\":\").replace(b\"/\", b\";\").replace(b\"=\", b\"@\").strip()",
"def encode(self) :\n\t\tbitmap = ISO8583Bitmap()\n\t\ttexts=[]\n\t\tfor i in range(2,129) :\n\t\t\tid = 'f%03d' % i\n\t\t\tif hasattr(self,id) :\n\t\t\t\tv = getattr(self,id)\n\t\t\t\ttyp = self.desc_dict[id]['type']\n\t\t\t\tbitmap.setBitmap(i)\n\t\t\t\t# logit(\"%s:%s\" % (id,v))\n\t\t\t\ttxt = dataAttachTo8583(v,typ)\n\t\t\t\ttexts.append(txt)\n\t\treturn (bitmap,''.join(texts))",
"def base64_to_hex(apps, schema_editor):\n APNSDevice = apps.get_model('push_notifications', 'APNSDevice')\n for device in APNSDevice.objects.all():\n device.registration_id = b2a_hex(a2b_base64(device.registration_id))\n device.save()",
"def assets():",
"def resource_path(self,relative_path):\n try:\n # PyInstaller creates a temp folder and stores path in _MEIPASS\n base_path = sys._MEIPASS\n except Exception:\n base_path = os.path.abspath(\".\\\\Visual_Ressources\\\\\"+self.language+\"\\\\\") \n # \".\"\n # 'Content\\\\Back_End\\\\'\n return os.path.join(base_path, relative_path)",
"def encode(self, strs):\n encoded_str = \"\"\n for s in strs:\n encoded_str += \"%0*x\" % (8, len(s)) + s\n return encoded_str",
"def _encode_resource_id(self, resource_id):\n return urlquote(resource_id, safe='~')",
"def write_icon(config: Config) -> Config:\n icon_file = os.path.join(\n config.destination,\n \"res\",\n \"{}.png\".format(config.resourcename)\n )\n\n assert_directories(icon_file, True)\n\n if not os.path.isfile(icon_file):\n with open(icon_file, \"wb\") as f:\n f.write(gzip.decompress(base64.b64decode(ICON)))\n\n return config",
"def test_Lisp_wxBitmapButton(self):\n self.load_and_generate( 'Lisp_wxBitmapButton', included=['lisp'], test_GUI=False )",
"def get_inline_attachment(self) -> Attachment:\n file_path = os.path.join(os.getcwd(), \"assets/architecture-resize.png\")\n with open(file_path, \"rb\") as in_file:\n base64_image = base64.b64encode(in_file.read()).decode()\n\n return Attachment(\n name=\"architecture-resize.png\",\n content_type=\"image/png\",\n content_url=f\"data:image/png;base64,{base64_image}\",\n )",
"def data64(self) -> str:\n return Image.encode64(self.data)",
"def embed_image(self, node):\r\n xlink = node.get('xlink:href')\r\n if xlink and xlink[:5] == 'data:':\r\n # No need, data alread embedded\r\n return\r\n\r\n url = urllib.parse.urlparse(xlink)\r\n href = urllib.request.url2pathname(url.path)\r\n\r\n # Primary location always the filename itself.\r\n path = self.absolute_href(href or '')\r\n\r\n # Backup directory where we can find the image\r\n if not os.path.isfile(path):\r\n path = node.get('sodipodi:absref', path)\r\n\r\n if not os.path.isfile(path):\r\n inkex.errormsg('File not found \"{}\". Unable to embed image.'.format(path))\r\n return\r\n\r\n with open(path, \"rb\") as handle:\r\n # Don't read the whole file to check the header\r\n file_type = self.get_image_type(path, handle.read(10))\r\n handle.seek(0)\r\n\r\n if file_type:\r\n # Future: Change encodestring to encodebytes when python3 only\r\n node.set('xlink:href', 'data:{};base64,{}'.format(\r\n file_type, base64.encodebytes(handle.read()).decode('ascii')))\r\n node.pop('sodipodi:absref')\r\n else:\r\n inkex.errormsg(\"%s is not of type image/png, image/jpeg, \"\r\n \"image/bmp, image/gif, image/tiff, or image/x-icon\" % path)",
"def embed_images(self):\n for img in self.book.xpath(\"//img[ not(starts-with(@src, 'data:')) and @src!= '']\"):\n img_src = img.attrib[\"src\"]\n img_raw = self.get_remote_content(img_src)\n if img_raw != None:\n img_64 = base64.b64encode(img_raw)\n file_info = os.path.splitext(img_src)\n ext = file_info[1].replace(\".\", \"\")\n ext = re.sub(\"\\?.*$\", \"\" , ext)\n \n if ext == \"svg\":\n svg = html.fromstring(img_raw.decode(\"utf-8\"))\n img.clear()\n img.tag = \"svg\"\n img[:] = [svg]\n else:\n img.set(\"src\", \"data:image/{};base64,{}\".format(ext, img_64.decode(\"utf-8\")))",
"def string_raw(self):\n return \"x%x\" % self.encoded",
"def load_icons() -> str:\n return _read_text('icons-svg-inline.html')"
] | [
"0.55629766",
"0.54794717",
"0.5280593",
"0.49432886",
"0.49401248",
"0.48893714",
"0.48839408",
"0.48512277",
"0.48236877",
"0.48004314",
"0.47988746",
"0.47885874",
"0.47874668",
"0.4764687",
"0.47481993",
"0.47099677",
"0.46938792",
"0.46663794",
"0.4606227",
"0.45966935",
"0.45875174",
"0.4582743",
"0.45690426",
"0.45630053",
"0.45601618",
"0.4543661",
"0.4529529",
"0.45261496",
"0.45258448",
"0.4517948"
] | 0.66117233 | 0 |
Adds a human-readable date variable. Assumes the date is in seconds since epoch. time_var is a netCDF.Variable object. | def add_utc_date(nc, time_var):
# Create Variable
utc = nc.createVariable('utc_time', int, ('time'))
setattr(utc, 'standard_name', "time")
setattr(utc, 'long_name', "UTC date yyyy-mm-dd hh:00:00 as yyyymmddhh")
setattr(utc, "units","Gregorian_year month day hour")
toUTC = lambda d: int(dt.datetime.fromtimestamp(d).strftime('%Y%m%d%H'))
vfunc = np.vectorize(toUTC)
utc_data = vfunc(time_var[:])
utc[:] = utc_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def date_info_day(date_str, infile):\n #date_str = str(sys.argv[1])\n #infile = './' + date_str + '.nc'\n\n # prepare date\n year,mon,day = date_str.split('-')\n year_num = int(float(year))\n mon_num = int(float(mon))\n day_num = int(float(day))\n\n\n datesec_calc = []\n val_pr_day = 4\n secstep = 86400/val_pr_day\n sec = [0, 1*secstep, 2*secstep, 3*secstep]\n for j in sec:\n datesec_calc.append(j)\n\n # Open a netCDF file for appending:\n ncfile = Dataset(infile,'a')\n #time_in = ncfile.variables['time'][:]\n #ncfile = Dataset('date_datesec' + date + '.nc','w')\n\n # Create the variable (4 byte integer in this case)\n # first argument is name of variable, second is datatype, third is\n # a tuple with the names of dimensions.\n date_str = ncfile.createVariable('date',dtype('int32').char,('time'))\n datesec = ncfile.createVariable('datesec',dtype('int32').char,('time'))\n\n # Write data to variable:\n date_str[:] = year_num*10000+mon_num*100+day_num\n datesec[:] = datesec_calc\n\n # Add attributes to the variables:\n date_str.long_name = 'current date (YYYYMMDD)'\n datesec.long_name = 'current seconds of current date'\n\n # close the file.\n ncfile.close()\n return",
"def __init__(self, vardict):\n \n datevars = []\n for v in vardict:\n vf = v.VariableFormat\n for fmt in [\"DATE\", \"TIME\", \"QYR\", \"MOYR\", \"MONTH\"]:\n if vf.find(fmt) >= 0:\n datevars.append(v.VariableName)\n break\n self.datevars = \" \".join(datevars)",
"def convert_date(adate):\n\tprint \"date given: \" + adate\n\t# stuff\n\tprint \"epoch time for date: \"",
"def set_contest_date_time(contest, date_time, field):\n c = biv.load_obj(contest)\n assert type(c) == pem.E15Contest\n dt = _local_date_time_as_utc(c, date_time)\n assert hasattr(c, field), \\\n '{}: has no attr {}'.format(c, field)\n setattr(c, field, dt)\n _add_model(c)",
"def add_variable(self, name, var):\n self.variables.append(_3ds_named_variable(name, var))",
"def add_variable(self, name, var):\n self.variables.append(_3ds_named_variable(name, var))",
"def add_date(self, new_date):\r\n if self.__dates == \"\":\r\n self.__dates = new_date\r\n else:\r\n self.__dates += \", \" + new_date",
"def addDateTime(s = \"\"):\n date = str(datetime.datetime.now())\n allchars = string.maketrans('','')\n nodigs = allchars.translate(allchars, string.digits)\n date = date.translate(allchars, nodigs)\n return s + '_D' + date[2:8] + '_' + date[8:12]",
"def render_date_time_with_relative_into(into, date_time, add_ago):\n into.append(format(date_time, DATETIME_FORMAT_CODE))\n \n into.append(' [*')\n into.append(elapsed_time(date_time))\n if add_ago:\n into.append(' ago')\n into.append('*]')\n \n return into",
"def print_date():\n print time.strftime(\"%b %d, %Y\")",
"def add_variable(self, variable):\n self.variables.append(variable)",
"def date(self, date):\n self.value = date.strftime(\"%Y-%m-%d\") if date else \"\"",
"def update_variable_datetime(\n self, variable_value=None, commit=False, force=False):\n if variable_value is not None:\n new_value = variable_value\n if isinstance(new_value, (str, unicode)):\n new_value = \\\n parse_date_string('%s UTC' % new_value) \\\n or parse_date_string(new_value)\n if new_value is not None:\n if not force and self.variable_value is not None:\n current_value = parse_date_string(self.variable_value)\n if current_value is not None and new_value < current_value:\n return\n self.upsert(\n new_value.strftime('%Y-%m-%d %H:%M:%S'), commit=commit)",
"def read_cable_GPP_year(fcable, var_name):\n\n print(\"carry on read_cable_var\")\n cable = nc.Dataset(fcable, 'r')\n Time = nc.num2date(cable.variables['time'][:],cable.variables['time'].units)\n var = pd.DataFrame(cable.variables[var_name][:,0,0]*1800., columns=['cable'])\n var['Date'] = Time\n var = var.set_index('Date')\n var = var.resample(\"Y\").agg('sum')\n\n #var.index = var.index - pd.datetime(2011,12,31)\n #var.index = var.index.days\n var = var.sort_values(by=['Date'])\n\n return var",
"def _build_date_header_string(self, date_value):\n if isinstance(date_value, datetime):\n date_value = time.mktime(date_value.timetuple())\n if not isinstance(date_value, basestring):\n date_value = formatdate(date_value, localtime=True)\n return date_value",
"def human_date(self, date):\n return timeago.format(date)",
"def add_variable(self, var):\n self.var_list.append(var)\n self.var_dict[var.name] = var",
"def rfc3999(date):\n if not date: return ''\n date = date + datetime.timedelta(seconds = -time.timezone)\n if time.daylight:\n date += datetime.timedelta(seconds = time.altzone)\n return date.strftime('%m-%d-%YT%H:%M:%SZ')",
"def buildDate(date):\n parts = date.split(\"-\")\n yDate = parts[1] + \" \" + parts[2] + ', ' + parts[0]\n return yDate",
"def add(self, variable):\n var = copy(variable)\n\n if var.kind == STATIC:\n var.id = self.static_id\n self.static_id += 1\n if var.kind == ARGUMENT:\n var.id = self.argumen_id\n self.argumen_id += 1\n if var.kind == LOCAL:\n var.id = self.local_id\n self.local_id += 1\n if var.kind == FIELD:\n var.id = self.field_id\n self.field_id += 1\n \n self.variables.append(var)",
"def add(ctx, place, strdate):\n daze = ctx.obj['daze']\n if place is None:\n place = getPlaceFromDialog()\n if strdate is None:\n strdate = date.today().isoformat()\n else:\n # Check whether the arguments are maybe switched\n if re.match(r\"[A-z]+\", strdate):\n print(\"Arguments may have been inverted. Assuming {} is the date.\".format(place), file=sys.stderr)\n place, strdate = strdate, place\n if re.match(r\"\\d{2}-\\d{2}\", strdate):\n strdate = str(date.today().year) + \"-\" + strdate\n print(strdate)\n print(\"\\n\\n\")\n\n daze.add(strdate, place)\n d.dazeToFile(daze, ctx.obj['log'])",
"def set_date(self, date):\n self.data['date'] = date",
"def set_add_dispute_date(self, date):\n self.set_value_into_input_field(self.add_dispute_date_inputbox_locator, date)",
"def plastic_date():\n return 'Zun, 99 Zun 9999 99:61:61'",
"def _tp_fmt(var):\n if type(var) is datetime:\n if var.hour == 0 and var.minute == 0:\n str_out = var.strftime('%Y-%m-%d')\n else:\n str_out = var.strftime('%Y-%m-%d %H:%M')\n else:\n str_out = var\n return str_out",
"def get_date(date):\n return date",
"def string_date(mnthDay, year):\n return(mnthDay + '/' + str(year))",
"def get_date(date_time):\n year = str(date_time.year)\n month = str(date_time.month)\n day = str(date_time.day)\n\n # Formata\n year = format_value(year)\n month = format_value(month)\n day = format_value(day)\n\n return year + '-' + month + '-' + day",
"def date_add(self, field: terms.Term, date_part: str, interval: int):\n raise NotImplementedError",
"def numeric_date_recover(self):\n \n sp_time_zone, current_datetime = self.setup_datetime() \n converter2sptimezone = current_datetime.astimezone(sp_time_zone)\n \n return converter2sptimezone.strftime('%d-%m-%Y')"
] | [
"0.53371304",
"0.52994335",
"0.5053854",
"0.5022046",
"0.5021961",
"0.5021961",
"0.50010127",
"0.4921834",
"0.49154162",
"0.48844352",
"0.48474166",
"0.48312527",
"0.4828444",
"0.48283142",
"0.47912452",
"0.47758126",
"0.4751266",
"0.47495",
"0.47279778",
"0.46959502",
"0.46599412",
"0.46577442",
"0.46575487",
"0.4647703",
"0.46471658",
"0.4638856",
"0.4630255",
"0.4629845",
"0.46239644",
"0.4620369"
] | 0.63723165 | 0 |
Adds a time bounds variable to the time variable. Assumes the time dimension is called 'time'. | def add_time_bounds(nc, varname):
THREE_HOURS = 60*60*3 # in seconds
bnds_name = 'time_bnds'
bounds_dim = 'nv'
# Create bounds dimension
nc.createDimension(bounds_dim, 2)
    # Get the time variable (the time dimension is assumed to be named 'time')
time_var = nc.variables['time']
time_var.setncattr('bounds', bnds_name)
time_data = time_var[:]
time_length = len(time_data)
# reshape time data
bounds_data = np.dstack((time_data,time_data)).reshape(time_length,2)
for i in bounds_data:
i[0] = i[0] - (THREE_HOURS)
bounds_var = nc.createVariable(bnds_name, time_var.dtype, ('time', bounds_dim), fill_value=9999)
bounds_var[:] = bounds_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __define_variable_time(self, initial_guess, minimum, maximum):\n i = 0\n for nlp in self.nlp:\n if isinstance(nlp[\"tf\"], self.CX):\n time_bounds = Bounds(minimum[i], maximum[i], interpolation=InterpolationType.CONSTANT)\n time_init = InitialConditions(initial_guess[i])\n Parameters._add_to_v(self, \"time\", 1, None, time_bounds, time_init, nlp[\"tf\"])\n i += 1",
"def _define_time_coord(\n adate: pd.Timestamp, time_bounds: Optional[Sequence[pd.Timestamp]] = None,\n) -> DimCoord:\n return DimCoord(\n np.array(adate.timestamp(), dtype=TIME_COORDS[\"time\"].dtype),\n \"time\",\n bounds=time_bounds\n if time_bounds is None\n else [\n np.array(t.timestamp(), dtype=TIME_COORDS[\"time\"].dtype)\n for t in time_bounds\n ],\n units=TIME_COORDS[\"time\"].units,\n )",
"def add_timedim(data, date=\"1970-01-01\"):\n if isinstance(data, xr.DataArray):\n if \"time\" in data.dims:\n raise ValueError(\n \"You trying to add time dimension to the DataArray that already have it. \\\nThe reason migh be that you trying to use 2d variable (e.g. `a_ice`) \\\nin a function that accepts only 3d variables (e.g. `hovm_data`)\"\n )\n timestamp = [np.array(np.datetime64(date, \"ns\"))]\n data = data.expand_dims({\"time\": timestamp}, axis=0)\n return data\n else:\n data = np.expand_dims(data, axis=0)\n return data",
"def time_interval_sub(self, time_step, nsteps):\n world.subtime = TimeAxis(0.0, int(nsteps), float(time_step))\n print(\"Setting subtime\")",
"def time_interval_prop(self, time_step, nsteps):\n world.time = TimeAxis(0.0, int(nsteps), float(time_step))\n print(\"Setting time\")",
"def add_constraint(self, constraint):\n constraint_type = constraint[0]\n if constraint_type == 'time':\n dependent_variable = constraint[-2]\n dependee_variable = constraint[-1]\n dependent_index = self.subvariable_name.index(dependent_variable)\n dependee_index = self.subvariable_name.index(dependee_variable)\n constraint[-2] = self.value[dependent_index]\n constraint[-1] = self.value[dependee_index]\n if constraint_type in ['threshold', 'count']:\n threshold_variable = constraint[-1]\n threshold_index = self.subvariable_name.index(threshold_variable)\n constraint[-1] = self.value[threshold_index]\n if constraint_type == 'only_one':\n onlyone_variable = constraint[-1]\n onlyone_index = self.subvariable_name.index(onlyone_variable)\n constraint[-1] = self.value[onlyone_index]\n if constraint_type in self.constraint.keys():\n self.constraint[constraint_type] += [constraint[1:]]\n else:\n self.constraint[constraint_type] = [constraint[1:]]",
"def add_time_nuc(this_spot_data, t, nucmask):\n # Combine frame number and zxy to for coordinate tuple, accounting for out-of-bounds z-coordinates due to re-focus adjustments.\n spot_coords = tuple(np.concatenate((\n [t], \n #[clamp(this_spot_data[0], 0, nucmask.shape[-3] - 1)],\n #this_spot_data[1:3]\n this_spot_data[0:3]\n )).astype(int))\n \n nuc_id = nucmask[z_inbounds(spot_coords, nucmask)]\n # Add time and nuclear ID columns to spot data and call update to search \n # for connected spots in previous frames.\n this_spot_data = np.append([t, nuc_id], this_spot_data)\n return this_spot_data",
"def build_time_feature_vector_rule(self, time):\n start_morning_hour = self.is_morning_hour(time)\n start_lunch_hour = self.is_lunch_hour(time)\n start_afternoon_hour = self.is_afternoon_hour(time)\n start_dinner_hour = self.is_dinner_hour(time)\n start_home_hour = self.is_home_hour(time)\n\n if isinstance(start_home_hour, tf.Tensor):\n return tf.cast(\n tf.concat([\n start_morning_hour, start_lunch_hour, start_afternoon_hour, start_dinner_hour,\n start_home_hour\n ], axis=1), 'float32')\n\n else:\n return np.concatenate([\n start_morning_hour, start_lunch_hour, start_afternoon_hour, start_dinner_hour,\n start_home_hour\n ], 1).astype('float')",
"def put_time(self, step, value):\n assert step > 0, \"Step must be larger than 0.\"\n # XXX: Currently the time axis is not unlimited due to a limitation\n # in h5netcdf - thus no new time steps can be created after the\n # initialization.\n assert step <= self._f.dimensions[\"time_step\"]\n\n self._f.variables[\"time_whole\"][step - 1] = value",
"def copy_and_append_time_dimension_to_netcdf_dataset(self,dataset_in,dataset_out):\n\n for dim_name,dim_obj in list(dataset_in.dimensions.items()):\n dataset_out.createDimension(dim_name,len(dim_obj)\n if not dim_obj.isunlimited() else None)\n dataset_out.createDimension('time',None)\n times = dataset_out.createVariable(\"time\",'f8',(\"time\",))\n times.units = \"years since 0001-01-01 00:00:00.0\"\n times.calendar = \"proleptic_gregorian\"\n times[0] = np.array([0.0])\n for var_name, var_obj in list(dataset_in.variables.items()):\n new_var = dataset_out.createVariable(var_name,var_obj.datatype,var_obj.dimensions\n if (len(var_obj.dimensions) <= 1\n or var_name == 'AREA') else\n [\"time\"] + list(var_obj.dimensions))\n if len(var_obj.dimensions) <= 1 or var_name == 'AREA':\n new_var[:] = var_obj[:]\n else:\n new_var[0,:] = var_obj[:]\n new_var.setncatts({attr_name: var_obj.getncattr(attr_name) for attr_name in var_obj.ncattrs()})",
"def put_time(self, time_step, time_value):\n ierr = exolib.py_exptim(self.exoid, time_step + self._o, time_value)\n if ierr:\n raise ExodusIIWriterError(\"Error putting time\")",
"def _adapt_time(self):\n self.time = min(max(self.time, self.minimum), self.maximum)",
"def update_variable_array(array,annuli,times,t,r,value):\n annulus=radius_to_annulus(r,annuli)\n annulus_start=np.sum(times[0:annulus])\n array[annulus_start+t]=value\n return ()",
"def __add__(self, *args, **kwargs):\n return _uhd_swig.time_spec_t___add__(self, *args, **kwargs)",
"def _set_time_bnds(in_dir, var):\n # This is a complicated expression, but necessary to keep local\n # variables below the limit, otherwise prospector complains.\n cubelist = iris.load(\n glob.glob(\n os.path.join(in_dir, var['file'].replace('c3s', 'c3s_regridded'))))\n\n # The purpose of the following loop is to remove any attributes\n # that differ between cubes (otherwise concatenation over time fails).\n # In addition, care is taken of the time coordinate, by adding the\n # time_coverage attributes as time_bnds to the time coordinate.\n for n_cube, _ in enumerate(cubelist):\n time_coverage_start = cubelist[n_cube].\\\n attributes.pop('time_coverage_start')\n time_coverage_end = cubelist[n_cube].\\\n attributes.pop('time_coverage_end')\n\n # Now put time_coverage_start/end as time_bnds\n # Convert time_coverage_xxxx to datetime\n bnd_a = datetime.strptime(time_coverage_start, \"%Y-%m-%dT%H:%M:%SZ\")\n bnd_b = datetime.strptime(time_coverage_end, \"%Y-%m-%dT%H:%M:%SZ\")\n\n # Put in shape for time_bnds\n time_bnds_datetime = [bnd_a, bnd_b]\n\n # Read dataset time unit and calendar from file\n dataset_time_unit = str(cubelist[n_cube].coord('time').units)\n dataset_time_calender = cubelist[n_cube].coord('time').units.calendar\n # Convert datetime\n time_bnds = cf_units.date2num(time_bnds_datetime, dataset_time_unit,\n dataset_time_calender)\n # Put them on the file\n cubelist[n_cube].coord('time').bounds = time_bnds\n\n return cubelist",
"def _add_time(time_to_add: int):\n store.time += time_to_add",
"def build_model_for_time_block(self,\n ndx: int,\n start_t: float,\n end_t: float,\n add_init_conditions: bool) -> Tuple[_BlockData,\n Sequence[_GeneralVarData],\n Sequence[_GeneralVarData]]:\n pass",
"def _add_time_field(self) -> None:\n self.data[\"time\"] = [datetime(int(yyyy), int(mm), int(dd)) + timedelta(hours=hh) for yyyy, mm, dd, hh in zip(self.data[\"year\"], self.data[\"month\"], self.data[\"day\"], self.data[\"hour\"])]\n for key in [\"year\", \"doy\", \"month\", \"day\", \"hour\"]:\n del self.data[key]",
"def build_time_feature_vector(self, time):\n return time",
"def bind(self):\n super(BoundedTime, self).bind()\n otc = self.on_trait_change\n otc(self._send_minimum, 'minimum')\n otc(self._send_maximum, 'maximum')\n otc(self._send_time, 'time')",
"def arr_time(self, arr_time):\n\n self._arr_time = arr_time",
"def add(self, time):\n\n self.elapsed_time = self.elapsed_time + time",
"def __iadd__(self, *args, **kwargs):\n return _uhd_swig.time_spec_t___iadd__(self, *args, **kwargs)",
"def add_time_point(self,time, mdv_instance):\n\n self.mdvtc[time] = mdv_instance",
"def __timeRestriction():\n restriction = {\"M\": [\"7:00\", \"9:30\"],\n \"A\": [\"16:00\", \"19:30\"]}\n return restriction",
"def addData(self, other, time, index):\n\n xoffset = index[0]*other.xdim\n yoffset = index[1]*other.ydim \n zoffset = index[2]*other.zdim\n \n self.data [ time-self.time_range[0], zoffset:zoffset+other.zdim, yoffset:yoffset+other.ydim, xoffset:xoffset+other.xdim] = other.data [:,:,:]",
"def addFinishTimeVar(self, order):\n\t\tvar = str(order.id) + \"-finish\"\n\t\tlastMachine = self.plant.machines[-1]\n\t\tself.problem.addVariable(var, range(order.deadline - self.endMargin,\n\t\t\torder.deadline + self.endMargin))\n\t\tself.problem.addConstraint(lambda x, y, yt: x == y + yt,\n\t\t\t[var, self.createEnterTimeVarName(order, lastMachine),\n\t\t\tself.createTimeAtMachineVarName(order, lastMachine)])",
"def _load_time(self):\n\n time_variables = ('time', 'Times', 'Itime', 'Itime2')\n got_time, missing_time = [], []\n for time in time_variables:\n # Since not all of the time_variables specified above are required, only try to load the data if they\n # exist. We'll raise an error if we don't find any of them though.\n if time in self.ds.variables:\n setattr(self.time, time, self.ds.variables[time][:])\n got_time.append(time)\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[time].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[time], attribute))\n setattr(self.atts, time, attributes)\n else:\n missing_time.append(time)\n\n if len(missing_time) == len(time_variables):\n warn('No time variables found in the netCDF.')\n else:\n if 'Times' in got_time:\n # Overwrite the existing Times array with a more sensibly shaped one.\n self.time.Times = np.asarray([''.join(t.astype(str)).strip() for t in self.time.Times])\n\n # Make whatever we got into datetime objects and use those to make everything else. Note: the `time' variable\n # is often the one with the lowest precision, so use the others preferentially over that.\n if 'Times' not in got_time:\n if 'time' in got_time:\n _dates = num2date(self.time.time, units=getattr(self.ds.variables['time'], 'units'))\n elif 'Itime' in got_time and 'Itime2' in got_time:\n _dates = num2date(self.time.Itime + self.time.Itime2 / 1000.0 / 60 / 60, units=getattr(self.ds.variables['Itime'], 'units'))\n try:\n self.time.Times = np.array([datetime.strftime(d, '%Y-%m-%dT%H:%M:%S.%f') for d in _dates])\n except ValueError:\n self.time.Times = np.array([datetime.strftime(d, '%Y/%m/%d %H:%M:%S.%f') for d in _dates])\n # Add the relevant attribute for the Times variable.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Times', attributes)\n\n if 'time' not in got_time:\n if 'Times' in got_time:\n try:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y-%m-%dT%H:%M:%S.%f') for t in self.time.Times])\n except ValueError:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y/%m/%d %H:%M:%S.%f') for t in self.time.Times])\n elif 'Itime' in got_time and 'Itime2' in got_time:\n _dates = num2date(self.time.Itime + self.time.Itime2 / 1000.0 / 60 / 60, units=getattr(self.ds.variables['Itime'], 'units'))\n # We're making Modified Julian Days here to replicate FVCOM's 'time' variable.\n self.time.time = date2num(_dates, units='days since 1858-11-17 00:00:00')\n # Add the relevant attributes for the time variable.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'days since 1858-11-17 00:00:00')\n setattr(attributes, 'long_name', 'time')\n setattr(attributes, 'format', 'modified julian day (MJD)')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'time', attributes)\n\n if 'Itime' not in got_time and 'Itime2' not in got_time:\n if 'Times' in got_time:\n try:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y-%m-%dT%H:%M:%S.%f') for t in self.time.Times])\n except ValueError:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y/%m/%d %H:%M:%S.%f') for t in self.time.Times])\n elif 'time' in got_time:\n _dates = num2date(self.time.time, units=getattr(self.ds.variables['time'], 'units'))\n # We're making Modified Julian Days here to replicate FVCOM's 'time' variable.\n _datenum = date2num(_dates, units='days since 1858-11-17 
00:00:00')\n self.time.Itime = np.floor(_datenum)\n self.time.Itime2 = (_datenum - np.floor(_datenum)) * 1000 * 60 * 60 # microseconds since midnight\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'days since 1858-11-17 00:00:00')\n setattr(attributes, 'format', 'modified julian day (MJD)')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Itime', attributes)\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'msec since 00:00:00')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Itime2', attributes)\n\n # Additional nice-to-have time representations.\n if 'Times' in got_time:\n try:\n self.time.datetime = np.array([datetime.strptime(d, '%Y-%m-%dT%H:%M:%S.%f') for d in self.time.Times])\n except ValueError:\n self.time.datetime = np.array([datetime.strptime(d, '%Y/%m/%d %H:%M:%S.%f') for d in self.time.Times])\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'long_name', 'Python datetime.datetime')\n setattr(self.atts, 'datetime', attributes)\n else:\n self.time.datetime = _dates\n self.time.matlabtime = self.time.time + 678942.0 # convert to MATLAB-indexed times from Modified Julian Date.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'long_name', 'MATLAB datenum')\n setattr(self.atts, 'matlabtime', attributes)\n\n # Clip everything to the time indices if we've been given them. Update the time dimension too.\n if 'time' in self._dims:\n if all([isinstance(i, (datetime, str)) for i in self._dims['time']]):\n # Convert datetime dimensions to indices in the currently loaded data.\n self._dims['time'][0] = self.time_to_index(self._dims['time'][0])\n self._dims['time'][1] = self.time_to_index(self._dims['time'][1]) + 1 # make the indexing inclusive\n for time in self.obj_iter(self.time):\n setattr(self.time, time, getattr(self.time, time)[self._dims['time'][0]:self._dims['time'][1]])\n self.dims.time = len(self.time.time)",
"def change_time_units(var):\n century18 = dt.datetime(1800,1,1,0)\n #for i,j in enumerate(var[:]):\n # date = dt.datetime.utcfromtimestamp(j)\n # seconds = (date - century18).total_seconds()\n # hours = int( seconds / 60 / 60 )\n # var[i] = hours\n def change_unit(date):\n date = dt.datetime.utcfromtimestamp(date)\n seconds = (date - century18).total_seconds()\n hours = int( seconds / 60 / 60 )\n return hours\n\n vfunc = np.vectorize(change_unit)\n new_data = vfunc(var[:])\n var[:] = new_data\n setattr(var, 'standard_name', \"time\")\n setattr(var, 'long_name', \"time\")\n setattr(var, \"units\",\"hours since 1800-01-01 00:00:00.0\")\n setattr(var, \"calendar\", \"proleptic_gregorian\")\n return var",
"def add_time(data, t):\n data['year'] = t.year\n data['month'] = t.month\n data['day'] = t.day\n data['hour'] = t.hour\n data['minute'] = t.minute\n data['second'] = t.second"
] | [
"0.6587515",
"0.6190563",
"0.5912615",
"0.587993",
"0.58253014",
"0.56894726",
"0.56684715",
"0.56504494",
"0.5623413",
"0.5619893",
"0.55306077",
"0.55090916",
"0.5483374",
"0.5463484",
"0.5454657",
"0.5432568",
"0.5394258",
"0.53902286",
"0.53509086",
"0.5346871",
"0.5344033",
"0.5341192",
"0.5338431",
"0.5336991",
"0.5324031",
"0.5310452",
"0.5307989",
"0.5297769",
"0.5292309",
"0.52859557"
] | 0.7952806 | 0 |
Initialize some callbacks inline. Use this constructor to provide credentials and certificate callbacks inline, instead of defining your own class for them. You can, e.g., also pass in one of the credential objects as 'credentials' instead of creating a function which returns a hardcoded object. | def __init__(self, credentials=None, certificate=None):
if credentials is not None:
self.credentials = credentials
if certificate is not None:
self.certificate = certificate | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self, req, credentials_fn):\n self.req = req\n self.credentials_fn = credentials_fn",
"def __init__(self, cred=None, max_tries=5, callback=newcred):\n self.set_cred(cred)\n self.try_count = 1\n self.max_tries = max_tries\n self.callback = callback",
"def __init__(self,\r\n username=None, password=None,\r\n certChain=None, privateKey=None,\r\n checker=None,\r\n settings = None, \r\n anon = False):\r\n\r\n self.username = None\r\n self.password = None\r\n self.certChain = None\r\n self.privateKey = None\r\n self.checker = None\r\n self.anon = anon\r\n\r\n #SRP Authentication\r\n if username and password and not \\\r\n (certChain or privateKey):\r\n self.username = username\r\n self.password = password\r\n\r\n #Certificate Chain Authentication\r\n elif certChain and privateKey and not \\\r\n (username or password):\r\n self.certChain = certChain\r\n self.privateKey = privateKey\r\n\r\n #No Authentication\r\n elif not password and not username and not \\\r\n certChain and not privateKey:\r\n pass\r\n\r\n else:\r\n raise ValueError(\"Bad parameters\")\r\n\r\n self.checker = checker\r\n self.settings = settings\r\n\r\n self.tlsSession = None",
"def __init__(self, callback, *args, **kwargs):\n self.callback = lambda: callback(*args, **kwargs)",
"def __init__(__self__, *,\n cert: Optional[pulumi.Input[str]] = None,\n cname: Optional[pulumi.Input[str]] = None,\n key: Optional[pulumi.Input[str]] = None,\n status: Optional[pulumi.Input[Union[str, 'Status']]] = None):\n if cert is not None:\n pulumi.set(__self__, \"cert\", cert)\n if cname is not None:\n pulumi.set(__self__, \"cname\", cname)\n if key is not None:\n pulumi.set(__self__, \"key\", key)\n if status is None:\n status = 'Enabled'\n if status is not None:\n pulumi.set(__self__, \"status\", status)",
"def __init__(self):\n self.__client = Client(verify_ssl_cert=True)\n self.__headers = {'Content-Type': 'application/json'}\n self.login()",
"def __init__(self):\n try:\n context = ssl.create_default_context(\n purpose=ssl.Purpose.CLIENT_AUTH)\n context.options |= ssl.OP_NO_SSLv2\n context.options |= ssl.OP_NO_SSLv3\n context.options |= ssl.OP_NO_TLSv1\n context.options |= ssl.OP_NO_TLSv1_1\n context.options |= ssl.OP_NO_COMPRESSION\n context.verify_mode = ssl.CERT_REQUIRED\n # TODO do not use static configuration parameters\n context.load_verify_locations(cafile='/sbin/rpcsd/root.cert.pem')\n context.load_cert_chain(certfile='/sbin/rpcsd/gaps.pem')\n context.set_ciphers('AES128-SHA256')\n RPCS.context = context\n except FileNotFoundError:\n # If we can't set up TLS context, log error and exit\n LOG.error(\"Could not setup TLS context: certificate file(s) \"\n \"not present in the correct directory\")\n exit(1)",
"def __init__(self, proxy_callback=None, *args, **kwargs):\n self.proxy_callback = proxy_callback\n super(CASClientV2, self).__init__(*args, **kwargs)",
"def __init__(__self__, *,\n auth_type: pulumi.Input[str],\n certificate: pulumi.Input[str],\n client_id: pulumi.Input[str],\n principal_id: pulumi.Input[str]):\n pulumi.set(__self__, \"auth_type\", 'servicePrincipalCertificate')\n pulumi.set(__self__, \"certificate\", certificate)\n pulumi.set(__self__, \"client_id\", client_id)\n pulumi.set(__self__, \"principal_id\", principal_id)",
"def __init__(self, hostname, port, username, password, onUpdate\r\n\t\t\t\t,onConnect = lambda:None, onError = lambda:None):\r\n\t\tself.onConnect = onConnect\r\n\t\tself.onError = onError\r\n\t\tself.onUpdate = onUpdate\r\n\t\tfactory = pb.PBClientFactory()\r\n\t\treactor.connectTCP(hostname, port, factory)\r\n\t\td = factory.login(credentials.UsernamePassword(username, password)\r\n\t\t\t\t\t\t\t,self)\r\n\t\td.addCallback(self.OnConnect)\r\n\t\td.addErrback(self.OnError)",
"def __init__(self, username, password, **kwargs):\n self.url = 'https://heywatch.com'\n self.cli = httplib2.Http(**kwargs)\n self.cli.add_credentials(username, password)\n\n self.headers = {\n 'Accept': 'application/json',\n 'User-Agent': 'HeyWatch py/1.0.1',\n }\n\n self.account()",
"def __init__(__self__, *,\n client_certificate_config: Optional[pulumi.Input['ClientCertificateConfigArgs']] = None,\n cluster_ca_certificate: Optional[pulumi.Input[str]] = None,\n password: Optional[pulumi.Input[str]] = None,\n username: Optional[pulumi.Input[str]] = None):\n if client_certificate_config is not None:\n pulumi.set(__self__, \"client_certificate_config\", client_certificate_config)\n if cluster_ca_certificate is not None:\n pulumi.set(__self__, \"cluster_ca_certificate\", cluster_ca_certificate)\n if password is not None:\n pulumi.set(__self__, \"password\", password)\n if username is not None:\n pulumi.set(__self__, \"username\", username)",
"def __init__(__self__, *,\n certificate: Optional[pulumi.Input[str]] = None,\n certificate_id: Optional[pulumi.Input[str]] = None,\n certificate_name: Optional[pulumi.Input[str]] = None,\n domain: Optional[pulumi.Input[str]] = None,\n instance_id: Optional[pulumi.Input[str]] = None,\n private_key: Optional[pulumi.Input[str]] = None):\n if certificate is not None:\n pulumi.set(__self__, \"certificate\", certificate)\n if certificate_id is not None:\n pulumi.set(__self__, \"certificate_id\", certificate_id)\n if certificate_name is not None:\n pulumi.set(__self__, \"certificate_name\", certificate_name)\n if domain is not None:\n pulumi.set(__self__, \"domain\", domain)\n if instance_id is not None:\n pulumi.set(__self__, \"instance_id\", instance_id)\n if private_key is not None:\n pulumi.set(__self__, \"private_key\", private_key)",
"def __init__(__self__,\n resource_name: str,\n args: CertificateArgs,\n opts: Optional[pulumi.ResourceOptions] = None):\n ...",
"def __init__(self, use_datetime=0,\r\n username=None, password=None,\r\n certChain=None, privateKey=None,\r\n checker=None,\r\n settings=None,\r\n ignoreAbruptClose=False):\r\n\r\n # self._connection is new in python 2.7, since we're using it here,\r\n # we'll add this ourselves too, just in case we're pre-2.7\r\n self._connection = (None, None)\r\n xmlrpclib.Transport.__init__(self, use_datetime)\r\n self.ignoreAbruptClose = ignoreAbruptClose\r\n ClientHelper.__init__(self,\r\n username, password, \r\n certChain, privateKey,\r\n checker,\r\n settings)",
"def __init__(self, callback):\n if not callable(callback):\n raise TypeError(\"'callback' must be callable\")\n\n self._callback = callback\n self._blocked_items = {}\n self._blockers = {}",
"def __init__(self, **kw_args):\n self._isoFmt = \"%Y%m%dT%H%M%S%z\"\n\n self._init_client_id(kw_args)\n self._init_shared_secret(kw_args)\n self._init_counter_from_time(kw_args)\n self._init_last_count(kw_args)\n self._init_last_count_update_time(kw_args)\n self._init_period(kw_args)\n self._init_password_length(kw_args)\n self._init_tags(kw_args)\n self._init_note(kw_args)",
"def __init__(self, enterprise_cert_file_path):\n self._enterprise_cert_file_path = enterprise_cert_file_path\n self._cert = None\n self._sign_callback = None",
"def __init__(__self__, *,\n client_id: Optional[pulumi.Input[str]] = None,\n client_secret: Optional[pulumi.Input[str]] = None,\n consumer_id: Optional[pulumi.Input[str]] = None,\n hash_secret: Optional[pulumi.Input[bool]] = None,\n name: Optional[pulumi.Input[str]] = None,\n redirect_uris: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n tags: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if client_id is not None:\n pulumi.set(__self__, \"client_id\", client_id)\n if client_secret is not None:\n pulumi.set(__self__, \"client_secret\", client_secret)\n if consumer_id is not None:\n pulumi.set(__self__, \"consumer_id\", consumer_id)\n if hash_secret is not None:\n pulumi.set(__self__, \"hash_secret\", hash_secret)\n if name is not None:\n pulumi.set(__self__, \"name\", name)\n if redirect_uris is not None:\n pulumi.set(__self__, \"redirect_uris\", redirect_uris)\n if tags is not None:\n pulumi.set(__self__, \"tags\", tags)",
"def __init__(self, email: str, password: str, onMsg=None):\n\n self.email = email\n self.password = password\n self.authToken = mineauth.AuthenticationToken()\n self.connection = None\n self.is_connected = False\n self.onMsg = onMsg",
"def __init__(__self__, *,\n certificates: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n keys: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n secrets: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,\n storage: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None):\n if certificates is not None:\n pulumi.set(__self__, \"certificates\", certificates)\n if keys is not None:\n pulumi.set(__self__, \"keys\", keys)\n if secrets is not None:\n pulumi.set(__self__, \"secrets\", secrets)\n if storage is not None:\n pulumi.set(__self__, \"storage\", storage)",
"def __init__(self, kwargs):\n if 'PoetEnclaveImplementation' in kwargs:\n enclave_module = kwargs['PoetEnclaveImplementation']\n else:\n enclave_module = 'sawtooth_validator.consensus.poet0.' \\\n 'poet_enclave_simulator' \\\n '.poet0_enclave_simulator'\n\n poet_enclave = importlib.import_module(enclave_module)\n poet_enclave.initialize(**kwargs)\n WaitCertificate.poet_enclave = poet_enclave\n WaitTimer.poet_enclave = poet_enclave",
"def __init__(self, client_auth_type, client_id, client_secret=None):\n self.client_auth_type = client_auth_type\n self.client_id = client_id\n self.client_secret = client_secret",
"def __init__(self, tls_1_2=None, tls_1_1=None, tls_1_0=None, ssl_3_0=None):\n self.tls_1_2 = tls_1_2\n self.tls_1_1 = tls_1_1\n self.tls_1_0 = tls_1_0\n self.ssl_3_0 = ssl_3_0",
"def __init__(self, client_name, aws_key_id, aws_access_secret, region_name, logger):\n self.aws_key_id = aws_key_id\n self.aws_access_secret = aws_access_secret\n self.region_name = region_name\n\n self.client_name = client_name\n self.logger = logger\n self.connected = False",
"def __init__(self, creds_file):\n self.creds_file = creds_file\n self.service = None\n self.creds = None\n self.courses = None\n self.scopes = None\n self.client_id = None\n self.client_secret = None\n self.hostname = None",
"def __init__(self, credentials):\r\n if not has_httplib2:\r\n raise ImportError(\"No module named httplib2\")\r\n super(GAPDecoratorAuthMethod, self).__init__()\r\n self._http = None\r\n self._credentials = credentials\r\n self._action_token = None",
"def __init__(self, auth_key, auth_secret):\n\n self._auth_key = auth_key\n self._auth_secret = auth_secret",
"def __init__(self):\n\n self._authorization = None\n self._last_used = datetime.utcnow() - timedelta(hours=10)\n\n self._resource_owner_key = None\n self._resource_owner_secret = None\n\n self._consumer_key = etrade_config.oauth_consumer_key\n self._consumer_secret = etrade_config.oath_consumer_secret\n\n self._auth_file_path = etrade_config.auth_file_path\n self._user_name = etrade_config.user_name\n self._user_pwd = etrade_config.user_pwd",
"def __init__(self, *args, **kwargs):\n super(CBCloudAPI, self).__init__(*args, **kwargs)\n self._thread_pool_count = kwargs.pop('thread_pool_count', 1)\n self._lr_scheduler = None\n self._async_executor = None\n\n if not self.credentials.org_key:\n raise CredentialError(\"No organization key specified\")"
] | [
"0.65539837",
"0.64415884",
"0.6256035",
"0.6152882",
"0.61327577",
"0.6122212",
"0.60726905",
"0.5963776",
"0.5948654",
"0.5945171",
"0.5938793",
"0.59084725",
"0.59006447",
"0.58869493",
"0.5884443",
"0.5865886",
"0.58515286",
"0.5847019",
"0.57956296",
"0.5794808",
"0.5778625",
"0.5771992",
"0.5771409",
"0.5727996",
"0.57230425",
"0.57214665",
"0.5721029",
"0.57157266",
"0.5712381",
"0.5707295"
] | 0.7311035 | 0 |
Certificate callback. Override with your own function to determine whether to accept the server's certificate. | def certificate_check(self, certificate, valid, host):
raise Passthrough | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def verify_server_certificate(self):\n return self._verify_server_certificate",
"def verify_server_certificate(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"verify_server_certificate\")",
"def verify_server_certificate(self) -> bool:\n return pulumi.get(self, \"verify_server_certificate\")",
"def verify_SSL_certificate(self, code: str) -> bool:\n return True",
"def negotiate_client_certificate(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"negotiate_client_certificate\")",
"def _check_ca_certificate(self):\n if not os.path.exists(self._ca_certificate_path):\n with open(self._ca_certificate_path, \"w\") as f:\n f.write(ssl.get_server_certificate((\"127.0.0.1\", self._app_port), ssl_version=ssl.PROTOCOL_TLSv1_2))",
"def verify_server_certificate(self, verify_server_certificate):\n\n self._verify_server_certificate = verify_server_certificate",
"def issue_client_certificate(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"issue_client_certificate\")",
"def test_cert(self):\n\n try:\n client = SSLClient(host=FQDN, ip=APPLIANCE, usercert=CERT, sslverifyhost=True, cabundle=CABUNDLE)\n self.assertTrue(1==1, \"SSLClient connects with cabundle\")\n except Exception as exception:\n print(exception)\n self.fail(\"SSLClient did not connect\")\n \n response = client.send_command('LIST')\n self.assertEqual(response.ret, 100)\n\n client.disconnect()",
"def _check_authz_certificate_cb(self, key, value, authz_list_key=None, authz_list=None):\n # FIXME: should this include certificate exchange?\n _log.debug(\"_check_authz_certificate_cb\"\n \"\\n\\tkey={}\"\n \"\\n\\tvalue={}\".format(key, value))\n if value:\n certstr = value[0]\n try:\n certx509 = self.node.runtime_credentials.certificate.truststore_transport.verify_certificate_str(certstr)\n except Exception as err:\n _log.error(\"Failed to verify the authorization servers certificate from storage, err={}\".format(err))\n raise\n if not \"authzserver\" in certificate.cert_CN(certstr):\n _log.error(\"The runtime IS NOT certified by the CA as an authorization server, let's try another one.\")\n self._register_node_cb(key=authz_list_key, value=authz_list)\n else:\n _log.info(\"The runtime IS certified by the CA as an authorization server\")\n self.register_node_external()",
"def xforwardedforclientcertclientverifyenabled(self) -> bool:\n return pulumi.get(self, \"xforwardedforclientcertclientverifyenabled\")",
"def x_forwarded_for_client_cert_client_verify_enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"x_forwarded_for_client_cert_client_verify_enabled\")",
"def _validate_cert(self):\r\n cert = self.handle.getpeercert()\r\n self.peercert = cert\r\n if 'subject' not in cert:\r\n raise TTransportException(type=TTransportException.NOT_OPEN,\r\n message='No SSL certificate found from %s:%s' % (self.host, self.port))\r\n fields = cert['subject']\r\n for field in fields:\r\n # ensure structure we get back is what we expect\r\n if not isinstance(field, tuple):\r\n continue\r\n cert_pair = field[0]\r\n if len(cert_pair) < 2:\r\n continue\r\n cert_key, cert_value = cert_pair[0:2]\r\n if cert_key != 'commonName':\r\n continue\r\n certhost = cert_value\r\n if certhost == self.host:\r\n # success, cert commonName matches desired hostname\r\n self.is_valid = True\r\n return \r\n else:\r\n raise TTransportException(type=TTransportException.UNKNOWN,\r\n message='Host name we connected to \"%s\" doesn\\'t match certificate provided commonName \"%s\"' % (self.host, certhost))\r\n raise TTransportException(type=TTransportException.UNKNOWN,\r\n message='Could not validate SSL certificate from host \"%s\". Cert=%s' % (self.host, cert))",
"def test_x509_in_verify_works(self):\n serverContext = Context(SSLv23_METHOD)\n serverContext.use_privatekey(\n load_privatekey(FILETYPE_PEM, root_key_pem)\n )\n serverContext.use_certificate(\n load_certificate(FILETYPE_PEM, root_cert_pem)\n )\n serverConnection = Connection(serverContext, None)\n\n def verify_cb_get_subject(conn, cert, errnum, depth, ok):\n assert cert.get_subject()\n return 1\n\n clientContext = Context(SSLv23_METHOD)\n clientContext.set_verify(VERIFY_PEER, verify_cb_get_subject)\n clientConnection = Connection(clientContext, None)\n clientConnection.set_connect_state()\n\n handshake_in_memory(clientConnection, serverConnection)",
"def verify_cert(public_key, cert):\n try:\n public_key.verify(\n signature=cert.signature,\n data=cert.tbs_certificate_bytes,\n signature_algorithm=ec.ECDSA(cert.signature_hash_algorithm)\n )\n except:\n return 'failure'\n\n return 'success'",
"def _server_select_certificate(self, settings, client_hello,\n cipher_suites, cert_chain,\n private_key, version):\n\n last_cert = False\n possible_certs = []\n\n # Get client groups\n client_groups = client_hello. \\\n getExtension(ExtensionType.supported_groups)\n if client_groups is not None:\n client_groups = client_groups.groups\n\n # If client did send signature_algorithms_cert use it,\n # otherwise fallback to signature_algorithms.\n # Client can also decide not to send sigalg extension\n client_sigalgs = \\\n client_hello. \\\n getExtension(ExtensionType.signature_algorithms_cert)\n if client_sigalgs is not None:\n client_sigalgs = \\\n client_hello. \\\n getExtension(ExtensionType.signature_algorithms_cert). \\\n sigalgs\n else:\n client_sigalgs = \\\n client_hello. \\\n getExtension(ExtensionType.signature_algorithms)\n if client_sigalgs is not None:\n client_sigalgs = \\\n client_hello. \\\n getExtension(ExtensionType.signature_algorithms). \\\n sigalgs\n else:\n client_sigalgs = []\n\n # Get all the certificates we can offer\n alt_certs = ((X509CertChain(i.certificates), i.key) for vh in\n settings.virtual_hosts for i in vh.keys)\n certs = [(cert, key)\n for cert, key in chain([(cert_chain, private_key)], alt_certs)]\n\n for cert, key in certs:\n\n # Check if this is the last (cert, key) pair we have to check\n if (cert, key) == certs[-1]:\n last_cert = True\n\n # Mandatory checks. If any one of these checks fail, the certificate\n # is not usuable.\n try:\n # Find a suitable ciphersuite based on the certificate\n ciphers = CipherSuite.filter_for_certificate(cipher_suites, cert)\n for cipher in ciphers:\n if cipher in client_hello.cipher_suites:\n break\n else:\n if client_groups and \\\n any(i in range(256, 512) for i in client_groups) and \\\n any(i in CipherSuite.dhAllSuites\n for i in client_hello.cipher_suites):\n raise TLSInsufficientSecurity(\n \"FFDHE groups not acceptable and no other common \"\n \"ciphers\")\n raise TLSHandshakeFailure(\"No mutual ciphersuite\")\n\n # Find a signature algorithm based on the certificate\n try:\n sig_scheme, _, _ = \\\n self._pickServerKeyExchangeSig(settings,\n client_hello,\n cert,\n key,\n version,\n False)\n except TLSHandshakeFailure:\n raise TLSHandshakeFailure(\n \"No common signature algorithms\")\n\n # If the certificate is ECDSA, we must check curve compatibility\n if cert and cert.x509List[0].certAlg == 'ecdsa' and \\\n client_groups and client_sigalgs:\n public_key = cert.getEndEntityPublicKey()\n curve = public_key.curve_name\n for name, aliases in CURVE_ALIASES.items():\n if curve in aliases:\n curve = getattr(GroupName, name)\n break\n\n if version <= (3, 3) and curve not in client_groups:\n raise TLSHandshakeFailure(\n \"The curve in the public key is not \"\n \"supported by the client: {0}\" \\\n .format(GroupName.toRepr(curve)))\n\n if version >= (3, 4):\n if GroupName.toRepr(curve) not in \\\n ('secp256r1', 'secp384r1', 'secp521r1'):\n raise TLSIllegalParameterException(\n \"Curve in public key is not supported \"\n \"in TLS1.3\")\n\n # If all mandatory checks passed add\n # this as possible certificate we can use.\n possible_certs.append((cipher, sig_scheme, cert, key))\n\n except Exception:\n if last_cert and not possible_certs:\n raise\n continue\n\n # Non-mandatory checks, if these fail the certificate is still usable\n # but we should try to find one that passes all the checks\n\n # Check if every certificate(except the self-signed root CA)\n # in the certificate chain is signed with a signature algorithm\n # 
supported by the client.\n if cert:\n cert_chain_ok = True\n for i in range(len(cert.x509List)):\n if cert.x509List[i].issuer != cert.x509List[i].subject:\n if cert.x509List[i].sigalg not in client_sigalgs:\n cert_chain_ok = False\n break\n if not cert_chain_ok:\n if not last_cert:\n continue\n break\n\n # If all mandatory and non-mandatory checks passed\n # return the (cert, key) pair, cipher and sig_scheme\n return cipher, sig_scheme, cert, key\n\n # If we can't find cert that passed all the checks, return the first usable one.\n return possible_certs[0]",
"def get_ssl_certificate() :",
"def client_certificate_enabled(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"client_certificate_enabled\")",
"def validateSSL(self):\n return self.__validate_ssl",
"def may_certify(self):\r\n return self.certificates_show_before_end or self.has_ended()",
"def get_ssl_certificate():",
"def test_use_certificate(self, ctx_or_conn):\n # TODO\n # Hard to assert anything. But we could set a privatekey then ask\n # OpenSSL if the cert and key agree using check_privatekey. Then as\n # long as check_privatekey works right we're good...\n ctx_or_conn.use_certificate(\n load_certificate(FILETYPE_PEM, root_cert_pem)\n )",
"def _self_signed(cert):\n\n self_signed = cert.self_signed\n\n if self_signed == 'yes':\n return True\n if self_signed == 'no':\n return False\n\n # In the case of \"maybe\", we have to check the signature\n signature_algo = cert['signature_algorithm'].signature_algo\n hash_algo = cert['signature_algorithm'].hash_algo\n\n if signature_algo == 'rsassa_pkcs1v15':\n verify_func = asymmetric.rsa_pkcs1v15_verify\n elif signature_algo == 'dsa':\n verify_func = asymmetric.dsa_verify\n elif signature_algo == 'ecdsa':\n verify_func = asymmetric.ecdsa_verify\n else:\n raise PathValidationError(pretty_message(\n '''\n Unable to verify the signature of the certificate since it uses\n the unsupported algorithm %s\n ''',\n signature_algo\n ))\n\n try:\n key_object = asymmetric.load_certificate(cert)\n verify_func(key_object, cert['signature_value'].native, cert['tbs_certificate'].dump(), hash_algo)\n return True\n\n except (oscrypto.errors.SignatureError):\n return False",
"def _verification_needed(cacert, insecure):\n if insecure is False or insecure is None:\n verify = cacert or True\n else:\n verify = False\n return verify",
"def client_certificate_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"client_certificate_enabled\")",
"def client_certificate_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"client_certificate_enabled\")",
"def watch_x509_context(\n self,\n on_success: Callable[[X509Context], None],\n on_error: Callable[[Exception], None],\n retry_connect: bool = True,\n ) -> CancelHandler:",
"def certificate_auth():\r\n url = 'https://www.12306.cn'\r\n response = requests.get(url, verify=False)\r\n print(response.status_code)\r\n print(response.text)",
"def test_set_verify_callback_connection_argument(self):\n serverContext = Context(SSLv23_METHOD)\n serverContext.use_privatekey(\n load_privatekey(FILETYPE_PEM, root_key_pem)\n )\n serverContext.use_certificate(\n load_certificate(FILETYPE_PEM, root_cert_pem)\n )\n serverConnection = Connection(serverContext, None)\n\n class VerifyCallback:\n def callback(self, connection, *args):\n self.connection = connection\n return 1\n\n verify = VerifyCallback()\n clientContext = Context(SSLv23_METHOD)\n clientContext.set_verify(VERIFY_PEER, verify.callback)\n clientConnection = Connection(clientContext, None)\n clientConnection.set_connect_state()\n\n handshake_in_memory(clientConnection, serverConnection)\n\n assert verify.connection is clientConnection",
"def certificate_verification(self) -> Optional[pulumi.Input[Union[str, 'GatewayCertificateVerification']]]:\n return pulumi.get(self, \"certificate_verification\")"
] | [
"0.69057816",
"0.6904875",
"0.6790211",
"0.646486",
"0.63657093",
"0.61513364",
"0.6096049",
"0.60317755",
"0.5979905",
"0.5940069",
"0.5871149",
"0.58208966",
"0.57788336",
"0.5753447",
"0.57468694",
"0.574129",
"0.570776",
"0.56400156",
"0.5629674",
"0.56151485",
"0.5603868",
"0.55951643",
"0.55630916",
"0.5552037",
"0.5539482",
"0.5539482",
"0.55330765",
"0.55263907",
"0.55236566",
"0.5517941"
] | 0.7030046 | 0 |
Transfer progress callback Override with your own function to report transfer progress. | def transfer_progress(self, stats): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def onTransferUpdate(self, api, transfer):\n logging.info('Transfer update ({} {});'\n ' Progress: {} KB of {} KB, {} KB/s'\n .format(transfer,\n transfer.getFileName(),\n transfer.getTransferredBytes() / 1024,\n transfer.getTotalBytes() / 1024,\n transfer.getSpeed() / 1024))",
"def doProgress(self,progress,message):\n pass",
"def reportProgress(self):\n \n pass",
"def copy_progress(self, percentage_complete, filecount, filecomplete):\n ##TODO: display the current transfer rate\n ##TODO: display the current file being transferred and possibly the progress thereof.\n ##Perhaps use the statusbar method for this\n self.progress.setValue(int(percentage_complete))",
"def progress(self, arg, num_done, info=''):\n pass",
"def registerProgressCallback(self, callback):\n assert False, \"Deriving class must implement\"",
"def progress_callback(self, func):\n self.curl.setopt(pycurl.PROGRESSFUNCTION, func)",
"def cb(complete,total):\n percent = int(complete * 100.0 / total)\n log.info(\"Download completion: {0}%\".format(percent))",
"def cb(self, complete, total):\n percent = int(complete * 100.0 / total)\n log.info(\"Upload completion: {0}%\".format(percent))",
"def notify_progress(self, progress_data):\n pass # pragma: no cover",
"def cb(complete, total):\n percent = int(complete * 100.0 / total)\n log.info(\"Download completion: {0}%\".format(percent))",
"def download_progress(self, cloud_file, size, downloaded):",
"def OnProgress(bytes_read, total_bytes, percent):\n sys.stdout.write(\"progress: %.2f%% \\r\" % (percent))\n sys.stdout.flush()",
"def upload_progress(self, cloud_file, size, uploaded):",
"def upload_add_progress(self, nbytes):\n\n self.send_cur_nbytes += nbytes\n if self.send_goal_nbytes != 0:\n self.republish_output()",
"def cb(self, complete, total):\n \"\"\"Swift client does not support callbak\"\"\"\n percent = int(complete * 100.0 / total)\n log.info(\"Upload completion: {0}%\".format(percent))",
"def getProgress(self):",
"def handle_put_progress(self, filegen):\n # print \"bytes so-far: \", filegen.bytes_read\n\n if self.maybe_touch():\n self.log(\"UPLOAD_PROGRESS\", level=INFO)\n self.touch()\n Backend.touch(\n self.current_upload,\n bytes_downloaded=filegen.bytes_read,\n location=self.location)",
"def _on_progress(self, num):\n self._num_progresses += num\n self._log.info(\"Progress incrementing by {}\".format(num))\n self._host_comms.send_msg(\"progress\", num)",
"def progress(self, loaded, total, msg=''):\n\n self.fire('progress', {\n 'loaded': loaded,\n 'total': total,\n 'msg': msg\n })",
"def gdal_progress_callback(complete, message, data):\n if data:\n data.update(int(complete * 100) - data.n)\n if complete == 1:\n data.close()\n return 1",
"def _progressCallback(progress):\n if isinstance(progress, str):\n _progressBar.Start(progress)\n _progressBar._t0 = time.time()\n elif progress is None:\n dt = time.time() - _progressBar._t0\n _progressBar.Finish(f'{dt:2.2f} seconds')\n else:\n _progressBar.Update(progress)",
"def push_progress(self, status, object_id, progress):\n pass",
"def report_step_progress(self, step):\n pass",
"def set_progress(self, progress: float):",
"def ffmpeg_progress_hook(self, progress: int) -> None:\n\n if self.parent.simple_tui and not self.parent.web_ui:\n self.progress = 50\n else:\n self.progress = 50 + int(progress * 0.45)\n\n self.update(\"Converting\")",
"def progress(self, progress):\n\n self._progress = progress",
"def progress(self, progress):\n\n self._progress = progress",
"def _progress(self, walker):\n\n raise NotImplementedError",
"def onTransferFinish(self, api, transfer, error):\n logging.info('Transfer finished ({}); Result: {}'\n .format(transfer, transfer.getFileName(), error))\n self.continue_event.set()"
] | [
"0.7220874",
"0.7112329",
"0.69391227",
"0.69087076",
"0.68912673",
"0.6877024",
"0.6770549",
"0.67233896",
"0.67167825",
"0.6671872",
"0.66329044",
"0.6622333",
"0.66127753",
"0.65370864",
"0.65070117",
"0.64977056",
"0.6437404",
"0.6410456",
"0.63769513",
"0.6359364",
"0.63344514",
"0.6331245",
"0.6312691",
"0.6273992",
"0.62730056",
"0.6272455",
"0.6244322",
"0.6244322",
"0.62078965",
"0.61415625"
] | 0.7890586 | 0 |
Update tips callback Override with your own function to report reference updates | def update_tips(self, refname, old, new): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __editShowCallTips(self):\n self.activeWindow().callTip()",
"def update_tips(self, usageText):\r\n try:\r\n self.widgetList[13].config(text=usageText, justify=LEFT)\r\n except TclError:\r\n kT.debug_log(\"Changed menu\", sys.exc_info()[2])\r\n return",
"def help_update(self):\n print(UPDATE)",
"def call_toolTips(self):\n self.choose_fold_button.setToolTip(\"Select folder containing the specific test TCReader log files\")\n self.get_test_presets.setToolTip(\"Select the test set points (temperatures that you want statistics for)\")\n self.selectdata_label.setToolTip(\"In this section select the folder with the .csv log files and name the \"\n \"units and TCs you want to analyze.\\n \"\n \"The tool will concatenate all files with the same unit names, so make sure \"\n \"to place only files that you require to analyze.\")\n self.date_sort_checkbox.setToolTip(\"Sort csv log files in respect to time\")\n self.temp_range.setToolTip(\"Temperature range around the set points to be analyzed\")\n self.mean_stdev.setToolTip(\"Add mean and standard deviation of each set point to output\")\n self.max_min.setToolTip(\"Add max and minimum of each set point to output\")\n self.analyze_button.setToolTip(\"Perform analysis and output it to test folder \")",
"def calculate_tip(meal_base, tip_rate):",
"def addToolTip(self, tip: str):\n self.setToolTip(tip)\n self.setFont(qtawesome.font('fa', 13))\n self.setText(self.text() + ' ' + chr(0xf059))",
"def update_tip_pose(self):\n world_pose_eef = get_link_pose(self.body, self.EEF_LINK_INDEX)\n wTe = get_matrix_from_pose_2d(world_pose_eef) # world_T_eef\n world_pose_eef = get_link_pose(self.body, self.TIP_LINK_INDEX)\n wTt = get_matrix_from_pose_2d(world_pose_eef) # world_T_tip\n self.eTt = np.matmul(np.linalg.inv(wTe), wTt)\n self.tTe = np.linalg.inv(self.eTt)",
"def addManualTip(self,A):\n #obsolete?\n profbox()\n self.fiducialNode = slicer.mrmlScene.CreateNodeByClass('vtkMRMLAnnotationFiducialNode')\n self.fiducialNode.Initialize(slicer.mrmlScene)\n self.fiducialNode.SetName('tip')\n self.fiducialNode.SetFiducialCoordinates(A)\n fd=self.fiducialNode.GetDisplayNode()\n fd.SetVisibility(1)\n fd.SetColor([0,1,0])",
"def CallTipShow(self, position, tip):\n self.CallTipCancel()\n super(EditraBaseStc, self).CallTipShow(position, tip)",
"def __UseTip(self, sector, chosenTip):\n if (sector, chosenTip) != self.__m_Platform.CurrentTipID():\n self.__m_Platform.StripTip()\n self.__m_Platform.PickupTip(sector, chosenTip)",
"def update_info(self):\n self.m_canvas.master.m_informations_displayer.set_operations(\n self.m_current_index\n )\n self.m_canvas.master.m_informations_displayer.set_time(\n self.m_history[self.m_current_index].m_passed_time\n )",
"def help(update, context):\n update.message.reply_text('Help! \\n /traccia per tracciare instantaneamente i prezzi \\n /check per far partire il check periodico \\n /stopcheck per far fermare il check periodico')",
"def getTip(self):\n return None",
"def UpdateLabel(self) -> _n_6_t_0:",
"def tips(bot, update):\n messageContent = random.choice(TIPS)\n bot.sendMessage(chat_id=update.message.chat_id, text=messageContent, parse_mode='markdown')",
"def addManualTip(self, A):\r\n # obsolete?\r\n profbox()\r\n self.fiducialNode = slicer.mrmlScene.CreateNodeByClass('vtkMRMLAnnotationFiducialNode')\r\n self.fiducialNode.Initialize(slicer.mrmlScene)\r\n self.fiducialNode.SetName('tip')\r\n self.fiducialNode.SetFiducialCoordinates(A)\r\n fd = self.fiducialNode.GetDisplayNode()\r\n fd.SetVisibility(1)\r\n fd.SetColor([0, 1, 0])",
"def update(self):",
"def update(self):",
"def update(self):",
"def _update_(self):\n self._update_distance_()\n self._check_literature_name_()",
"def update():",
"def update():",
"def getCallTip(self, command='', *args, **kwds):\n return ('', '', '')",
"def update( ):\r\n pass",
"async def _notes(self, ctx: Context):\n pass",
"def OnUpdatePlantCtrl(self, _):\n self.saveTexts()\n self.updateMarkers()",
"def ref_updated(self, event):\n pass",
"def update(self, *args, **kwargs):",
"def updatehelp(inp):\n funcs = sorted(\n {v for k, v in core.COMMANDS.items()}, key=lambda x: x.__name__)\n core.stats_wiki('jarvis').edit(\n utils.load_template('help.template', funcs=funcs))\n return lex.updatehelp",
"def tweaks(self) -> None:\n pass"
] | [
"0.6555432",
"0.64051604",
"0.5982053",
"0.58614695",
"0.5771405",
"0.570853",
"0.5652499",
"0.5582469",
"0.5555294",
"0.5554527",
"0.55478805",
"0.5545224",
"0.5434774",
"0.5425506",
"0.5390354",
"0.5354941",
"0.53439796",
"0.53439796",
"0.53439796",
"0.5318367",
"0.5292375",
"0.5292375",
"0.5290694",
"0.5259405",
"0.52286154",
"0.5206964",
"0.5197539",
"0.5164759",
"0.51621556",
"0.5152053"
] | 0.8590424 | 0 |
Push update reference callback Override with your own function to report the remote's acceptance or rejection of reference updates. | def push_update_reference(self, refname, message): | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ref_updated(self, event):\n pass",
"def _notify_update(self, cuds_object):",
"def notify(self, ref_output=None, moves_made=None):\n pass",
"def update_callback(app):\n print(\"Update callback invoked for %s\" % app.name)\n\n # TODO: Add integration tests here\n return True",
"def onUpdated(self):",
"def callback(self, obj):\r\n assert self.__obj is None, 'Only one object can be registered.'\r\n assert isinstance(obj, RemoteReference)\r\n\r\n # Store the remote reference\r\n self.__obj = obj\r\n\r\n # inform when the remote reference is disconnected using __disconnected\r\n obj.notifyOnDisconnect(self.__disconnected)\r\n\r\n # Call all remaining remote calls made before the remote reference\r\n # arrived\r\n for pending in self.__pending:\r\n pending.callback(obj)\r\n\r\n self.__pending = None",
"async def _on_ref_change(self, _change=None):\n self._update_heads()\n self._update_head_history()\n for remote in self.remotes.values():\n await remote._update_heads()",
"def after_update(self, *args):\n raise NotImplementedError",
"def MyDataChangedCallback(self, inRefcon):\r\n pass",
"def svn_changelist_invoke_receiver(svn_changelist_receiver_t__obj, void_baton, char_path, char_changelist, apr_pool_t_pool): # real signature unknown; restored from __doc__\n pass",
"def svn_notify(self,event):\n # pysvn.wc_notify_action.update_completed\n if event['action'] == pysvn.wc_notify_action.update_completed:\n revision = event['revision']\n self.revision = revision",
"def _subscribe_update_callback(self, client, userdata, message):\n logger.info('Message recieved from {} topic'.format(message.topic))\n payload = message.payload\n try:\n payload_dict = json.loads(payload)\n light_data = payload_dict['current']['state']['desired']\n if self.light.needs_updating(light_data):\n self.light.update_lights(light_data)\n reported_payload = {\n 'state': {\n 'reported': self.light.current_settings()\n }\n }\n JSON_payload = json.dumps(reported_payload)\n self.shadowClient.publish(update_topic, JSON_payload, 0)\n except ValueError:\n logger.error('Value error')\n logger.info(payload)\n except Exception as e:\n logger.error(e.message)",
"def notifyChange(self, uri, observer, syncToNetwork=None, flags=None):\n pass",
"def set_update_received_callback(self, callback):\n self.__update_received = callback",
"def on_update(self):\n raise NotImplemented(\"on_update method should be implemented.\")",
"def update(self, *args, **kwargs):\n return self.callback_func(*args, **kwargs)",
"def dispatch_push(self, p, tweaks, badge):\n pass",
"def XPLMDataChanged_f(inRefcon):",
"def update(self, *args, **kwargs):",
"def on_call_update(self, event):\n # if plivo_app != 'true', check b leg Dial callback\n plivo_app_flag = event['variable_plivo_app'] == 'true'\n if not plivo_app_flag:\n # request Dial callbackUrl if needed\n aleg_uuid = event['Bridged-To']\n if not aleg_uuid:\n return\n bleg_uuid = event['Unique-ID']\n if not bleg_uuid:\n return\n disposition = event['variable_endpoint_disposition']\n if disposition != 'ANSWER':\n return\n ck_url = event['variable_plivo_dial_callback_url']\n if not ck_url:\n return\n ck_method = event['variable_plivo_dial_callback_method']\n if not ck_method:\n return\n params = {'DialBLegUUID': bleg_uuid,\n 'DialALegUUID': aleg_uuid,\n 'DialBLegStatus': 'answer',\n 'CallUUID': aleg_uuid\n }\n # add extra params\n extra_params = self.get_extra_fs_vars(event)\n if extra_params:\n params.update(extra_params)\n spawn_raw(self.send_to_url, ck_url, params, ck_method)\n return",
"def async_update(self):",
"def on_notify(self, name):\r\n pass",
"def handle_update(self, call):\n self.fire_event(EVENT_UPDATE)",
"def test_check_update_calls_callback_when_update_available():\n with requests_mock.mock() as mocked_server:\n mocked_server.post(_base_url + _check_update_endpoint, text=_json_update_response, status_code=200)\n\n request = UpdateDetailRequest('v1', 'MyDevice', '{\"AnyCustomData\":\"any_value\"}')\n update_helper = UpdateCheckHelper(_api_key, _base_url)\n\n update_helper.check_update(request, update_available_callback)",
"def error_cb(update, context):\n config.logger.warning('Update \"%s\" caused error \"%s\"', update, context.error)",
"def _update_references(self, oldref, newref, key_in_ref):\n keys = self._backreference_keys(oldref, key_in_ref)\n assert(keys is not None)\n self.__update_field_references(oldref, newref,\n list(set(self.__class__.REFERENCE_FIELDS)\n .intersection(keys)))\n if hasattr(self, \"_refs\"):\n # note: keeping the two types of nonfield references separate helps\n # in subclasses where only one must be redefined\n self.__update_dependent_line_references(oldref, newref,\n set(self.__class__.DEPENDENT_LINES)\n .intersection(self._refs.keys())\n .intersection(keys))\n self.__update_other_references(oldref, newref,\n list(set(self.__class__.OTHER_REFERENCES)\n .intersection(self._refs.keys())\n .intersection(keys)))",
"def update(self, *args, **kwargs):\n # callable, but does nothing by default",
"def remote_push(self, pNamespace):",
"def update(self, *args, **kwargs): # real signature unknown\n pass",
"def update(self, *args, **kwargs): # real signature unknown\n pass"
] | [
"0.700554",
"0.586919",
"0.58252454",
"0.55110234",
"0.5484074",
"0.5471983",
"0.5470453",
"0.5423666",
"0.5410064",
"0.53476197",
"0.53475934",
"0.5346236",
"0.5307683",
"0.528483",
"0.5284799",
"0.52797055",
"0.5253185",
"0.52331346",
"0.5220044",
"0.5199602",
"0.5196633",
"0.5183001",
"0.5148045",
"0.5132754",
"0.5131772",
"0.51313496",
"0.5128562",
"0.512539",
"0.51236415",
"0.51236415"
] | 0.7560829 | 0 |
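The three rows above (transfer_progress, update_tips, push_update_reference) are callback hooks meant to be overridden. A minimal sketch of how they might be wired up, assuming current pygit2 where these hooks live on a RemoteCallbacks subclass; the repository path "." and the 'origin' remote name are illustrative assumptions, not part of the rows above:

import pygit2

class ProgressCallbacks(pygit2.RemoteCallbacks):
    def transfer_progress(self, stats):
        # stats carries counters such as indexed_objects / total_objects
        print(f"indexed {stats.indexed_objects}/{stats.total_objects} objects")

    def update_tips(self, refname, old, new):
        # reports each reference that moved during the fetch
        print(f"{refname}: {old} -> {new}")

    def push_update_reference(self, refname, message):
        # message is None when the remote accepted the reference update
        if message is not None:
            print(f"push of {refname} rejected: {message}")

# usage sketch (assumes a local clone with an 'origin' remote):
# repo = pygit2.Repository(".")
# repo.remotes["origin"].fetch(callbacks=ProgressCallbacks())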
Name of the remote | def name(self):
return maybe_string(C.git_remote_name(self._remote)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remote_hostname(self):\n return pn_connection_remote_hostname(self._impl)",
"def get_remote_name(self, file_path: str, remote_name: str):\n if remote_name is None:\n remote_name = os.path.basename(file_path)\n return remote_name",
"def getRemoteHost():",
"def server_name(self) -> str:\n return pulumi.get(self, \"server_name\")",
"def get_hostname(self):\n return self.name",
"def name(self) -> str:\n return self._alias or f\"Nut-{self._host}\"",
"def via_host_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"via_host_name\")",
"def get_current_remote_name(self) -> str:\n match = self.status()\n\n if match.branch_upstream is None: # no upstream set\n if match.branch_head is None:\n raise Exception(\"No branch found for git repository\")\n return match.branch_head\n if match.branch_head is None:\n return match.branch_upstream\n\n return match.branch_upstream.replace(\"/\" + match.branch_head, \"\")",
"def name(self) -> str:\n return f\"{self._inst} NAT {self._data['name']}\"",
"def server_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"server_name\")",
"def server_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"server_name\")",
"def __str__(self):\n return self.host_name",
"def name(self):\n return \"myhomeserver1_\" + self._light_id",
"def get_name(self):\n \n return 'TCP/IP Server'",
"def get_name(self):\n \n return 'UDP/IP Server'",
"def host_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"host_name\")",
"def host_name(self):\n return self._host_name",
"def get_server_name(self):\n configured_value = self.charm_config[\"server-name\"]\n if configured_value:\n return configured_value\n else:\n fqdn = socket.getfqdn()\n return fqdn",
"def get_host_name(self):\n return self.get_command_output(\"hostname\").strip(\"\\n\")",
"def name(self):\n # self._name = \"wyzeapi_\"+self._device_mac+\"_\"+ self._name\n return self._device.nickname",
"def name(self):\n return \"{} {}\".format(self._clientname, self._name)",
"def host_name(self) -> str:\n return self._values.get('host_name')",
"def server_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"server_name\")",
"def server_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"server_name\")",
"def _repr_remote(self):\n return \"%s:%d\" % (self.remote_address)",
"def hostname(self):\n return 'localhost'",
"def name(self) -> str:\n return 'oltp'",
"def get_name(self):\n \n return 'Socket/IP'",
"def peername(self):\n return self.socket_.getpeername()",
"def github_name(self):\n return self.github_url.replace(\"https://github.com/\", '')"
] | [
"0.7341725",
"0.7134672",
"0.7032486",
"0.696617",
"0.693461",
"0.6897008",
"0.68189836",
"0.67615247",
"0.6744413",
"0.67394644",
"0.67394644",
"0.6712816",
"0.67052776",
"0.6688957",
"0.66647696",
"0.65945596",
"0.65823656",
"0.6563257",
"0.65593636",
"0.6494698",
"0.6481669",
"0.646157",
"0.6426272",
"0.6426272",
"0.6408237",
"0.64027005",
"0.64020187",
"0.640117",
"0.63821846",
"0.6367727"
] | 0.7960541 | 0 |
Url of the remote | def url(self):
return maybe_string(C.git_remote_url(self._remote)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remote_url(self) -> str:\n return f\"https://api.figma.com/v1/files/{self.file_id}\"",
"def url(self):\n url = self.url\n return url",
"def getRemoteUrl(self):\n # try getting the remote object by unique id\n remote_url = self._getRemoteUrlTheOldWay()\n remote_obj = self._getObjectByUid()\n if remote_obj:\n url = remote_obj.absolute_url()\n # update the url when changed (avoid unnecessary ZODB writes)\n if url != remote_url:\n self.edit(url)\n return url\n\n return remote_url",
"def get_url(self):\n return self.url",
"def get_url(self):\n return self.url",
"def geturl(self):\n return self.__url",
"def url(self) -> str:\n return pulumi.get(self, \"url\")",
"def url(self) -> str:\n return pulumi.get(self, \"url\")",
"def url(self):\n return self._client.url",
"def getUrl(self):\n return self.url",
"def get_url(self):\n return self.resource.url",
"def _getRemoteUrlTheOldWay(self):\n utool = getUtility(IURLTool)\n if self.remote_url:\n return utool() + '/' + self.remote_url\n else:\n return utool()",
"def get_url(self):\n\n return self.url",
"def get_url(self):\n\n return self.url",
"def url(self):\n return self._url",
"def url(self):\n return self._url",
"def url(self):\n return self._url",
"def url(self):\n return self._url",
"def url(self):\n return self._url",
"def url(self):\n return self._url",
"def url(self):\n return self._url",
"def url(self):\n return self._url",
"def url(self):\n return self._url",
"def url(self):\n return self._url",
"def git_remote_url(self):\n return self._git_remote_url",
"def _get_url(self):\n return 'http://{}:{}'.format(self.host, self.port)",
"def url(self):\n\n return self._url",
"def url(self):\n\n return self._url",
"def url(self):\n\n return self._url",
"def get_url(self):\n return self._url"
] | [
"0.7809164",
"0.7752618",
"0.7744077",
"0.76482004",
"0.76482004",
"0.76167583",
"0.7611277",
"0.7611277",
"0.76024985",
"0.7592173",
"0.7590239",
"0.7567472",
"0.75656253",
"0.75656253",
"0.7537656",
"0.7537656",
"0.7537656",
"0.7537656",
"0.7537656",
"0.7537656",
"0.7537656",
"0.7537656",
"0.7537656",
"0.7537656",
"0.752228",
"0.7496791",
"0.74561334",
"0.74561334",
"0.74561334",
"0.7434268"
] | 0.7905048 | 0 |
Push url of the remote | def push_url(self):
return maybe_string(C.git_remote_pushurl(self._remote)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_push_url(self, name, url):\n err = C.git_remote_set_pushurl(self._repo._repo, to_bytes(name), to_bytes(url))\n check_error(err)",
"def remote_push(self, pNamespace):",
"def push(self):\n origin = self.git_repo.remotes.origin\n origin.push()",
"def push(args):\n if args.type == 'ssh':\n cache = set(args.remote_cache).union(set(args.cache))\n for path in sorted(cache):\n if os.path.exists(os.path.join(args.base, path)) and not remote_exists(args.sftp, os.path.join(args.remote_base, path)):\n print('push: {}'.format(path))\n ensure_remote(args.sftp, os.path.dirname(os.path.join(args.remote_base, path)))\n args.sftp.put(\n os.path.join(args.base, path),\n os.path.join(args.remote_base, path)\n )\n args.remote_cache.append(path)\n args.remote_update = True\n elif args.type == 's3':\n raise NotImplementedError('s3:// remote type not yet supported!')\n elif args.type == 'gs':\n raise NotImplementedError('gs:// remote type not yet supported!')\n return",
"def _push_to_server(self) -> None:\n pass",
"def push():\n branch = git.current_branch().name\n shell.run('git push -u origin {}'.format(branch))",
"def push(self):\n out, err, code = self.command( [\"git\", \"push\"], self.directory )",
"def push(self, remote, branch, *args):\n return self.cmd('push', remote, branch, *args)",
"def push():\n local('hg push jvacx')",
"def git_remote_url(self, git_remote_url):\n self._git_remote_url = git_remote_url",
"def push(ctx):\n dufl_root = ctx.obj['dufl_root']\n git = Git(ctx.obj.get('git', '/usr/bin/git'), dufl_root)\n git.run('push', 'origin', git.working_branch())",
"def push(images, tag, registry):\n manager = Manager('push', tag, images=images, registry_url=registry)\n manager.run()",
"def push(self, *args, **kwargs):\n pass",
"def cmd_push_review(remote):\n return ['git', 'push', remote]",
"def push(self):\n if self.forward:\n git = self.repo.git\n try:\n git.push()\n self.forward = \"pushed\"\n except:\n self.forward = \"push error - \"+self.forward",
"def set_url(self, name, url):\n err = C.git_remote_set_url(self._repo._repo, to_bytes(name), to_bytes(url))\n check_error(err)",
"def infocalypse_push(ui_, repo, **opts):\n params, stored_cfg = get_config_info(ui_, opts)\n insert_uri = opts['uri']\n if insert_uri == '':\n insert_uri = stored_cfg.get_dir_insert_uri(repo.root)\n if not insert_uri:\n ui_.warn(\"There is no stored insert URI for this repo.\\n\"\n \"Please set one with the --uri option.\\n\")\n return\n\n set_target_version(ui_, repo, opts, params,\n \"Only pushing to version(s): %s\\n\")\n params['INSERT_URI'] = insert_uri\n #if opts['requesturi'] != '':\n # # DOESN'T search the insert uri index.\n # ui_.status((\"Copying from:\\n%s\\nTo:\\n%s\\n\\nThis is an \"\n # + \"advanced feature. \"\n # + \"I hope you know what you're doing.\\n\") %\n # (opts['requesturi'], insert_uri))\n # params['REQUEST_URI'] = opts['requesturi']\n\n execute_push(ui_, repo, params, stored_cfg)",
"def push(ref='origin/master'):\n from fabric.api import local, run, cd\n from fabric.contrib.project import rsync_project\n local('pelican -s %s -d' % env.config_file)\n rsync_project(\n remote_dir=env.host_site_path,\n local_dir='output/',\n delete=True\n )\n if env.host_type != 'production':\n run(\"chown -R %(user)s:%(host_webserver_user)s %(host_site_path)s \"\n \"&& chmod -R 02750 %(host_site_path)s\" % env)",
"def push(self):\n result = self.get_result_dict()\n headers = {\n 'Authorization': self.token\n }\n response = requests.post(\n url, json=json.dumps(result, indent=4), headers=headers\n )\n return response.json()",
"def pushUrl(self, url):\n if not self.checkVisitedUrl(url['url']):\n self.__logger.debug(\"new url:\" + url['url'])\n self.lock.acquire()\n self.__unvistedUrls.put(url)\n self.lock.release()\n return True\n return False",
"def push(self, path, source_url, api_key):\n options = [\"push\", path,\n \"-Verbosity\", \"detailed\" if self.debug else \"normal\"]\n if source_url:\n options += [\"-Source\", source_url]\n if api_key:\n options += [\"-ApiKey\", api_key]\n\n return self._run_nuget(options)",
"def push(self, obj):\r\n request = http.Request('POST', self.get_push_url(), obj)\r\n return request, parsers.parse_json",
"def remote():\n pass",
"def _push_to_server(self) -> None:\n if not self.url or not self.job_name:\n return\n\n try:\n pushadd_to_gateway(self.url, job=self.job_name, registry=REGISTRY, handler=self._auth_handler)\n\n except OSError as exp:\n self.logger.warning(\"Failed to push metrics to %s: %s\", self.url, str(exp))\n except:\n self.logger.exception(\"Failed to push metrics to %s\", self.url)\n\n self.logger.debug(\"Pushed metrics to %s\", self.url)",
"def __gitPush(self):\n self.vcs.gitPush(self.project.getProjectPath())",
"def push_rev(rev):\n env.push_rev = rev",
"def fetch(path):\n LOGGER.info('Post push request received, Updating %s', path)\n call(['cd \"' + path + '\" && git fetch'], shell=True)",
"def pub_tunnel(args, project=\"\", base_url=\"\", api_key=\"\"):\n project, base_url, api_key, updated = get_project_config(\n project=project, base_url=base_url, api_key=api_key)\n if updated:\n save_config()\n ssh_reverse_tunnel(args, base_url, api_key, prefix=project)",
"def url(self):\n\n return maybe_string(C.git_remote_url(self._remote))",
"def push_to_remotes(self, repo: git.Repo, tag: str) -> None:\n if self._upstream_remotes:\n self._logger.info('Start pushing to remotes: %s.',\n self._upstream_remotes)\n else:\n self._logger.info('No push remote was specified')\n return\n for remote_name in self._upstream_remotes:\n remote = self.get_remote(repo, remote_name)\n if remote:\n self._logger.info('Push %s to %s', tag, remote)\n remote.push(str(tag))\n else:\n self._logger.error(\n 'Can\\'t find remote with name `%s`', remote_name)"
] | [
"0.7094837",
"0.6979935",
"0.68169296",
"0.67966676",
"0.66819257",
"0.6680206",
"0.66366327",
"0.6635456",
"0.6486271",
"0.6448664",
"0.64341855",
"0.62836725",
"0.625129",
"0.62458855",
"0.62370825",
"0.62095743",
"0.6193338",
"0.61739475",
"0.6118299",
"0.60442275",
"0.60290396",
"0.6026082",
"0.6013792",
"0.59750754",
"0.59745467",
"0.5945008",
"0.59372973",
"0.59140706",
"0.5897223",
"0.5893499"
] | 0.7172953 | 0 |
Save a remote to its repository's configuration. | def save(self):
err = C.git_remote_save(self._remote)
check_error(err) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remote_save(self, file_path=None, delete_local=False, remote_target=None):\n if not file_path:\n file_path = self.fname\n\n f = File()\n if not remote_target:\n remote_target = self.remote.get('target', None)\n LOG.info(\"Saving picture %s to %s\" % (file_path, remote_target))\n f.copy(file_path, remote_target, delete=delete_local)",
"def save_urls(self):\n config = self.get_github_config_path(self.CONFIG_URL)\n parser = configparser.RawConfigParser()\n try:\n parser.add_section(self.CONFIG_URL_SECTION)\n except configparser.DuplicateSectionError:\n pass\n parser.set(self.CONFIG_URL_SECTION, self.CONFIG_URL_LIST, self.urls)\n with open(config, 'w+') as config_file:\n parser.write(config_file)",
"def remote_set(location, repo, remote='origin'):\n ensure_dir(location)\n with utils.cd(location):\n if remote_exists(location, remote):\n cmd = '/usr/bin/git remote rm {}'.format(remote)\n subprocess.check_call(cmd, shell=True)\n\n cmd = '/usr/bin/git remote add {} {}'.format(remote, repo)\n subprocess.check_call(cmd, shell=True)",
"def remotes_add(flox: Flox, remote):\n # if flox.remotes.has(remote):\n # raise ConfigurationException(f\"Remote configuration '{remote}' already exists\")\n\n warning_box(\"Remote configuration sources are potentially dangerous, you should only add configuration \"\n \"from trusted sources\")\n if not click.confirm(click.style(f\"Would you still like to add {remote} as configuration source?\", fg=\"yellow\")):\n raise Abort\n\n config_type = \"local\"\n if remote.lower().startswith((\"http://\", \"https://\")):\n config_type = \"remote\"\n elif remote.lower().startswith(\"git\") or remote.endswith(\".git\"):\n config_type = \"git\"\n\n flox.remotes.set(remote, dict(\n type=config_type,\n hash=hashlib.sha256(remote.encode(\"UTF-8\")).hexdigest()\n ))\n\n fetch_remote(flox, remote)\n\n success_box(f\"Remote source '{remote}' has been added as a configuration source\")",
"def save(self) -> None:\n self._client.save_config()",
"def save():\n\n env.config.save(env.config_file)",
"def export_config_remote(handle, file_dir, file_name, hostname,\n protocol=\"scp\", username=None, password=\"\",\n preserve_pooled_values=False,\n remove_from_ucsc=False,\n timeout=600):\n _export_config(handle, file_dir=file_dir, file_name=file_name,\n remote_enabled=True,\n hostname=hostname, protocol=protocol,\n username=username, password=password,\n preserve_pooled_values=preserve_pooled_values,\n remove_from_ucsc=remove_from_ucsc,\n timeout=timeout)",
"def save_config(self):\n config.save_config(self.config, self.config_file)",
"def save_config(name, url):\n db = dbm.open(config_file, 'c')\n db[name] = url\n db.close()",
"def set_remote(\n self, name: str, url: str, push: bool = False, overwrite: bool = False\n ) -> GitRemote:\n\n url = self.chomp_protocol(url)\n\n if self.remote(name) and overwrite:\n self.cmd.remote.set_url(name=name, url=url, check_returncode=True)\n else:\n self.cmd.remote.add(name=name, url=url, check_returncode=True)\n\n remote = self.remote(name=name)\n if remote is None:\n raise Exception(\"Remote {name} not found after setting\")\n return remote",
"def change_config(self, repo):\n with repo.config_writer() as config:\n url = ('https://' + str(self.user.username) + ':' +\n str(self.get_user_token()) + '@github.com/' +\n str(self.user.username) + '/' + self.repo + '.git')\n config.set_value('remote \"origin\"', 'url', url)\n config.set_value('user', 'email', '[email protected]')\n config.set_value('user', 'name', 'Ranvir Singh')\n return config",
"def save(self):\n file = open(self.path, 'w')\n self.config.write(file)\n file.close()",
"def save(self):\n self.__config.sync()\n self.__saved = True\n Logger().debug(\"Configuration saved\")",
"def save(config, path=None):\n if path is None:\n path = settings.HOST_CONFIG_PATH\n\n with open(path, 'w') as output:\n output.write(yaml.safe_dump(config, default_flow_style=False))",
"def save_config(self, *args, **kwargs):\n raise NotImplementedError",
"def git_remote_url(self, git_remote_url):\n self._git_remote_url = git_remote_url",
"def save(self):\r\n with open(self.filename, 'wb') as configfile:\r\n self.write(configfile)",
"def save(self):\n Registry.SetKey(self.CONFIG_NAME, self.config, True)\n self.load() # for validation",
"def export_config_domain_remote(handle, file_dir, file_name,\n domain_ip, hostname, protocol,\n username=None, password=\"\",\n domain_name=None, preserve_pooled_values=False,\n timeout=600):\n backup_type = \"config-all\"\n return _backup_or_exportconfig_domain(handle, backup_type, file_dir,\n file_name, domain_ip, domain_name,\n hostname, preserve_pooled_values,\n protocol, username, password,\n timeout)",
"def saveConfig(self):\n newPath = self.newFolderPath.text()\n config.set(\"saveLocation\", str(newPath))\n config.save()\n self.reloadSettings()",
"def save(self, config_path):\n raise NotImplementedError()",
"def test_remote(self):\n\n self.assertEqual(description.RepositoryDescription(\n '[email protected]:/example/remote', '/path/to/local').remote,\n implementation.RemoteRepository(\n '[email protected]:/example/remote'))",
"def save_config(self):\n\n return self.perform_action('/mgmtd/db/save')",
"def save(self):\n self.network.save()",
"def saved_config(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"saved_config\"), kwargs)",
"def save_config(self, filename: str=None):\n if not filename:\n filename = self.config_file\n with open(filename, \"w\") as file_object:\n json.dump(self.config, file_object, indent=4, sort_keys=True)",
"def set_url(self, name, url):\n err = C.git_remote_set_url(self._repo._repo, to_bytes(name), to_bytes(url))\n check_error(err)",
"def save_config(**kwargs):\n if kwargs == {}:\n kwargs = config._config\n current_config = _load_config()\n current_config.update(**kwargs)\n # write to disk\n fname = _get_config_fname()\n if fname is None:\n raise RuntimeError('config filename could not be determined')\n if not op.isdir(op.dirname(fname)):\n os.mkdir(op.dirname(fname))\n with open(fname, 'w') as fid:\n json.dump(current_config, fid, sort_keys=True, indent=0)",
"def save(self):\n for p, c in self.configs_:\n c.write(p)",
"def set_remote(self, bRemote):\n\t\tcall_sdk_function('PrlVmDev_SetRemote', self.handle, bRemote)"
] | [
"0.6913942",
"0.6310363",
"0.6138643",
"0.6070944",
"0.5944239",
"0.59436154",
"0.5880474",
"0.58746165",
"0.5848079",
"0.5820438",
"0.57785755",
"0.57339436",
"0.56392133",
"0.56184226",
"0.5592695",
"0.55855554",
"0.5575766",
"0.556263",
"0.5523025",
"0.55052197",
"0.5482372",
"0.5479264",
"0.5461869",
"0.5460543",
"0.54554963",
"0.5430142",
"0.54177827",
"0.5401374",
"0.53791183",
"0.5376889"
] | 0.78605765 | 0 |
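A short sketch tying together the name / url / push_url / save rows above; it assumes pygit2, a repository at "." and an 'origin' remote, and the example URLs are made up. Newer pygit2 drops remote.save() in favour of RemoteCollection.set_url / set_push_url (both of which also appear among the negatives):

import pygit2

repo = pygit2.Repository(".")
for remote in repo.remotes:
    print(remote.name, remote.url, remote.push_url)

# Older pygit2 persisted in-place edits with remote.save(); newer releases
# configure the URLs through the collection instead:
repo.remotes.set_url("origin", "https://example.com/repo.git")
repo.remotes.set_push_url("origin", "ssh://git@example.com/repo.git")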
Total number of refspecs in this remote | def refspec_count(self):
return C.git_remote_refspec_count(self._remote) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def n_refs(self):\n return self._n_refs",
"def get_total_rehashes(self):\n return self.count_rehashes",
"def __len__(self):\n total_objs = 0\n\n if self._shelve is not None:\n total_objs += len(self._shelve)\n\n if self._dict is not None:\n total_objs += len(self._dict)\n\n return total_objs",
"def referencecount(self) :\n\t\ttry :\n\t\t\treturn self._referencecount\n\t\texcept Exception as e:\n\t\t\traise e",
"def number_commits_recorded(refenv) -> int:\n return len(list_all_commits(refenv))",
"def unmerged_total(self):\n return int(self.git.rev_list('--count', '{}..{}'.format(self.base_branch, self.topic_branch)))",
"def total(self) -> int:\n return len(self.fixes)",
"def refCount(self, node):\n return self._references.get(node, 0)",
"def total_pulls(self) -> int:\n return self.__total_pulls",
"def num_hashes(self):\n # see comment in constructor for self.hashes\n return self.config.num_hashes",
"def numnems(self):\n count = 0\n for o in self._objs.values():\n count += len(o.netifs())\n return count",
"def n_total_files(self):\n return len(self.fileinfo)",
"def getNoOfPatches(self):\n return _patchExtractor.patchExtractor_getNoOfPatches(self)",
"def count(self):\n return len(self.__links)",
"def count(self):\n return self.size()",
"def size(self):\n\t\treturn self._count",
"def len(self):\n start = self.head\n count = 0\n while start:\n count+=1\n start = start.getLink()\n return count",
"def calculate_number_of_references(div):\n n_publication_ref = len(\n [ref for ref in div.find_all(\"ref\") if ref.attrs.get(\"type\") == \"bibr\"]\n )\n n_figure_ref = len(\n [ref for ref in div.find_all(\"ref\") if ref.attrs.get(\"type\") == \"figure\"]\n )\n return {\"n_publication_ref\": n_publication_ref, \"n_figure_ref\": n_figure_ref}",
"def getNumReferents(self):\n return _libsbml.ReplacedElement_getNumReferents(self)",
"def fileCount(self):\n pass",
"def nreferences(self):\n return self.__nreferences",
"def compute(self):\n\n commit_hashes = {item['hash'] for item in self.items}\n return len(commit_hashes)",
"def get_ref_length(self, ref):\n tbl = self._get_references_node()\n return get_ref_length(tbl, ref)",
"def size_nbytes(self) -> int:\n self.__verify_repo_initialized()\n return folder_size(self._repo_path, recurse=True)",
"def count(self):\n # TODO not implemented yet\n return 0",
"def getNumReferents(self):\n return _libsbml.SBaseRef_getNumReferents(self)",
"def n_cf(self):\n return np.size(self._ref_ii, 0)",
"def circular_reference_count(obj: typing.Any) -> int:\r\n if np is not None:\r\n result = _numpy_circular_ref_count(obj)\r\n if result is not NotImplemented:\r\n return result\r\n return _get_circular_ref_count(obj)",
"def get_size(obj: Any) -> int:\n if isinstance(obj, BLACKLIST):\n return 0\n seen_ids: set[int] = set()\n size = 0\n objects = [obj]\n while objects:\n need_referents = []\n for obj_ in objects:\n if not isinstance(obj_, BLACKLIST) and id(obj_) not in seen_ids:\n seen_ids.add(id(obj_))\n size += sys.getsizeof(obj_)\n need_referents.append(obj_)\n objects = gc.get_referents(*need_referents)\n return size",
"def comptotalrequests(self) :\n\t\ttry :\n\t\t\treturn self._comptotalrequests\n\t\texcept Exception as e:\n\t\t\traise e"
] | [
"0.6764848",
"0.644436",
"0.64315546",
"0.6388287",
"0.63136035",
"0.63098884",
"0.62839144",
"0.62362033",
"0.6197885",
"0.61837137",
"0.6138996",
"0.6132474",
"0.6095574",
"0.6055916",
"0.60469747",
"0.60452753",
"0.6015536",
"0.5992",
"0.59615195",
"0.5940339",
"0.5924425",
"0.59225005",
"0.5911519",
"0.59082603",
"0.58971936",
"0.58879584",
"0.5878091",
"0.5874371",
"0.58679605",
"0.58666444"
] | 0.85212857 | 0 |
Refspecs that will be used for pushing | def push_refspecs(self):
specs = ffi.new('git_strarray *')
err = C.git_remote_get_push_refspecs(specs, self._remote)
check_error(err)
return strarray_to_strings(specs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def list_refs(self):\n pass",
"def push_update_reference(self, refname, message):",
"def refs(self):\n return self._refs",
"def updateScptRefs(self):\n for scpt in self.refs_scpt.keys():\n self.refs_scpt[scpt] = scpt.getRef()\n self.scptRefs = set(self.refs_scpt.values())",
"def RawRefs(self, default=[{}]):\n tmp = self.data.get('raw_refs', default)\n return [HEP.RawReferenceObject(i) for i in tmp]",
"def try_push_special_refs(repo):\n # test pushing to the 'private' dev/arcyd/ area, where arcyd will store\n # it's tracker branches\n repo('push', 'origin', '--dry-run', 'HEAD:refs/heads/dev/arcyd/test')\n\n # test pushing to the refs/arcyd area, where the 'landed' and 'abandoned'\n # archive branches will live\n repo('push', 'origin', '--dry-run', 'HEAD:refs/arcyd/test')",
"def references(self):\n return tuple(self.__references)",
"def make_force_push_mutate_refs_func(targets, sha):\n\n def mutate_refs(refs):\n for target in targets:\n refs[target.encode(\"UTF-8\")] = sha.encode(\"UTF-8\")\n return refs\n\n return mutate_refs",
"def references(self, references):\n\n self._references = references",
"def pushAll(**namespace):",
"def push(targets, **namespace):",
"def sources(obj, reftype):",
"def set_crossrefs(self, refs):\n for attr, obj in refs.items():\n valued_param = self._crossrefs[attr]\n valued_param.set_object(obj)",
"def _refs(self, items):\n # type: (Iterable[Any]) -> Iterable[weakref.ReferenceType]\n return map(self.ref, items)",
"def push(self, obj):\n pass",
"def push(self, *args, **kwargs):\n pass",
"def resolve(self):\n for reference in self._references:\n if reference.target is None:\n definition = self._definitions.get(reference.name)\n if definition is None:\n msg = message_factory.get_message(\n 'vapi.data.structref.structure.not.defined',\n reference.name)\n logger.debug(msg)\n raise CoreException(msg)\n reference.target = definition",
"def push_back(self, *args):\n return _ida_frame.xreflist_t_push_back(self, *args)",
"def latest_ref(self):",
"def get_refs(self, for_push: bool) -> List[Tuple[str, str]]:\n try:\n loc = posixpath.join(self._path, \"refs\")\n res = self._connection.files_list_folder(loc, recursive=True)\n files = res.entries\n while res.has_more:\n res = self._connection.files_list_folder_continue(res.cursor)\n files.extend(res.entries)\n except dropbox.exceptions.ApiError as e:\n if not isinstance(e.error, dropbox.files.ListFolderError):\n raise\n if not for_push:\n # if we're pushing, it's okay if nothing exists beforehand,\n # but it's good to notify the user just in case\n self._trace(\"repository is empty\", Level.INFO)\n else:\n self._first_push = True\n return []\n files = [i for i in files if isinstance(i, dropbox.files.FileMetadata)]\n paths = [i.path_lower for i in files]\n if not paths:\n return []\n revs: List[str] = []\n data: List[bytes] = []\n for rev, datum in self._get_files(paths):\n revs.append(rev)\n data.append(datum)\n refs = []\n for path, rev, datum in zip(paths, revs, data):\n name = self._ref_name_from_path(path)\n sha = datum.decode(\"utf8\").strip()\n self._refs[name] = (rev, sha)\n refs.append((sha, name))\n return refs",
"def _backreference_keys(self, ref, key_in_ref):\n return (self.__class__.REFERENCE_FIELDS +\n self.__class__.DEPENDENT_LINES +\n self.__class__.OTHER_REFERENCES)",
"def ref_updated(self, event):\n pass",
"def targets(obj, reftype):",
"def get_references(self):\n\n return self._refs",
"def push(f, *args, **kwargs):",
"def list_refs(self):\n print('----\\nREFs\\n----')\n self._print_dict(self.refs)",
"def _process_references0(self, references):\n if \"zarr_consolidated_format\" in references:\n # special case for Ike prototype\n references = _unmodel_hdf5(references)\n self.references = references",
"def add_ref_tag(basicSeqs):\r\n\r\n formattedBasicSeqs=list(basicSeqs) \r\n for record in formattedBasicSeqs:\r\n record.id=record.id+'_Ref'\r\n record.name=record.name+'_Ref'\r\n record.description=record.description+'_Ref'\r\n return formattedBasicSeqs",
"def create_remote_refs(git_url, ref_mutator, force=False):\n client, path = dulwich.client.get_transport_and_path(git_url)\n\n if force is False:\n determine_wants = _make_determine_wants_func(ref_mutator)\n else:\n determine_wants = ref_mutator\n # We know we don't need to push any objects.\n\n def generate_pack_contents(have, want):\n return []\n\n return client.send_pack(path, determine_wants, generate_pack_contents)",
"def References(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('references', default)\n return [HEP.ReferenceObject(i) for i in tmp]"
] | [
"0.6074251",
"0.6062355",
"0.5899434",
"0.5841198",
"0.57566303",
"0.5702181",
"0.56438404",
"0.5604831",
"0.55328727",
"0.5516635",
"0.55016184",
"0.54588157",
"0.54560244",
"0.5452985",
"0.5420332",
"0.53919894",
"0.53701526",
"0.53461057",
"0.5345899",
"0.53423005",
"0.53411174",
"0.53403354",
"0.5337063",
"0.5323041",
"0.53100157",
"0.5300619",
"0.5281677",
"0.5258079",
"0.5257074",
"0.52434146"
] | 0.63625705 | 0 |
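A brief sketch for the refspec_count / push_refspecs rows above, again assuming pygit2 and an existing 'origin' remote; fetch_refspecs is the fetch-side counterpart and is an assumption, not shown in the rows:

import pygit2

remote = pygit2.Repository(".").remotes["origin"]
print(remote.refspec_count)           # total refspecs configured for the remote
for spec in remote.push_refspecs:     # refspecs used for pushing
    print("push:", spec)
for spec in remote.fetch_refspecs:    # fetch counterpart
    print("fetch:", spec)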
Call fn and return the credentials object | def get_credentials(fn, url, username, allowed):
url_str = maybe_string(url)
username_str = maybe_string(username)
creds = fn(url_str, username_str, allowed)
credential_type = getattr(creds, 'credential_type', None)
credential_tuple = getattr(creds, 'credential_tuple', None)
if not credential_type or not credential_tuple:
raise TypeError("credential does not implement interface")
cred_type = credential_type
if not (allowed & cred_type):
raise TypeError("invalid credential type")
ccred = ffi.new('git_cred **')
if cred_type == C.GIT_CREDTYPE_USERPASS_PLAINTEXT:
name, passwd = credential_tuple
err = C.git_cred_userpass_plaintext_new(ccred, to_bytes(name),
to_bytes(passwd))
elif cred_type == C.GIT_CREDTYPE_SSH_KEY:
name, pubkey, privkey, passphrase = credential_tuple
if pubkey is None and privkey is None:
err = C.git_cred_ssh_key_from_agent(ccred, to_bytes(name))
else:
err = C.git_cred_ssh_key_new(ccred, to_bytes(name),
to_bytes(pubkey), to_bytes(privkey),
to_bytes(passphrase))
else:
raise TypeError("unsupported credential type")
check_error(err)
return ccred | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_creds():\n\tcredentials = None\n\tif os.path.exists('token.pickle'):\n\t\twith open('token.pickle', 'rb') as token:\n\t\t\tcredentials = pickle.load(token)\n\t# If there are no (valid) credentials available, let the user log in.\n\tif not credentials or not credentials.valid:\n\t\tif credentials and credentials.expired and credentials.refresh_token:\n\t\t\tcredentials.refresh(Request())\n\t\telse:\n\t\t\tflow = InstalledAppFlow.from_client_secrets_file('config/sa.json', SCOPES)\n\t\t\tcredentials = flow.run_local_server(port=0)\n\t\t# Save the credentials for the next run\n\t\twith open('token.pickle', 'wb') as token:\n\t\t\tpickle.dump(credentials, token)\n\treturn credentials",
"def find_credential(account):\n return Credentials.find_by_username(account)",
"def get_creds(cred_fpath=None, api_path=None):\n if cred_fpath is not None:\n print(\"reading keys from credentials file\")\n keys = pd.read_csv(cred_fpath, sep=\"=\")\n myAccessKey = keys.loc['aws_access_key_id ']['[default]'].strip()\n mySecretKey = keys.loc['aws_secret_access_key ']['[default]'].strip()\n myToken = \"\"\n else:\n r = requests.get(api_path)\n creds = r.json()\n myAccessKey = creds[\"AccessKeyId\"]\n mySecretKey = creds[\"SecretAccessKey\"]\n myToken = creds[\"Token\"]\n return myAccessKey, mySecretKey, myToken",
"def get_credentials(self):\r\n \r\n try:\r\n import argparse\r\n #flags = argparse.ArgumentParser(parents=[tools.argparser]).parse_args()\r\n if self.noauth == True:\r\n flags = tools.argparser.parse_args(args=['--noauth_local_webserver'])\r\n else:\r\n flags = tools.argparser.parse_args(args=[])\r\n except ImportError:\r\n flags = None \r\n \r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir,'sheets.googleapis.com-allstarbot.json')\r\n\r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n secret = Path(self.CLIENT_SECRET_FILE)\r\n if secret.exists():\r\n flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES)\r\n else:\r\n print(\"client_secret.json not found, using env vars\")\r\n if not os.environ.get('client_id') or not os.environ.get('client_secret'): \r\n print(\"env vars client_id and client_secret not found. canceling\")\r\n raise Exception(\"client secret error\")\r\n else:\r\n flow = OAuth2WebServerFlow(\r\n os.environ.get('client_id'),\r\n os.environ.get('client_secret'),\r\n self.SCOPES) \r\n \r\n flow.params['access_type'] = 'offline'\r\n flow.user_agent = self.APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def find_credential(account):\n return Credentials.find_credential(account)",
"def execute(credentials):",
"def authenticate(credentials):",
"def get_appengine_credentials():\n return get_credentials()",
"def get_credentials():\n store = Storage(CREDENTIAL_PATH)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, None)\n return credentials",
"def get_creds(\n config: Config=default):\n config_path = config.credential_path\n scopes = config.scopes\n\n logger.info('loading token')\n logger.debug(f'config_path: {config_path}')\n config_path = Path(config_path).expanduser()\n store = file.Storage(config_path/'token.json')\n creds = store.get()\n\n if not creds or creds.invalid:\n # Ask the user to give the correct permissions.\n logger.info('loading credentials')\n flow = client.flow_from_clientsecrets(\n config_path/'client_id.json',\n scopes)\n\n arguments = sys.argv\n sys.argv = sys.argv[0:1]\n # This line is why we need to remove the arguments from sys.argv\n # If you find a better way to get it to work, i'm buying it\n creds = tools.run_flow(flow, store)\n sys.argv = arguments\n\n return creds",
"def set_credentials():",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'credentialv_modify.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def get_credentials():\n home_dir = os.path.expanduser('~')\n credential_dir = os.path.join(home_dir, '.credentials')\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir,\n 'sally.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n print('Storing credentials to ' + credential_path)\n return credentials",
"def credentials(self) -> Mapping:",
"def authenticate(self, cred): \n auth_helper = AuthHelper.AuthHelper(self.context)\n return auth_helper.auth(cred)",
"def get_credentials(args, my_dirname):\n\n credential_dir = os.path.join(my_dirname, '.credentials')\n if not os.path.exists(credential_dir):\n os.mkdir(credential_dir)\n credential_path = os.path.join(credential_dir, 'sheets.googleapis.com-cotus-checker.json')\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n try:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, args)\n print('Storing credentials to ' + credential_path)\n except (oauth2client.clientsecrets.InvalidClientSecretsError, json.decoder.JSONDecodeError):\n pass\n return credentials",
"def GetCredentials(self):\n return self._session.get(_CREDENTIAL_KEY, credentials.MapdCredentials())",
"def get_credentials(self):\r\n home_dir = os.path.expanduser('~')\r\n credential_dir = os.path.join(home_dir, '.credentials')\r\n if not os.path.exists(credential_dir):\r\n os.makedirs(credential_dir)\r\n credential_path = os.path.join(credential_dir, self.CRED_FILENAME)\r\n \r\n store = Storage(credential_path)\r\n credentials = store.get()\r\n if not credentials or credentials.invalid:\r\n flow = client.flow_from_clientsecrets(self.CLIENT_SECRET_FILE, self.SCOPES)\r\n flow.user_agent = self.APPLICATION_NAME\r\n if flags:\r\n credentials = tools.run_flow(flow, store, flags)\r\n else: # Needed only for compatibility with Python 2.6\r\n credentials = tools.run(flow, store)\r\n print('Storing credentials to ' + credential_path)\r\n return credentials",
"def authenticated(function):\n def wrapped(*args):\n \"\"\"Wrap function.\"\"\"\n try:\n return function(*args)\n except AESOPError:\n _LOGGER.debug(\"attempted to access page before login\")\n _LOGGER.debug(args[0])\n _login(args[0])\n return function(*args)\n return wrapped",
"def get_credentials(self, oid=None):\n path = '/credentials'\n key = 'credentials'\n if oid is not None:\n path = '%s/%s' % (path, oid)\n key = 'credential'\n res = self.client.call(path, 'GET', data='', token=self.token)\n self.logger.debug('Get openstack credentials: %s' % truncate(res))\n try:\n return res[0][key]\n except:\n raise OpenstackError('No credentials found')",
"def authentication_logger(fn):\n def inner(identifier, password):\n result = fn(identifier, password)\n \n if result:\n record, user = result\n log.info(\"AUTHN PASS %s %s\", request.remote_addr, record)\n return result\n \n log.info(\"AUTHN FAIL %s %s\", request.remote_addr, identifier)\n return result",
"def getcreds():\n global user\n global password\n if not user:\n user = input(\"Please enter your username:\\n\")\n if not password:\n password = getpass.getpass(\"Please enter password:\\n\")",
"def as_auth_name(func):\n\n def auth_client(self):\n token = Token.objects.get(user__username=username)\n self.client = APIClient()\n self.client.credentials(HTTP_AUTHORIZATION=\"Token \" + token.key)\n return func(self)\n\n return auth_client",
"def get_creds():\n with open(CREDS_PATH, 'r') as creds_file:\n creds = json.load(creds_file)\n return creds['uname'], creds['pword']",
"def _get_credentials(rse, endpoint):\n\n key = '%s_%s' % (rse, endpoint)\n result = REGION.get(key)\n if type(result) is NoValue:\n try:\n logging.debug(\"Loading account credentials\")\n result = config.get_rse_credentials(None)\n if result and rse in result:\n result = result[rse]\n result['is_secure'] = result['is_secure'][endpoint]\n REGION.set(key, result)\n else:\n raise Exception(\"Failed to load account credentials\")\n logging.debug(\"Loaded account credentials\")\n except KeyError as e:\n raise exception.CannotAuthenticate('RSE %s endpoint %s not in rse account cfg: %s' % (rse, endpoint, e))\n except:\n raise exception.RucioException(\"Failed to load credentials for RSE(%s) endpoint(%s), error: %s\" % (rse, endpoint, traceback.format_exc()))\n return result",
"def get_credentials():\n credentials_path = os.path.join(CREDENTIALS_DIR, CREDENTIALS_FILE)\n store = oauth2client.file.Storage(credentials_path)\n credentials = store.locked_get()\n\n if not credentials or credentials.invalid:\n client_secret_path = os.path.join(CREDENTIAL_DIR, CLIENT_SECRET_FILE)\n flow = client.flow_from_clientsecrets(client_secret_path, \n scope='https://www.googleapis.com/auth/admin.directory.resource.calendar',\n redirect_uri='urn:ietf:wg:oauth:2.0:oob')\n\n if flags:\n credentials = tools.run_flow(flow, store, flags)\n else: # Needed only for compatibility with Python 2.6\n credentials = tools.run(flow, store)\n\n print(\"Storing credentials to: \" + credentials_path)\n\n\n return credentials",
"def get_creds(self):\n return self.creds",
"def get_credentials(self):\n home_dir = os.path.expanduser(\"~\")\n credential_dir = os.path.join(home_dir, \".credentials\")\n if not os.path.exists(credential_dir):\n os.makedirs(credential_dir)\n credential_path = os.path.join(credential_dir, \"autoto.json\")\n\n store = Storage(credential_path)\n credentials = store.get()\n if not credentials or credentials.invalid:\n flow = client.flow_from_clientsecrets(CLIENT_SECRET_FILE, SCOPES)\n flow.user_agent = APPLICATION_NAME\n credentials = tools.run_flow(flow, store, self.auth_flags)\n print(\"Storing credentials to \" + credential_path)\n return credentials",
"def get_api_credentials(scope, service_account=True):\n\tSTORAGE = file.Storage('oAuth2.json') #local storage of oAuth tokens\n\tcredentials = STORAGE.get()\n\tif credentials is None or credentials.invalid: #check if new oAuth flow is needed\n\t\tif service_account: #server 2 server flow\n##\t\t\twith open(SERVICE_ACCOUNT_FILE) as f:\n##\t\t\t\taccount = json.loads(f.read())\n##\t\t\t\temail = account['client_email']\n##\t\t\t\tkey = account['private_key']\n\t\t\tcredentials = ServiceAccountCredentials.from_json_keyfile_name(SERVICE_ACCOUNT_FILE, scope)\n##\t\t\tcredentials = client.SignedJwtAssertionCredentials(email, key, scope=scope)\n\t\t\tSTORAGE.put(credentials)\n\t\telse: #Application Default Credentials (ADC)\n\t\t\tcredentials = GoogleCredentials.get_application_default()\n\t\t\treturn discovery.build('vision', 'v1', credentials=credentials,\n discoveryServiceUrl=DISCOVERY_URL)\t \n##\t\telse: #normal oAuth2 flow\n##\t\t\tCLIENT_SECRETS = os.path.join(os.path.dirname(__file__), 'client_secrets.json')\n##\t\t\tFLOW = client.flow_from_clientsecrets(CLIENT_SECRETS, scope=scope)\n##\t\t\tPARSER = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter, parents=[tools.argparser])\n##\t\t\tFLAGS = PARSER.parse_args(sys.argv[1:])\n##\t\t\tcredentials = tools.run_flow(FLOW, STORAGE, FLAGS)\n\t\t\n\treturn credentials",
"def get_user(fnc):\n @wraps(fnc)\n def ret(*args, **kargs):\n params = args[0]\n user = get_authorized_user(params['psid'])\n if user == None:\n return u'There is no user with that psid', httplib.NOT_FOUND\n if user == False:\n return {'code' : ACCESS_DENIED,\n 'caption' : 'You are not authorized user to do that'}, httplib.PRECONDITION_FAILED\n return fnc(*tuple([params, user] + list(args[1:])), **kargs)\n return ret"
] | [
"0.60266757",
"0.59413564",
"0.5846529",
"0.581955",
"0.58150387",
"0.58117324",
"0.58065265",
"0.5805673",
"0.57518536",
"0.57470065",
"0.57216364",
"0.5717514",
"0.5700105",
"0.56918997",
"0.56840074",
"0.56808466",
"0.5680676",
"0.5675405",
"0.56722444",
"0.5664514",
"0.5654779",
"0.5653715",
"0.56469303",
"0.56452245",
"0.56308633",
"0.5622252",
"0.5618582",
"0.561808",
"0.561397",
"0.5610774"
] | 0.6949153 | 0 |
Create a new remote with the given name and url. Returns a Remote object. If 'fetch' is provided, this fetch refspec will be used instead of the default. | def create(self, name, url, fetch=None):
cremote = ffi.new('git_remote **')
if fetch:
err = C.git_remote_create_with_fetchspec(cremote, self._repo._repo, to_bytes(name), to_bytes(url), to_bytes(fetch))
else:
err = C.git_remote_create(cremote, self._repo._repo, to_bytes(name), to_bytes(url))
check_error(err)
return Remote(self._repo, cremote[0]) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_remote(self, name: str, url: str, **kwargs: Any) -> Remote:\n return Remote.create(self, name, url, **kwargs)",
"def fetch(self, remote, *args):\n return self.cmd('fetch', remote, *args)",
"def add_fetch(self, name, refspec):\n\n err = C.git_remote_add_fetch(self._repo._repo, to_bytes(name), to_bytes(refspec))\n check_error(err)",
"def remote(self, name: str, **kwargs: Any) -> Optional[GitRemote]:\n\n try:\n ret = self.cmd.remote.show(\n name=name, no_query_remotes=True, log_in_real_time=True\n )\n lines = ret.split(\"\\n\")\n remote_fetch_url = lines[1].replace(\"Fetch URL: \", \"\").strip()\n remote_push_url = lines[2].replace(\"Push URL: \", \"\").strip()\n if remote_fetch_url != name and remote_push_url != name:\n return GitRemote(\n name=name, fetch_url=remote_fetch_url, push_url=remote_push_url\n )\n else:\n return None\n except exc.LibVCSException:\n return None",
"def set_remote(\n self, name: str, url: str, push: bool = False, overwrite: bool = False\n ) -> GitRemote:\n\n url = self.chomp_protocol(url)\n\n if self.remote(name) and overwrite:\n self.cmd.remote.set_url(name=name, url=url, check_returncode=True)\n else:\n self.cmd.remote.add(name=name, url=url, check_returncode=True)\n\n remote = self.remote(name=name)\n if remote is None:\n raise Exception(\"Remote {name} not found after setting\")\n return remote",
"def new(url):\n from grit import Repo\n return Repo.new(url=url, bare=True)",
"def _make_remote_property(name):\n\n def getter(self):\n from dvc.remote import Remote\n\n remote = self.config.get(name)\n if not remote:\n return None\n\n return Remote(self.repo, name=remote)\n\n getter.__name__ = name\n return cached_property(getter)",
"def fetch(\n name: str,\n tag: Optional[str] = None,\n secret: Optional[str] = None,\n ) -> HubExecutor:\n\n with ImportExtensions(required=True):\n import requests\n\n pull_url = get_hubble_url() + f'/{name}/?'\n path_params = {}\n if secret:\n path_params['secret'] = secret\n if tag:\n path_params['tag'] = tag\n\n request_headers = HubIO._get_request_header()\n\n pull_url += urlencode(path_params)\n resp = requests.get(pull_url, headers=request_headers)\n if resp.status_code != 200:\n if resp.text:\n raise Exception(resp.text)\n resp.raise_for_status()\n\n resp = resp.json()\n\n result = HubExecutor(\n resp['id'],\n resp.get('alias', None),\n resp['tag'],\n resp['visibility'],\n resp['image'],\n resp['package']['download'],\n resp['package']['md5'],\n )\n\n return result",
"def load_from_remote(self, url: Optional[str] = None) -> None:\n raise NotImplementedError",
"def make_site(\n cls,\n name,\n url,\n user_display=REMOTE_SITE_USER_DISPLAY,\n mode=SODAR_CONSTANTS['SITE_MODE_TARGET'],\n description='',\n secret=build_secret(),\n ):\n values = {\n 'name': name,\n 'url': url,\n 'mode': mode,\n 'description': description,\n 'secret': secret,\n 'user_display': user_display,\n }\n site = RemoteSite(**values)\n site.save()\n return site",
"def add(self, name: str, address: str) -> RemoteInfo:\n self.__verify_repo_initialized()\n succ = heads.add_remote(self._env.branchenv, name=name, address=address)\n if succ is False:\n raise ValueError(f'No-Op: Remote named: {name} already exists.')\n return RemoteInfo(name=name, address=address)",
"def create_object(self, name, url):\n r = self.request('post', 'registry/objects/', json.dumps({\n 'description': {\n 'name': name,\n 'url': url\n }\n }))\n return self._extract_id_from_batch_response(r, 'oid')",
"def create(\n name: str = typer.Argument(..., help=\"The name to give to the remote.\"),\n url: Optional[str] = typer.Option(None, '-u', help=\"URL of the remote. If not provided it will be requested.\"),\n secret: Optional[str] = typer.Option(\n None, '--secret', '-s', help=\"The skill secret. If not provided it will be requested.\"\n ),\n public_key_path: Optional[Path] = typer.Option(\n None, '-k', '--key', help=\"The path to the public key. If not provided it will be requested.\"\n ),\n):\n app_dir = Path(typer.get_app_dir('skills-cli', force_posix=True))\n config_file = app_dir / 'config.json'\n\n if not app_dir.exists():\n app_dir.mkdir(parents=True)\n\n if config_file.exists():\n config = json.loads(config_file.read_text(encoding='utf-8'))\n else:\n typer.secho(f'Config file {config_file} not found, creating...')\n config_file.touch()\n config = {}\n\n remotes = config.get('remotes', {})\n existing_config = remotes.get(name, {})\n if existing_config:\n typer.confirm(\n f'A configuration with the name \"{name}\" already exists, would you like to overwrite it?', abort=True\n )\n\n if not secret:\n secret = prompt_for_secret()\n\n if not public_key_path:\n public_key_path = prompt_for_key()\n\n if not url:\n url = typer.prompt('URL to invoke the skill', default='http://localhost:8080/parse')\n\n remotes[name] = {'name': name, 'url': url, 'secret': secret, 'public_key_path': str(public_key_path.absolute())}\n config['remotes'] = remotes\n config_file.write_text(json.dumps(config, indent=2), encoding='utf-8')",
"def fetch(args):\n storage, remote_path = split_storage(args.remote)\n\n local_path = args.local\n if local_path is None:\n _, local_path = os.path.split(remote_path)\n\n local_path_exists = os.path.exists(local_path)\n if local_path_exists and not args.force and not args.update:\n sys.exit(\"Local file %s already exists, not overwriting.\" % local_path)\n\n directory, _ = os.path.split(local_path)\n if directory:\n makedirs(directory, exist_ok=True)\n\n osf = _setup_osf(args)\n project = osf.project(args.project)\n\n store = project.storage(storage)\n for file_ in store.files:\n if norm_remote_path(file_.path) == remote_path:\n if local_path_exists and not args.force and args.update:\n if file_.hashes.get('md5') == checksum(local_path):\n print(\"Local file %s already matches remote.\" % local_path)\n break\n with open(local_path, 'wb') as fp:\n file_.write_to(fp)\n\n # only fetching one file so we are done\n break",
"def remote(self, *args, **kwargs):\n return self.api.remote(*args, **kwargs)",
"def set_url(self, name, url):\n err = C.git_remote_set_url(self._repo._repo, to_bytes(name), to_bytes(url))\n check_error(err)",
"def fetch_pull(ref):\n origin.fetch(tags=True)\n repo.git.checkout(\"{}\".format(ref))\n repo.git.pull(\"origin\", \"{}\".format(ref))",
"def fetch(self, fetch: Fetch):\n self._fetch = fetch",
"def get_remote(self, name):\n repo = Repo('.')\n if not hasattr(repo, 'remotes'):\n raise NotFound()\n for remote in repo.remotes:\n if remote.name == name:\n return remote\n raise NotFound()",
"def create_remote_instance(self, payload):\n instance = RemoteInstance()\n instance.init_from_payload(payload)\n return instance",
"def fetch_remote_ref(\n self, remote: str, local_ref: str, remote_ref: str, since: Optional[DateString] = None\n ) -> None:\n\n local_sha = None\n remote_sha = None\n kwargs = {\"verbose\": True, \"progress\": GitProgressPrinter()}\n remote = self.obj.remote(remote)\n fetch = GitRetry(remote.fetch)\n\n # Check if we already have a local reference\n if hasattr(self.obj.references, local_ref):\n local_ref_obj = self.obj.references[local_ref]\n local_sha = (\n local_ref_obj.object.hexsha\n if hasattr(local_ref_obj, \"object\")\n else local_ref_obj.commit.hexsha\n )\n\n # If we have the ref locally, we still want to update, but give negotiation hint\n kwargs[\"negotiation_tip\"] = local_ref\n\n # Get remote ref so we can check against the local ref\n if output := self.obj.git.ls_remote(remote, remote_ref):\n remote_sha = output.split()[0]\n\n # No fetch window specified\n # Or using Azure DevOps since it doesn't support shallow-since or unshallow\n if not since or any(\n urlparse(url).hostname == \"msazure.visualstudio.com\" for url in remote.urls\n ):\n LOGGER.info(\"Fetching ref %s from remote %s\", remote_ref, remote)\n fetch(remote_ref, **kwargs)\n\n # Create tag at FETCH_HEAD to preserve reference locally\n if local_sha is None or local_sha != remote_sha:\n self.obj.create_tag(local_ref, \"FETCH_HEAD\", force=True)\n\n return\n\n # If we have the ref locally, see if the ref is the same to avoid resetting depth\n if local_sha and remote_sha == local_sha:\n commit_date = self.obj.references[local_ref].commit.committed_date\n\n # Otherwise, initially fetch revision at depth 1. This will reset local depth\n else:\n LOGGER.info(\"Fetching remote ref %s from remote %s at depth 1\", remote_ref, remote)\n fetch_info = fetch(remote_ref, depth=1, **kwargs)[-1]\n commit_date = fetch_info.commit.committed_date\n\n # If last commit for revision is in the fetch window, expand depth\n # This check is necessary because some servers will throw an error when there are\n # no commits in the fetch window\n if commit_date >= since.epoch:\n LOGGER.info(\n 'Fetching ref %s from remote %s shallow since \"%s\"',\n remote_ref,\n remote,\n since,\n )\n try:\n fetch(remote_ref, shallow_since=since, **kwargs)\n except git.GitCommandError as e:\n # ADO repos do not currently support --shallow-since, only depth\n if \"Server does not support --shallow-since\" in e.stderr:\n LOGGER.warning(\n \"Server does not support --shallow-since, retrying fetch without option.\"\n )\n fetch(remote_ref, **kwargs)\n else:\n raise\n else:\n LOGGER.info(\n 'Newest commit for ref %s from remote %s is older than fetch window \"%s\"',\n remote_ref,\n remote,\n since,\n )\n\n # Create tag at FETCH_HEAD to preserve reference locally\n if local_sha is None or local_sha != remote_sha:\n self.obj.create_tag(local_ref, \"FETCH_HEAD\", force=True)",
"def fetch(args):\n do_all_projects_remotes(args + [\"fetch\"])",
"def config(args):\n uname = getpass.getuser()\n name = raw_input('Enter remote name (example: xfer): ') or 'xfer'\n if name in args.remotes:\n sys.exit('\\n{} is already listed as a remote.\\nPlease choose a different name or remove the remote using `git remote remove`\\n'.format(name))\n if args.type == 'ssh':\n server = raw_input('Enter remote url (example: {}@localhost): '.format(uname)) or uname + '@localhost'\n repo = os.path.join(args.home, os.path.basename(args.base))\n dest = raw_input('Enter remote destination for repo (default: {}): '.format(repo)) or repo\n dest = dest.replace('.git', '')\n port = raw_input('Enter port for server (default: 22): ') or 22\n remote = 'ssh://{}:{}{}.git'.format(server, port, dest)\n elif args.type == 's3':\n server = raw_input('Enter remote bucket name (example: mybucket): '.format(uname)) or uname\n remote = 's3://{}'.format(server)\n elif args.type == 'gs':\n server = raw_input('Enter remote bucket name (example: mybucket): '.format(uname)) or uname\n remote = 'gs://{}'.format(server)\n else:\n sys.exit('No rule for processing server type: {}'.format(args.type))\n run('git remote add {} {}'.format(name, remote))\n return",
"def remote(self, obj, name: str, tag: str, parties: Union[Party, list]) -> Rubbish:\n pass",
"def __getattr__(self, name):\n self._child = _RemoteCommand(\n \"%s.%s\" % (self._name, name), self._parent, self._url\n )\n return self._child",
"def test_remote(self):\n\n self.assertEqual(description.RepositoryDescription(\n '[email protected]:/example/remote', '/path/to/local').remote,\n implementation.RemoteRepository(\n '[email protected]:/example/remote'))",
"def clone_from(\n cls,\n url: PathLike,\n to_path: PathLike,\n progress: CallableProgress = None,\n env: Optional[Mapping[str, str]] = None,\n multi_options: Optional[List[str]] = None,\n allow_unsafe_protocols: bool = False,\n allow_unsafe_options: bool = False,\n **kwargs: Any,\n ) -> \"Repo\":\n git = cls.GitCommandWrapperType(os.getcwd())\n if env is not None:\n git.update_environment(**env)\n return cls._clone(\n git,\n url,\n to_path,\n GitCmdObjectDB,\n progress,\n multi_options,\n allow_unsafe_protocols=allow_unsafe_protocols,\n allow_unsafe_options=allow_unsafe_options,\n **kwargs,\n )",
"def add_fetch_type(self, name, handler):\n if not interpret_string(name):\n raise RelengInvalidSetupException('invalid fetch name provided')\n name_key = name.lower()\n if not name_key.startswith(PREFIX_REQUIREMENT):\n raise RelengInvalidSetupException('extension-defined fetch types '\n 'must be prefixed with \"{}\"'.format(PREFIX_REQUIREMENT))\n if name_key in self.fetch_types:\n raise RelengInvalidSetupException('extension fetch type {} is '\n 'already defined by another extension'.format(name))\n if not inspect.isclass(handler):\n raise RelengInvalidSetupException('handler is not a class')\n fetch_type = handler()\n fetch_op = getattr(fetch_type, 'fetch', None)\n if not callable(fetch_op):\n raise RelengInvalidSetupException('fetch type does not defined '\n 'required method(s)')\n self.fetch_types[name_key] = fetch_type",
"def remote_createNode(self, pkg, exe, args, name, namespace):\r\n return Node(self, pkg, exe, args, name, namespace)",
"def _init_from_remote(self):\n self.arch = self.remote.arch\n self.os_type = self.remote.os.name\n self.os_version = self.remote.os.version\n self.codename = self.remote.os.codename\n self.pkg_type = self.remote.system_type\n self.distro = self._get_distro(\n distro=self.remote.os.name,\n version=self.remote.os.version,\n codename=self.remote.os.codename,\n )\n # when we're initializing with a remote we most likely have\n # a task config, not the entire teuthology job config\n self.flavor = self.job_config.get(\"flavor\", \"basic\")\n self.tag = self.job_config.get(\"tag\")"
] | [
"0.75323445",
"0.65989864",
"0.6469159",
"0.63439584",
"0.6275085",
"0.5893828",
"0.57484573",
"0.5714986",
"0.5672253",
"0.55887216",
"0.5522805",
"0.54660064",
"0.5437272",
"0.5430468",
"0.5422879",
"0.5369303",
"0.5340879",
"0.5239376",
"0.5218165",
"0.52128315",
"0.52034414",
"0.5190949",
"0.5151309",
"0.51117474",
"0.511089",
"0.5086323",
"0.5085291",
"0.50788844",
"0.50788206",
"0.5066626"
] | 0.8370093 | 0 |
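The create() entry above looks like pygit2's RemoteCollection wrapper over libgit2's git_remote_create; a minimal usage sketch under that assumption, with a hypothetical repository path and example URLs:

import pygit2  # assumption: the snippet above matches pygit2's RemoteCollection API

repo = pygit2.Repository("/path/to/repo")  # hypothetical local repository

# Default fetch refspec (+refs/heads/*:refs/remotes/upstream/*)
upstream = repo.remotes.create("upstream", "https://example.com/project.git")

# Explicit fetch refspec used instead of the default
mirror = repo.remotes.create(
    "mirror", "https://example.com/mirror.git",
    fetch="+refs/heads/*:refs/remotes/mirror/*")

print(upstream.name, upstream.url)  # the returned Remote exposes name/url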
Rename a remote in the configuration. The refspecs in standard format will be renamed. Returns a list of fetch refspecs (list of strings) which were not in the standard format and thus could not be remapped. | def rename(self, name, new_name):
        if not name:
raise ValueError("Current remote name must be a non-empty string")
if not new_name:
raise ValueError("New remote name must be a non-empty string")
problems = ffi.new('git_strarray *')
err = C.git_remote_rename(problems, self._repo._repo, to_bytes(name), to_bytes(new_name))
check_error(err)
ret = strarray_to_strings(problems)
C.git_strarray_free(problems)
return ret | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __gitRenameRemote(self):\n remotes = self.vcs.gitGetRemotesList(self.project.getProjectPath())\n remote, ok = QInputDialog.getItem(\n None,\n self.tr(\"Rename\"),\n self.tr(\"Select a remote repository:\"),\n remotes,\n 0, False)\n if ok:\n self.vcs.gitRenameRemote(self.project.getProjectPath(), remote)",
"def __gitChangeRemoteUrl(self):\n remotes = self.vcs.gitGetRemotesList(self.project.getProjectPath())\n remote, ok = QInputDialog.getItem(\n None,\n self.tr(\"Rename\"),\n self.tr(\"Select a remote repository:\"),\n remotes,\n 0, False)\n if ok:\n self.vcs.gitChangeRemoteUrl(self.project.getProjectPath(), remote)",
"def remotes_update(flox: Flox):\n\n for source in flox.remotes.all().keys():\n fetch_remote(flox, source)\n success(f\"Updated: {source}\")\n\n success_box(f\"Remote sources updated\")",
"def configure_remotes(self, upstream_remote_name, upstream_remote_url, fork_remote_name, fork_remote_url):\n\n if not self.existing_git_repository(self.repo_path):\n return\n try:\n remotes = self.repo.remotes\n except GitError:\n return\n except (KeyboardInterrupt, SystemExit):\n self._exit()\n else:\n for remote in remotes:\n if upstream_remote_url == self._remote_get_url(remote.name):\n if remote.name != upstream_remote_name:\n self._rename_remote(remote.name, upstream_remote_name)\n continue\n if fork_remote_url == self._remote_get_url(remote.name):\n if remote.name != fork_remote_name:\n self._rename_remote(remote.name, fork_remote_name)\n remote_names = [r.name for r in self.repo.remotes]\n if upstream_remote_name in remote_names:\n self._compare_remote_url(upstream_remote_name, upstream_remote_url)\n if fork_remote_name in remote_names:\n self._compare_remote_url(fork_remote_name, fork_remote_url)",
"def renameSIdRefs(self, *args):\n return _libsbml.FbcReactionPlugin_renameSIdRefs(self, *args)",
"def host_renameOpsiDepotserver(self, oldId, newId):",
"def renameSIdRefs(self, *args):\n return _libsbml.Port_renameSIdRefs(self, *args)",
"def renameSIdRefs(self, *args):\n return _libsbml.Species_renameSIdRefs(self, *args)",
"def renameSIdRefs(self, *args):\n return _libsbml.SimpleSpeciesReference_renameSIdRefs(self, *args)",
"def renameSIdRefs(self, *args):\n return _libsbml.PossibleSpeciesFeatureValue_renameSIdRefs(self, *args)",
"def ensure_remotes(args):\n man = load_manifest()\n for (proj_name, project) in man.projects.iteritems():\n repo = GitRepo(workdir_for_project(project))\n for remote_name in project.remotes:\n remote = man.remotes[remote_name]\n new_url = remote.fetch % proj_name\n\n p = repo.command_process([\"config\", \"--get\", \"remote.%s.url\" % remote_name],\n capture_stdout=True)\n if p.Wait() == 0:\n cur_url = p.stdout.strip()\n if cur_url != new_url:\n repo.check_command([\"config\", \"--set\", \"remote.%s.url\" % remote_name, new_url])\n else:\n repo.check_command([\"remote\", \"add\", remote_name, new_url])",
"def renameSIdRefs(self, *args):\n return _libsbml.Reaction_renameSIdRefs(self, *args)",
"def renameSIdRefs(self, *args):\n return _libsbml.MultiSpeciesType_renameSIdRefs(self, *args)",
"def renameSIdRefs(self, *args):\n return _libsbml.InSpeciesTypeBond_renameSIdRefs(self, *args)",
"def getRemotes(directory):\n gitRemoteOutput = subprocess.check_output(['git','remote','-v'],cwd=directory)\n remotes = []\n for line in gitRemoteOutput.splitlines():\n if '(fetch)' in line:\n splitLine = line.split();\n remotes.append({'name': splitLine[0].strip(), 'url': splitLine[1].strip()})\n return remotes",
"def git_remote(**kw):\n return sh('git', 'remote', **kw).strip().split('\\n')",
"def renameSIdRefs(self, *args):\n return _libsbml.SBaseRef_renameSIdRefs(self, *args)",
"def renameSIdRefs(self, *args):\n return _libsbml.SBasePlugin_renameSIdRefs(self, *args)",
"def renameSIdRefs(self, *args):\n return _libsbml.SpeciesFeature_renameSIdRefs(self, *args)",
"def __gitRemoteCredentials(self):\n remotes = self.vcs.gitGetRemotesList(self.project.getProjectPath())\n remote, ok = QInputDialog.getItem(\n None,\n self.tr(\"Rename\"),\n self.tr(\"Select a remote repository:\"),\n remotes,\n 0, False)\n if ok:\n self.vcs.gitChangeRemoteCredentials(self.project.getProjectPath(),\n remote)",
"def renameSIdRefs(self, *args):\n return _libsbml.ListOfObjectives_renameSIdRefs(self, *args)",
"def renameSIdRefs(self, *args):\n return _libsbml.Output_renameSIdRefs(self, *args)",
"def fetch_refspecs(self):\n\n specs = ffi.new('git_strarray *')\n err = C.git_remote_get_fetch_refspecs(specs, self._remote)\n check_error(err)\n\n return strarray_to_strings(specs)",
"def list_remote_refs(git_url):\n client, path = dulwich.client.get_transport_and_path(git_url)\n try:\n refs = client.fetch_pack(path, lambda refs: [], None, lambda data: None)\n return {k.decode(\"UTF-8\"): v.decode(\"UTF-8\") for k, v in refs.items()}\n except dulwich.errors.HangupException as e:\n raise LSRemoteException(f\"Unable to fetch remote refs from {git_url}: {e}\")",
"def renameSIdRefs(self, *args):\n return _libsbml.QualitativeSpecies_renameSIdRefs(self, *args)",
"def renameSIdRefs(self, *args):\n return _libsbml.Input_renameSIdRefs(self, *args)",
"def ls_remote(ctx: \"PlanemoCliContext\", remote_repo: str) -> Dict[str, str]:\n commits_and_refs = io.communicate(\n [\"git\", \"ls-remote\", remote_repo],\n stdout=subprocess.PIPE,\n )[0]\n return dict(line.split()[::-1] for line in commits_and_refs.decode(\"utf-8\").splitlines())",
"def renameSIdRefs(self, *args):\n return _libsbml.SpeciesFeatureValue_renameSIdRefs(self, *args)",
"def renameSIdRefs(self, *args):\n return _libsbml.SpeciesTypeInstance_renameSIdRefs(self, *args)",
"def renameSIdRefs(self, *args):\n return _libsbml.SBase_renameSIdRefs(self, *args)"
] | [
"0.63354367",
"0.59142274",
"0.5644527",
"0.5351181",
"0.52948046",
"0.5238612",
"0.5197955",
"0.5156204",
"0.51546645",
"0.51463294",
"0.5122313",
"0.50963366",
"0.509136",
"0.50567704",
"0.5055112",
"0.5046217",
"0.5033727",
"0.5026965",
"0.50152934",
"0.50002825",
"0.49920243",
"0.49891645",
"0.49877012",
"0.4980796",
"0.49750084",
"0.49709904",
"0.4946218",
"0.4916857",
"0.49138492",
"0.49093565"
] | 0.6283107 | 1 |
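A companion sketch for rename(), under the same pygit2-style assumption and reusing the repo handle from the sketch after the create() row; the return value lists any refspecs that were not in the standard format and therefore could not be remapped:

problems = repo.remotes.rename("origin", "upstream")  # returns a list of str
if problems:
    print("Refspecs that could not be remapped:", problems)
else:
    print("All standard-format refspecs now point at refs/remotes/upstream/*")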
Set the push URL for a remote | def set_push_url(self, name, url):
err = C.git_remote_set_pushurl(self._repo._repo, to_bytes(name), to_bytes(url))
check_error(err) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def git_remote_url(self, git_remote_url):\n self._git_remote_url = git_remote_url",
"def push_url(self):\n\n return maybe_string(C.git_remote_pushurl(self._remote))",
"def setRemoteUrl(self, value, **kwargs):\n if value:\n value = urlparse.urlunparse(urlparse.urlparse(value))\n self.getField('remoteUrl').set(self, value, **kwargs)",
"def set_remote(\n self, name: str, url: str, push: bool = False, overwrite: bool = False\n ) -> GitRemote:\n\n url = self.chomp_protocol(url)\n\n if self.remote(name) and overwrite:\n self.cmd.remote.set_url(name=name, url=url, check_returncode=True)\n else:\n self.cmd.remote.add(name=name, url=url, check_returncode=True)\n\n remote = self.remote(name=name)\n if remote is None:\n raise Exception(\"Remote {name} not found after setting\")\n return remote",
"def set_url(self, name, url):\n err = C.git_remote_set_url(self._repo._repo, to_bytes(name), to_bytes(url))\n check_error(err)",
"def remote_push(self, pNamespace):",
"def push(self, remote, branch, *args):\n return self.cmd('push', remote, branch, *args)",
"def remote_set(location, repo, remote='origin'):\n ensure_dir(location)\n with utils.cd(location):\n if remote_exists(location, remote):\n cmd = '/usr/bin/git remote rm {}'.format(remote)\n subprocess.check_call(cmd, shell=True)\n\n cmd = '/usr/bin/git remote add {} {}'.format(remote, repo)\n subprocess.check_call(cmd, shell=True)",
"def set_git_url(context, url):\n context.url = url",
"def push():\n branch = git.current_branch().name\n shell.run('git push -u origin {}'.format(branch))",
"def __gitChangeRemoteUrl(self):\n remotes = self.vcs.gitGetRemotesList(self.project.getProjectPath())\n remote, ok = QInputDialog.getItem(\n None,\n self.tr(\"Rename\"),\n self.tr(\"Select a remote repository:\"),\n remotes,\n 0, False)\n if ok:\n self.vcs.gitChangeRemoteUrl(self.project.getProjectPath(), remote)",
"def set_url(self, url):\n self.data['url'] = url",
"def set_remote(self, bRemote):\n\t\tcall_sdk_function('PrlVmDev_SetRemote', self.handle, bRemote)",
"def setorigin(self):\n try:\n origin = self.repo.remotes.origin\n if origin.url != self.origin_url:\n log.debug('[%s] Changing origin url. Old: %s New: %s',\n self.name, origin.url, self.origin_url)\n origin.config_writer.set('url', self.origin_url)\n except AttributeError:\n origin = self.repo.create_remote('origin', self.origin_url)\n log.debug('[%s] Created remote \"origin\" with URL: %s',\n self.name, origin.url)",
"def cmd_push_review(remote):\n return ['git', 'push', remote]",
"def push(args):\n if args.type == 'ssh':\n cache = set(args.remote_cache).union(set(args.cache))\n for path in sorted(cache):\n if os.path.exists(os.path.join(args.base, path)) and not remote_exists(args.sftp, os.path.join(args.remote_base, path)):\n print('push: {}'.format(path))\n ensure_remote(args.sftp, os.path.dirname(os.path.join(args.remote_base, path)))\n args.sftp.put(\n os.path.join(args.base, path),\n os.path.join(args.remote_base, path)\n )\n args.remote_cache.append(path)\n args.remote_update = True\n elif args.type == 's3':\n raise NotImplementedError('s3:// remote type not yet supported!')\n elif args.type == 'gs':\n raise NotImplementedError('gs:// remote type not yet supported!')\n return",
"def push(self):\n origin = self.git_repo.remotes.origin\n origin.push()",
"def git_remote_url(self):\n return self._git_remote_url",
"def push_to_remotes(self, repo: git.Repo, tag: str) -> None:\n if self._upstream_remotes:\n self._logger.info('Start pushing to remotes: %s.',\n self._upstream_remotes)\n else:\n self._logger.info('No push remote was specified')\n return\n for remote_name in self._upstream_remotes:\n remote = self.get_remote(repo, remote_name)\n if remote:\n self._logger.info('Push %s to %s', tag, remote)\n remote.push(str(tag))\n else:\n self._logger.error(\n 'Can\\'t find remote with name `%s`', remote_name)",
"def push(\n self,\n remote: str = \"origin\",\n remote_branch: Optional[str] = None,\n message: Optional[str] = None,\n author: Optional[str] = None,\n ) -> dict:\n self._check_connection()\n if remote_branch is None:\n remote_branch = self._branch\n if author is None:\n author = self._author\n if message is None:\n message = (\n f\"Pushing to {remote}/{remote_branch} by Python client {__version__}\"\n )\n rc_args = {\n \"remote\": remote,\n \"remote_branch\": remote_branch,\n \"author\": author,\n \"message\": message,\n }\n return self._dispatch_json(\"post\", self._push_url(), rc_args)",
"def push(self):\n out, err, code = self.command( [\"git\", \"push\"], self.directory )",
"def _set_url(self): \n self.url = self.geturl()",
"def url(self):\n\n return maybe_string(C.git_remote_url(self._remote))",
"def _push_to_server(self) -> None:\n pass",
"def push(self):\n self.runtime.logger.info('Pushing config...')\n with Dir(self.runtime.metadata_dir):\n exectools.cmd_assert([\"git\", \"push\"])",
"def set_url(self, url):\n self.url = url",
"def notify_url(self, notify_url):\n\n self._notify_url = notify_url",
"def set_url(self, url):\n self.url = url",
"def push():\n local('hg push jvacx')",
"def push(ctx):\n dufl_root = ctx.obj['dufl_root']\n git = Git(ctx.obj.get('git', '/usr/bin/git'), dufl_root)\n git.run('push', 'origin', git.working_branch())"
] | [
"0.72324055",
"0.71841437",
"0.69710207",
"0.66419417",
"0.6640427",
"0.6405677",
"0.61682636",
"0.6008587",
"0.5950349",
"0.5879518",
"0.5878566",
"0.5850352",
"0.5836166",
"0.5828629",
"0.58109",
"0.5789106",
"0.57801384",
"0.5705603",
"0.5705531",
"0.5667261",
"0.56259835",
"0.5609602",
"0.5595807",
"0.55849695",
"0.5518384",
"0.54923576",
"0.5486238",
"0.5445147",
"0.5427235",
"0.5421998"
] | 0.77281153 | 0 |
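set_push_url() only affects where pushes go; a short sketch, under the same assumptions as the earlier sketches, pairing it with the set_url() helper that appears among the negatives so fetch and push can use different URLs:

repo.remotes.set_url("origin", "https://example.com/project.git")            # fetch URL
repo.remotes.set_push_url("origin", "ssh://[email protected]/project.git")  # push URL

origin = repo.remotes["origin"]
print(origin.url, origin.push_url)  # the two URLs may now differ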
Add a fetch refspec (str) to the remote | def add_fetch(self, name, refspec):
err = C.git_remote_add_fetch(self._repo._repo, to_bytes(name), to_bytes(refspec))
check_error(err) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_push(self, name, refspec):\n\n err = C.git_remote_add_push(self._repo._repo, to_bytes(name), to_bytes(refspec))\n check_error(err)",
"def fetch_pull(ref):\n origin.fetch(tags=True)\n repo.git.checkout(\"{}\".format(ref))\n repo.git.pull(\"origin\", \"{}\".format(ref))",
"def __gitAddRemote(self):\n self.vcs.gitAddRemote(self.project.getProjectPath())",
"def fetch_remote_ref(\n self, remote: str, local_ref: str, remote_ref: str, since: Optional[DateString] = None\n ) -> None:\n\n local_sha = None\n remote_sha = None\n kwargs = {\"verbose\": True, \"progress\": GitProgressPrinter()}\n remote = self.obj.remote(remote)\n fetch = GitRetry(remote.fetch)\n\n # Check if we already have a local reference\n if hasattr(self.obj.references, local_ref):\n local_ref_obj = self.obj.references[local_ref]\n local_sha = (\n local_ref_obj.object.hexsha\n if hasattr(local_ref_obj, \"object\")\n else local_ref_obj.commit.hexsha\n )\n\n # If we have the ref locally, we still want to update, but give negotiation hint\n kwargs[\"negotiation_tip\"] = local_ref\n\n # Get remote ref so we can check against the local ref\n if output := self.obj.git.ls_remote(remote, remote_ref):\n remote_sha = output.split()[0]\n\n # No fetch window specified\n # Or using Azure DevOps since it doesn't support shallow-since or unshallow\n if not since or any(\n urlparse(url).hostname == \"msazure.visualstudio.com\" for url in remote.urls\n ):\n LOGGER.info(\"Fetching ref %s from remote %s\", remote_ref, remote)\n fetch(remote_ref, **kwargs)\n\n # Create tag at FETCH_HEAD to preserve reference locally\n if local_sha is None or local_sha != remote_sha:\n self.obj.create_tag(local_ref, \"FETCH_HEAD\", force=True)\n\n return\n\n # If we have the ref locally, see if the ref is the same to avoid resetting depth\n if local_sha and remote_sha == local_sha:\n commit_date = self.obj.references[local_ref].commit.committed_date\n\n # Otherwise, initially fetch revision at depth 1. This will reset local depth\n else:\n LOGGER.info(\"Fetching remote ref %s from remote %s at depth 1\", remote_ref, remote)\n fetch_info = fetch(remote_ref, depth=1, **kwargs)[-1]\n commit_date = fetch_info.commit.committed_date\n\n # If last commit for revision is in the fetch window, expand depth\n # This check is necessary because some servers will throw an error when there are\n # no commits in the fetch window\n if commit_date >= since.epoch:\n LOGGER.info(\n 'Fetching ref %s from remote %s shallow since \"%s\"',\n remote_ref,\n remote,\n since,\n )\n try:\n fetch(remote_ref, shallow_since=since, **kwargs)\n except git.GitCommandError as e:\n # ADO repos do not currently support --shallow-since, only depth\n if \"Server does not support --shallow-since\" in e.stderr:\n LOGGER.warning(\n \"Server does not support --shallow-since, retrying fetch without option.\"\n )\n fetch(remote_ref, **kwargs)\n else:\n raise\n else:\n LOGGER.info(\n 'Newest commit for ref %s from remote %s is older than fetch window \"%s\"',\n remote_ref,\n remote,\n since,\n )\n\n # Create tag at FETCH_HEAD to preserve reference locally\n if local_sha is None or local_sha != remote_sha:\n self.obj.create_tag(local_ref, \"FETCH_HEAD\", force=True)",
"def fetchref(self, ref):\n log.debug('[%s] Fetching ref: %s', self.name, ref)\n fetch_info = self.repo.remotes.origin.fetch(ref).pop()\n return fetch_info.ref",
"def create(self, name, url, fetch=None):\n\n cremote = ffi.new('git_remote **')\n\n if fetch:\n err = C.git_remote_create_with_fetchspec(cremote, self._repo._repo, to_bytes(name), to_bytes(url), to_bytes(fetch))\n else:\n err = C.git_remote_create(cremote, self._repo._repo, to_bytes(name), to_bytes(url))\n\n check_error(err)\n\n return Remote(self._repo, cremote[0])",
"def add(name, url):\n click.echo(\"registered repo {} at url {}\".format(name, url))",
"def cmd_push_review(remote):\n return ['git', 'push', remote]",
"def fetch(path):\n LOGGER.info('Post push request received, Updating %s', path)\n call(['cd \"' + path + '\" && git fetch'], shell=True)",
"def fetch(self, refspecs=None, message=None, callbacks=None):\n\n fetch_opts = ffi.new('git_fetch_options *')\n err = C.git_fetch_init_options(fetch_opts, C.GIT_FETCH_OPTIONS_VERSION)\n\n if callbacks is None:\n callbacks = RemoteCallbacks()\n\n callbacks._fill_fetch_options(fetch_opts)\n\n try:\n with StrArray(refspecs) as arr:\n err = C.git_remote_fetch(self._remote, arr, fetch_opts, to_bytes(message))\n if callbacks._stored_exception:\n raise callbacks._stored_exception\n check_error(err)\n finally:\n callbacks._self_handle = None\n\n return TransferProgress(C.git_remote_stats(self._remote))",
"def remote_set(location, repo, remote='origin'):\n ensure_dir(location)\n with utils.cd(location):\n if remote_exists(location, remote):\n cmd = '/usr/bin/git remote rm {}'.format(remote)\n subprocess.check_call(cmd, shell=True)\n\n cmd = '/usr/bin/git remote add {} {}'.format(remote, repo)\n subprocess.check_call(cmd, shell=True)",
"def add(self, name: str, address: str) -> RemoteInfo:\n self.__verify_repo_initialized()\n succ = heads.add_remote(self._env.branchenv, name=name, address=address)\n if succ is False:\n raise ValueError(f'No-Op: Remote named: {name} already exists.')\n return RemoteInfo(name=name, address=address)",
"def _git_add(repo, path, contents='example!\\n'):\n path.write_text(contents)\n subprocess.run(['git', '-C', repo, 'add', path], check=True)",
"def fpull(var, wrapper, message):\n _git_pull(wrapper)",
"def add_prod_repo_as_origin_and_push(git_repo_name):\n local(\"\"\"echo '[remote \"origin\"]' >> .git/config\"\"\")\n local(r\"echo ' fetch = +refs/heads/*:refs/remotes/origin/*' >> .git/config\")\n local(r\"echo ' url = %s:webapps/git/repos/%s.git' >> .git/config\" % (env.hosts[0], git_repo_name))\n local(r\"git push origin master\")",
"def fetch(self, remote, *args):\n return self.cmd('fetch', remote, *args)",
"def gitAdd(filename, repo_dir):\n file_path = \"%s/%s\" % (repo_dir, filename)\n git(\"add\", file_path)",
"def push_refspecs(self):\n\n specs = ffi.new('git_strarray *')\n err = C.git_remote_get_push_refspecs(specs, self._remote)\n check_error(err)\n\n return strarray_to_strings(specs)",
"def repo_add(self, name, url, **kwargs):\n\n self.helm_client.repo_add(name, url, **kwargs)",
"def remotes_add(flox: Flox, remote):\n # if flox.remotes.has(remote):\n # raise ConfigurationException(f\"Remote configuration '{remote}' already exists\")\n\n warning_box(\"Remote configuration sources are potentially dangerous, you should only add configuration \"\n \"from trusted sources\")\n if not click.confirm(click.style(f\"Would you still like to add {remote} as configuration source?\", fg=\"yellow\")):\n raise Abort\n\n config_type = \"local\"\n if remote.lower().startswith((\"http://\", \"https://\")):\n config_type = \"remote\"\n elif remote.lower().startswith(\"git\") or remote.endswith(\".git\"):\n config_type = \"git\"\n\n flox.remotes.set(remote, dict(\n type=config_type,\n hash=hashlib.sha256(remote.encode(\"UTF-8\")).hexdigest()\n ))\n\n fetch_remote(flox, remote)\n\n success_box(f\"Remote source '{remote}' has been added as a configuration source\")",
"def add(ref):\n cmd = 'qri add %s' % ref\n print('Fetching from registry...')\n result, err = shell_exec(cmd)\n return 'Added %s: %s' % (ref, result)",
"def test_pull_explicit_remote(self, repo):\n dest = os.path.join(self._tmpdir, 'cloned_repo')\n clone(['arg0', repo.path, dest])\n cloned = ComponentTestGitRepository(dest)\n self._check_repo_state(cloned, 'master', ['master'])\n eq_(pull(['argv0', 'origin']), 0)\n assert len(repo.get_commits()) == 1",
"def fetch_refspecs(self):\n\n specs = ffi.new('git_strarray *')\n err = C.git_remote_get_fetch_refspecs(specs, self._remote)\n check_error(err)\n\n return strarray_to_strings(specs)",
"def ensure_pr_fetch():\r\n modified = False\r\n remotes = git.remote().splitlines()\r\n if not \"edx\" in remotes:\r\n git.remote(\"add\", \"edx\", \"https://github.com/edx/edx-platform.git\")\r\n modified = True\r\n # it would be nice to use the git-python API to do this, but it doesn't seem\r\n # to support configurations with more than one value per key. :(\r\n edx_fetches = git.config(\"remote.edx.fetch\", get_all=True).splitlines()\r\n pr_fetch = '+refs/pull/*/head:refs/remotes/edx/pr/*'\r\n if pr_fetch not in edx_fetches:\r\n git.config(\"remote.edx.fetch\", pr_fetch, add=True)\r\n git.fetch(\"edx\")\r\n modified = True\r\n return modified",
"def create_remote_refs(git_url, ref_mutator, force=False):\n client, path = dulwich.client.get_transport_and_path(git_url)\n\n if force is False:\n determine_wants = _make_determine_wants_func(ref_mutator)\n else:\n determine_wants = ref_mutator\n # We know we don't need to push any objects.\n\n def generate_pack_contents(have, want):\n return []\n\n return client.send_pack(path, determine_wants, generate_pack_contents)",
"def set_url(self, name, url):\n err = C.git_remote_set_url(self._repo._repo, to_bytes(name), to_bytes(url))\n check_error(err)",
"def git_remote_url(self, git_remote_url):\n self._git_remote_url = git_remote_url",
"def update():\n call('git -C ~/norminette+ pull', shell=True)",
"def fetch(location, repo):\n if is_dir(location):\n remote_set(location, repo)\n with utils.cd(location):\n cmd = '/usr/bin/git fetch'\n subprocess.check_call(cmd, shell=True)\n else:\n cmd = '/usr/bin/git clone {0} {1}'.format(repo, location)\n subprocess.check_call(cmd, shell=True)",
"def fetch(self, remote: str, branch: str) -> str:\n self.__verify_repo_initialized()\n address = heads.get_remote_address(self._env.branchenv, name=remote)\n self._client = HangarClient(envs=self._env, address=address)\n CW = ContentWriter(self._env)\n\n with closing(self._client) as client:\n client: HangarClient\n\n # ----------------- setup / validate operations -------------------\n\n try:\n cHEAD = heads.get_branch_head_commit(self._env.branchenv, branch)\n except ValueError:\n # branch does not exist on local client\n try:\n s_branch = client.fetch_branch_record(branch)\n sHEAD = s_branch.rec.commit\n except grpc.RpcError as rpc_error:\n if rpc_error.code() == grpc.StatusCode.NOT_FOUND:\n # branch does not exist on remote\n logger.error(rpc_error.details())\n raise rpc_error\n else:\n c_bhistory = summarize.list_history(\n self._env.refenv, self._env.branchenv, branch_name=branch)\n try:\n s_branch = client.fetch_branch_record(branch)\n sHEAD = s_branch.rec.commit\n except grpc.RpcError as rpc_error:\n if rpc_error.code() == grpc.StatusCode.NOT_FOUND:\n # branch does not exist on remote\n logger.error(rpc_error.details())\n raise rpc_error\n\n # verify histories are intact and should be synced\n if sHEAD == cHEAD:\n warnings.warn(f'NoOp: {sHEAD} == client HEAD {cHEAD}', UserWarning)\n return branch\n elif sHEAD in c_bhistory['order']:\n warnings.warn(\n f'REJECTED: remote HEAD: {sHEAD} behind local: {cHEAD}', UserWarning)\n return branch\n\n # ------------------- get data ------------------------------------\n\n mCmtResponse = client.fetch_find_missing_commits(branch)\n m_cmts = mCmtResponse.commits\n for commit in tqdm(m_cmts, desc='fetching commit data refs'):\n # Get missing label (metadata) digest & values\n m_labels = set(client.fetch_find_missing_labels(commit))\n for label in m_labels:\n received_hash, labelVal = client.fetch_label(label)\n CW.label(received_hash, labelVal)\n # Get missing data schema digests & values\n mSchemaResponse = client.fetch_find_missing_schemas(commit)\n for schema in mSchemaResponse.schema_digests:\n schema_hash, schemaVal = client.fetch_schema(schema)\n CW.schema(schema_hash, schemaVal)\n # Record missing data hash digests (does not get data itself)\n m_hashes = client.fetch_find_missing_hash_records(commit)\n m_schema_hash_map = defaultdict(list)\n for digest, schema_hash in m_hashes:\n m_schema_hash_map[schema_hash].append((digest, schema_hash))\n for schema_hash, received_data in m_schema_hash_map.items():\n CW.data(schema_hash, received_data, backend='50')\n\n # Get missing commit reference specification\n for commit in tqdm(m_cmts, desc='fetching commit spec'):\n cmt, parentVal, specVal, refVal = client.fetch_commit_record(commit)\n CW.commit(cmt, parentVal, specVal, refVal)\n\n # --------------------------- At completion -----------------------\n\n # Update (or create) remote branch pointer with new HEAD commit\n fetchBranchName = f'{remote}/{branch}'\n try:\n heads.create_branch(\n self._env.branchenv, name=fetchBranchName, base_commit=sHEAD)\n except ValueError:\n heads.set_branch_head_commit(\n self._env.branchenv, branch_name=fetchBranchName, commit_hash=sHEAD)\n\n return fetchBranchName"
] | [
"0.65405554",
"0.63312775",
"0.5966933",
"0.57998365",
"0.5740081",
"0.5735921",
"0.57280624",
"0.57224613",
"0.5703283",
"0.56584454",
"0.56575096",
"0.5571788",
"0.55627555",
"0.55626905",
"0.5540516",
"0.548836",
"0.5485281",
"0.5485032",
"0.5458031",
"0.54532164",
"0.54309237",
"0.5429437",
"0.5351117",
"0.5346582",
"0.53416765",
"0.5269919",
"0.5263297",
"0.5226558",
"0.52230537",
"0.52063024"
] | 0.8219534 | 0 |
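add_fetch() appends an additional fetch refspec to the remote's configured list; sketched under the same assumptions, for example to also fetch pull-request heads from a hypothetical origin:

repo.remotes.add_fetch("origin", "+refs/pull/*/head:refs/remotes/origin/pr/*")
print(repo.remotes["origin"].fetch_refspecs)  # now includes the extra refspec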
Add a push refspec (str) to the remote | def add_push(self, name, refspec):
err = C.git_remote_add_push(self._repo._repo, to_bytes(name), to_bytes(refspec))
check_error(err) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_fetch(self, name, refspec):\n\n err = C.git_remote_add_fetch(self._repo._repo, to_bytes(name), to_bytes(refspec))\n check_error(err)",
"def cmd_push_review(remote):\n return ['git', 'push', remote]",
"def _git_add(repo, path, contents='example!\\n'):\n path.write_text(contents)\n subprocess.run(['git', '-C', repo, 'add', path], check=True)",
"def push(self, remote, branch, *args):\n return self.cmd('push', remote, branch, *args)",
"def gitAdd(filename, repo_dir):\n file_path = \"%s/%s\" % (repo_dir, filename)\n git(\"add\", file_path)",
"def push():\n local('hg push jvacx')",
"def push(ctx):\n dufl_root = ctx.obj['dufl_root']\n git = Git(ctx.obj.get('git', '/usr/bin/git'), dufl_root)\n git.run('push', 'origin', git.working_branch())",
"def git_append(msg):\n pipe = Popen('git log -1 --pretty=%B', stdout=PIPE, shell=True)\n old_msg = pipe.stdout.read()\n new_msg = '%s\\n%s' % (old_msg.rstrip(), msg)\n\n pipe = Popen('git commit --amend --file=-', stdin=PIPE, shell=True)\n pipe.communicate(new_msg)",
"def add(ref):\n cmd = 'qri add %s' % ref\n print('Fetching from registry...')\n result, err = shell_exec(cmd)\n return 'Added %s: %s' % (ref, result)",
"def add(name, url):\n click.echo(\"registered repo {} at url {}\".format(name, url))",
"def push_update_reference(self, refname, message):",
"def try_push_special_refs(repo):\n # test pushing to the 'private' dev/arcyd/ area, where arcyd will store\n # it's tracker branches\n repo('push', 'origin', '--dry-run', 'HEAD:refs/heads/dev/arcyd/test')\n\n # test pushing to the refs/arcyd area, where the 'landed' and 'abandoned'\n # archive branches will live\n repo('push', 'origin', '--dry-run', 'HEAD:refs/arcyd/test')",
"def push_refspecs(self):\n\n specs = ffi.new('git_strarray *')\n err = C.git_remote_get_push_refspecs(specs, self._remote)\n check_error(err)\n\n return strarray_to_strings(specs)",
"def _git_push(branch):\n\n local(\n 'git push -f origin %(branch)s:%(branch)s' % {'branch': branch},\n capture=True\n )\n print('Pushed to %s' % branch)",
"def push():\n branch = git.current_branch().name\n shell.run('git push -u origin {}'.format(branch))",
"def add_prod_repo_as_origin_and_push(git_repo_name):\n local(\"\"\"echo '[remote \"origin\"]' >> .git/config\"\"\")\n local(r\"echo ' fetch = +refs/heads/*:refs/remotes/origin/*' >> .git/config\")\n local(r\"echo ' url = %s:webapps/git/repos/%s.git' >> .git/config\" % (env.hosts[0], git_repo_name))\n local(r\"git push origin master\")",
"def push(self, specs, callbacks=None):\n push_opts = ffi.new('git_push_options *')\n err = C.git_push_init_options(push_opts, C.GIT_PUSH_OPTIONS_VERSION)\n\n if callbacks is None:\n callbacks = RemoteCallbacks()\n\n callbacks._fill_push_options(push_opts)\n # Build custom callback structure\n\n try:\n with StrArray(specs) as refspecs:\n err = C.git_remote_push(self._remote, refspecs, push_opts)\n check_error(err)\n finally:\n callbacks._self_handle = None",
"def repo_add(self, name, url, **kwargs):\n\n self.helm_client.repo_add(name, url, **kwargs)",
"def add_plugin_from_gh(\n user: str,\n branch: str,\n plugin: str,\n repo: str = \"polus-plugins\",\n manifest_name: str = \"plugin.json\",\n):\n l = [user, repo, branch, plugin, manifest_name]\n u = \"/\".join(l)\n url = urljoin(\"https://raw.githubusercontent.com\", u)\n logger.info(\"Adding %s\" % url)\n return submit_plugin(url, refresh=True)",
"def push_to_github(label):\n\n # Make sure we're in the right place to do all the git things.\n os.chdir(taas.data_root())\n\n # If there's nothing to do, then do nothing.\n if (not something_to_commit()):\n print(\"Nothing to commit.\")\n return\n\n branch_name = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n branch_name += \"-\" + label\n\n run([\"git\", \"checkout\", \"-b\", branch_name])\n\n run([\"git\", \"add\", \"-A\"])\n\n run([\"git\", \"status\"])\n\n run([\"git\", \"commit\", \"-m\", \"Automated update: \"+label])\n\n run([\"git\", \"push\", \"--set-upstream\", \"origin\", branch_name])",
"def push(self):\n out, err, code = self.command( [\"git\", \"push\"], self.directory )",
"def __gitAddRemote(self):\n self.vcs.gitAddRemote(self.project.getProjectPath())",
"def add(self,path):\n out, err, code = self.command( [\"git\", \"add\", path], self.directory )",
"def add_tag(tag):\n check_call(['git', 'tag', tag])",
"def push_the_change(self, issue_id: str, commit_msg: List[str] = None) -> None:\n index = self.git.index\n index.add([\"INFO.yaml\"])\n if not commit_msg:\n commit_msg = [\"Edit INFO.yaml file.\"]\n commit_msg_with_m = list(\n chain.from_iterable(zip_longest([], commit_msg, fillvalue=\"-m\"))\n )\n self.git.git.execute(\n [\n \"git\",\n \"commit\",\n *commit_msg_with_m,\n \"-m\",\n \"That change was done by automated integration tool to maintain commiters list in INFO.yaml\",\n \"-m\",\n f\"Issue-ID: {issue_id}\",\n \"-s\",\n ]\n )\n self.git.git.execute([\"git\", \"push\", \"origin\", f\"HEAD:refs/for/{self._branch}\"])\n print(f\"Pushed successfully to {self._repo} respository\")",
"def push_rev(rev):\n env.push_rev = rev",
"def push(self):\n origin = self.git_repo.remotes.origin\n origin.push()",
"def infocalypse_push(ui_, repo, **opts):\n params, stored_cfg = get_config_info(ui_, opts)\n insert_uri = opts['uri']\n if insert_uri == '':\n insert_uri = stored_cfg.get_dir_insert_uri(repo.root)\n if not insert_uri:\n ui_.warn(\"There is no stored insert URI for this repo.\\n\"\n \"Please set one with the --uri option.\\n\")\n return\n\n set_target_version(ui_, repo, opts, params,\n \"Only pushing to version(s): %s\\n\")\n params['INSERT_URI'] = insert_uri\n #if opts['requesturi'] != '':\n # # DOESN'T search the insert uri index.\n # ui_.status((\"Copying from:\\n%s\\nTo:\\n%s\\n\\nThis is an \"\n # + \"advanced feature. \"\n # + \"I hope you know what you're doing.\\n\") %\n # (opts['requesturi'], insert_uri))\n # params['REQUEST_URI'] = opts['requesturi']\n\n execute_push(ui_, repo, params, stored_cfg)",
"def call_git_push():\n print(\"This will commit and push the git repo\")\n today = datetime.datetime.today()\n call([\"git\", \"add\", \".\"])\n call([\"git\", \"commit\", \"-m\", \"Updated notes. {:%Y-%m-%d %H:%M:%S}\".format(today)])\n call([\"git\", \"push\", \"origin\", \"master\"])",
"def _do_push(self, line: str) -> None:\n remote_head = None\n while True:\n src, dst = line.split(\" \")[1].split(\":\")\n if src == \"\":\n self._delete(dst)\n else:\n self._push(src, dst)\n if self._first_push:\n if not remote_head or src == git.symbolic_ref(\"HEAD\"):\n remote_head = dst\n line = readline()\n if line == \"\":\n if self._first_push:\n self._first_push = False\n if remote_head:\n if not self.write_symbolic_ref(\"HEAD\", remote_head):\n self._trace(\"failed to set default branch on remote\", Level.INFO)\n else:\n self._trace(\"first push but no branch to set remote HEAD\")\n break\n _write()"
] | [
"0.68458635",
"0.6618803",
"0.6397485",
"0.627628",
"0.61030674",
"0.6059851",
"0.6020045",
"0.60113615",
"0.59857327",
"0.59671175",
"0.5963221",
"0.59560585",
"0.59545165",
"0.59238607",
"0.5921502",
"0.5911289",
"0.58821833",
"0.5830431",
"0.5815642",
"0.5809405",
"0.58039296",
"0.5767074",
"0.57459545",
"0.57151586",
"0.56920594",
"0.567765",
"0.5675017",
"0.5674955",
"0.5612261",
"0.5607675"
] | 0.7897243 | 0 |
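add_push() is the push-side counterpart and records an extra push refspec in the remote's configuration; the same kind of sketch, again assuming the pygit2-style collection:

repo.remotes.add_push("origin", "refs/heads/main:refs/heads/main")
print(repo.remotes["origin"].push_refspecs)  # lists the configured push refspecs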
Downloads the 6B GloVe embeddings (approx. 1GB) | def download_glove():
# Get the URL ...
print("Downloading https://nlp.stanford.edu/data/glove.6B.zip ...")
res = requests.get("https://nlp.stanford.edu/data/glove.6B.zip", stream=True)
if res.status_code != 200:
        print("Could not download the 6B GloVe Dataset! The server responded with code " + str(res.status_code) + ".")
sys.exit(1)
# ... and write it to file
fp = open("data/glove.6B.zip", "wb")
total_length = int(res.headers.get('content-length'))
# Thanks again to the internet for this beautiful piece of code <3
for chunk in tqdm.tqdm(res.iter_content(chunk_size=1024), unit="KB", total=ceil(total_length/1024) + 1):
if chunk:
fp.write(chunk)
fp.flush()
fp.close()
print("ZIP-file downloaded! Extracting ...")
with ZipFile("data/glove.6B.zip", "r") as zf:
files = zf.namelist()
print("Members in archive:")
print("\n".join(files))
for file in files:
if file.endswith("glove.6B.300d.txt"):
print("Extracting member " + file + " from archive ...")
                zf.extract(file, "data")  # extract next to the zip, into the data directory reported below
break
# Remove the zip file again
os.remove("data/glove.6B.zip")
print("Successfully extracted GloVe embeddings (300 dimensions) to data directory.")
print("You can now train the classifier using the GloVe embeddings.") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_glove_embedding():\n embedding = {}\n N = 400_000\n print(\"Reading glove embedding...\")\n with open(GLOVE_EMBD_PATH, \"rb\") as f:\n for line in tqdm(f, total=N):\n line = line.decode().split()\n word = line[0].lower()\n vector = np.array(line[1:]).astype(np.float32)\n embedding[word] = vector\n\n return embedding",
"def load_glove_embeddings():\n\n emmbed_file = Path(\"./embeddings.pkl\")\n if emmbed_file.is_file():\n # embeddings already serialized, just load them\n print(\"Local Embeddings pickle found, loading...\")\n with open(\"./embeddings.pkl\", 'rb') as f:\n return pk.load(f)\n else:\n # create the embeddings\n print(\"Building embeddings dictionary...\")\n data = open(\"glove.6B.50d.txt\", 'r', encoding=\"utf-8\")\n embeddings = [[0] * EMBEDDING_SIZE]\n word_index_dict = {'UNK': 0} # first row is for unknown words\n index = 1\n for line in data:\n splitLine = line.split()\n word = tf.compat.as_str(splitLine[0])\n embedding = [float(val) for val in splitLine[1:]]\n embeddings.append(embedding)\n word_index_dict[word] = index\n index += 1\n data.close()\n\n # pickle them\n with open('./embeddings.pkl', 'wb') as f:\n print(\"Creating local embeddings pickle for faster loading...\")\n # Pickle the 'data' dictionary using the highest protocol available.\n pk.dump((embeddings, word_index_dict), f, pk.HIGHEST_PROTOCOL)\n\n return embeddings, word_index_dict",
"def get_word_embeddings(t, folder, lang=\"en\"):\n vecs_url = f\"https://dl.fbaipublicfiles.com/fasttext/vectors-crawl/cc.{lang}.300.vec.gz\"\n vecs_gz_filename = vecs_url.rpartition(\"/\")[2]\n os.makedirs(folder, exist_ok=True)\n vecs_gz_filepath = os.path.join(folder, vecs_gz_filename)\n\n tokenizer_vocab_size = len(t.vocab)\n\n if wait_for_file_stable(vecs_gz_filepath):\n print(\"Using existing embeddings file\")\n else:\n print(\"Downloading word vectors...\")\n subprocess.run([\" \".join([\"wget\", \"-NP\", folder, vecs_url])], check=True, shell=True)\n\n print(\"Loading into memory...\")\n embeddings_index = dict()\n with gzip.open(vecs_gz_filepath, \"rt\") as zipf:\n firstline = zipf.readline()\n emb_vocab_size, emb_d = firstline.split(\" \")\n emb_vocab_size = int(emb_vocab_size)\n emb_d = int(emb_d)\n for line in zipf:\n values = line.split()\n word = values[0]\n # Only load subset of the embeddings recognised by the tokenizer:\n if word in t.vocab.stoi:\n coefs = np.asarray(values[1:], dtype=\"float32\")\n embeddings_index[word] = coefs\n print(\"Loaded {} of {} word vectors for tokenizer vocabulary length {}\".format(\n len(embeddings_index),\n emb_vocab_size,\n tokenizer_vocab_size,\n ))\n\n # create a weight matrix for words in training docs\n embedding_matrix = np.zeros((tokenizer_vocab_size, emb_d))\n for word, i in t.vocab.stoi.items():\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None:\n embedding_matrix[i] = embedding_vector\n\n return embedding_matrix",
"def load_glove_data():\n glove_path = path.join('..', 'data', 'glove', 'glove.twitter.27B.200d.txt')\n f = open(glove_path,'r')\n \n model = {}\n for line in f:\n splitLine = line.split()\n word = splitLine[0]\n embedding = np.array([float(val) for val in splitLine[1:]])\n model[word] = embedding\n \n return model",
"def load_glove_embeddings():\n data = open(\"glove.6B.50d.txt\",'r',encoding=\"utf-8\")\n embeddings = []\n word_index_dict = {'UNK':0}\n index = 1\n for lines in data:\n wordVector = lines.split(\" \")\n if(wordVector[0] in string.punctuation or any(char.isdigit() for char in wordVector[0])):\n continue\n embeddings.append(wordVector[1:-1])\n word_index_dict[wordVector[0]] = index\n index+=1\n print(\"done\")\n\n return embeddings, word_index_dict",
"def _load_glove_vec(fname, vocab):\n print 'load glove...'\n word_vecs = {}\n cnt = 0\n l = open(fname,'r').readline()\n embedding_size = len(l.strip().split()) -1\n print 'embedding vector size: %d'%(embedding_size)\n with open(fname, \"r\") as f:\n for l in f:\n stemp = l.strip().split(' ',1)\n assert len(stemp) == 2\n word = stemp[0]\n if word in vocab:\n word_vecs[stemp[0]] = np.fromstring(' '.join(stemp[1:]),sep = ' ')\n cnt+=1\n if cnt%10000==0:\n print '%d lines...'%cnt\n return (word_vecs,embedding_size)",
"def glove(data_fname='glove.840B.300d.txt', out_fname='glove.pkl'):\n words, U, dim = [], [], None\n with open(DATA_DIR + data_fname, 'rb') as f:\n for j, line in enumerate(f):\n x = line.strip().split()\n word, vector, d = x[0], np.ravel(x[1:]), len(x) - 1\n if dim is None: dim = d\n elif d != dim: raise Exception('{0}: {1}!={2}'.format(j, dim, d))\n U.append(vector)\n words.append(word)\n U = np.array(U)\n print \"Found {0} words\".format(len(words))\n print \"Found {0}x{1} embedding matrix\".format(*U.shape)\n with open(DATA_DIR + out_fname, 'wb') as f:\n cPickle.dump((words, U), f)",
"def glove():\n import numpy as np\n \"\"\" Read from Gluons embedding pickle files\"\"\"\n with np.load(glove_model_path) as f:\n matrix = f['idx_to_vec']\n matrix.setflags(write=0)\n return matrix, f['idx_to_token'], token_to_idx(f['idx_to_token'])",
"def load_google_embeddings(embeddings_path):\n\n embeddings = KeyedVectors.load_word2vec_format(\n embeddings_path,\n binary=True\n )\n\n dim = embeddings['dog'].size\n\n return embeddings",
"def load_kb_embeddings(path_to_folder):\n\n entity2idx = {}\n allowed_indices = set()\n with open(\"data/entity2id.filtered.txt\", 'r') as f:\n for l in f.readlines():\n k, v, idx = tuple(l.strip().split(\"\\t\"))\n entity2idx[k] = int(idx) + 3\n allowed_indices.add(int(v))\n\n embeddings = []\n with open(path_to_folder + \"/entity2vec.vec\", 'r') as f:\n idx = 0\n for line in f.readlines():\n if idx in allowed_indices:\n split = line.strip().split('\\t')\n embeddings.append([float(num) for num in split])\n idx += 1\n\n entity2idx[all_zeroes] = 0 # 0 is reserved for padding\n entity2idx[unknown_el] = 1 # 1 is reserved for OOV\n entity2idx[no_annotation] = 2 # 2 is reserved for no annotation tokens\n embedding_size = len(embeddings[0])\n vector_oov = 2 * 0.1 * np.random.rand(embedding_size) - 0.1\n vector_na = 2 * 0.1 * np.random.rand(embedding_size) - 0.1\n embeddings = np.asarray([[0.0]*embedding_size, vector_oov, vector_na] + embeddings, dtype='float32')\n\n print(\"KB embeddings loaded: {}\".format(embeddings.shape))\n assert len(entity2idx) == len(embeddings)\n\n return entity2idx, embeddings",
"def extract_embeddings():\n session, _ = session_bundle.load_session_bundle_from_path(FLAGS.model_path)\n all_paths = FLAGS.input_path.split(',')\n with tf.gfile.Open(FLAGS.output_path, 'w') as out:\n for dataset, pattern in enumerate(all_paths, start=FLAGS.offset):\n paths = tf.gfile.Glob(pattern)\n for path in paths:\n make_request(dataset, path, out, session)",
"def load_embedding(self, glove_dir='glove.6B/'):\n\n f = open(os.path.join(glove_dir, 'glove.6B.100d.txt'))\n for line in f:\n values = line.split()\n word = values[0]\n self.embeddings_index[word] = np.asarray(values[1:], dtype='float32')\n f.close()",
"def load_embeddings_models():\n\n\t# ---LOADING WORD2VEC MODEL---\n\tmodel_load_path = os.path.join(ROOT_PATH, 'models', 'word2vec', 'NILC', 'nilc_cbow_s300_300k.txt')\n\t# model_load_path = os.path.join('models', 'word2vec', 'NILC', 'nilc_skip_s300.txt')\n\tstart_time = time.time()\n\tprint(\"Started loading the word2vec model\")\n\tword2vec_model = KeyedVectors.load_word2vec_format(model_load_path)\n\t# word2vec_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\n\n\t# ---LOADING FASTTEXT MODEL---\n\tmodel_path = os.path.join(ROOT_PATH, 'models', 'fastText', 'cc.pt.300_300k.vec')\n\tstart_time = time.time()\n\tprint(\"Started loading the fasttext model\")\n\tfasttext_model = KeyedVectors.load_word2vec_format(model_path)\n\t# fasttext_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\t\n\n\t# ---LOADING PT-LKB MODEL---\n\tmodel_load_path = os.path.join(ROOT_PATH, 'models', 'ontoPT', 'PT-LKB_embeddings_64', 'ptlkb_64_30_200_p_str.emb')\n\t# model_load_path = os.path.join('models', 'ontoPT', 'PT-LKB_embeddings_128', 'ptlkb_128_80_10_p_str.emb')\n\tstart_time = time.time()\n\tprint(\"Started loading the PT-LKB-64 model\")\n\tptlkb64_model = KeyedVectors.load_word2vec_format(model_load_path)\n\t# ptlkb64_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\n\n\t# ---LOADING GLOVE-300 MODEL---\n\tmodel_load_path = os.path.join(ROOT_PATH, 'models', 'glove', 'glove_s300_300k.txt')\n\t# model_load_path = os.path.join('models', 'glove', 'glove_s100.txt')\n\tstart_time = time.time()\n\tprint(\"Started loading the GLOVE 300 dimensions model\")\n\tglove300_model = KeyedVectors.load_word2vec_format(model_load_path)\n\t# glove300_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\n\n\t# ---LOADING NUMBERBATCH MODEL---\n\tmodel_load_path = os.path.join(ROOT_PATH, 'models', 'numberbatch', 'numberbatch-17.02_pt_tratado.txt')\n\tstart_time = time.time()\n\tprint(\"Started loading the NUMBERBATCH dimensions model\")\n\tnumberbatch_model = KeyedVectors.load_word2vec_format(model_load_path)\n\t# numberbatch_model = None\n\tprint(\"Model loaded\")\n\tprint(\"--- %s seconds ---\" %(time.time() - start_time))\n\tprint('\\a')\n\n\treturn word2vec_model, fasttext_model, ptlkb64_model, glove300_model, numberbatch_model",
"def load_pretrained_embeddings(vocabulary: dict, max_size: int):\n # get GloVe 6B pre-trained word embeddings, of dimension 100\n glove_vec = torchtext.vocab.GloVe(name=\"6B\", dim=100, unk_init=torch.Tensor.normal_)\n\n pretrained = []\n for k, _ in vocabulary.stoi.items():\n if k == \"<PAD>\":\n emb = torch.zeros([glove_vec.dim])\n elif k == \"<UNK>\":\n emb = torch.rand([glove_vec.dim])\n else:\n emb = glove_vec.get_vecs_by_tokens(k, lower_case_backup=True)\n pretrained.append(emb) \n\n # return a tensor of size [vocab_size, emb_dim]\n return torch.stack(pretrained, dim=0)",
"def buildEmbeddingMatrix(path_to_gloVe, tokenizer, embedding_dimen=300):\n logging.info(\"Loading GloVe vector model..\")\n t = time()\n # Loads the gloVe model into a dictionary\n with open(path_to_gloVe, encoding='utf8') as file:\n embeddings = dict()\n for line in file:\n values = line.split()\n # key is the word, value is the numpy array for the corresponding word vector\n embeddings[values[0]] = np.asarray(values[1:], 'float32')\n # Create a 2D tensor of shape(num_unique_words+1, embedding_dimen) (Index 0 is used for padding)\n embedding_matrix = np.zeros((len(nltk_tokenizer.word_index) + 1, embedding_dimen))\n word_found_in_embedding = 0\n for word, index in nltk_tokenizer.word_index.items():\n embedding_vector = embeddings.get(word)\n # Only populate word vectors that exist in GloVe model,\n # words not found (e.g: spelling error) will be padded with zeroes as their word vector\n if embedding_vector is not None:\n embedding_matrix[index] = embedding_vector\n word_found_in_embedding += 1\n logging.info(\"Done!\")\n logging.info(\"Loaded {} word vectors into the embedding.\".format(len(embedding_matrix)))\n logging.info(\"Found {} word vectors that exist in the GloVe model.\".format(word_found_in_embedding))\n logging.info(\"Time taken to load pre-trained GloVe model: {} mins\".format(round(((time() - t) / 60), 2)))\n return embedding_matrix",
"def generate_conll2003_embeddings():\n glove_embedding = get_glove_embedding()\n\n word2index = {}\n idx2word = {}\n embed_array = []\n\n word2index[\"<pad>\"] = 1\n embed_array.append(init_embedding())\n\n word2index[\"<unk>\"] = 0\n embed_array.append(init_embedding())\n\n data = []\n with open(TRAIN_DATA_PATH, \"r\") as f:\n for line in f:\n data.append(json.loads(line))\n\n idx = 2\n\n for sample in tqdm(data, total=len(data)):\n words = sample[\"tokens\"]\n\n for w in words:\n w = w.lower()\n\n # if word is not present in dictionary, add to dictionary and append embedding vector\n if w not in word2index.keys():\n word2index[w] = idx\n idx += 1\n if w not in glove_embedding.keys():\n ev = init_embedding()\n else:\n ev = glove_embedding[w]\n\n embed_array.append(ev)\n\n else:\n continue\n\n # save embeddings\n embed_array = np.vstack(embed_array)\n np.save(EMBD_OUTPUT_PATH, embed_array)\n\n # save dictionary\n print(\"Dicitionary Size: \", len(word2index))\n with open(DICTIONARY_OUTPUT_PATH, \"w\") as f:\n json.dump(word2index, f)",
"def get_pretrained_weights(idx_word):\n n = len(idx_word)\n embed_dim = 25\n weights = mx.ndarray.zeros((n, embed_dim))\n print('Start downloading pre-trained vectors, this will take some time')\n glov = api.load(\"glove-twitter-25\")\n print('Pre-trained vectors downloading complete')\n not_in_vocab = 0\n for i in range(n):\n word = idx_word[i]\n try:\n weights[i] = glov[word]\n except: #if not in glove vocabulary\n not_in_vocab += 1\n weights[i] = mx.nd.random.normal(0, 0.1, embed_dim)\n if not_in_vocab > 0:\n print('Warning: {} words not in vocab of pretrained embeddings (glove-twitter-25)'.format(not_in_vocab))\n return weights",
"def load_word2vec_model():\n logging.basicConfig(\n format='%(asctime)s : %(levelname)s : %(message)s', \n level=logging.INFO)\n model_path = '/playpen/home/tongn/GoogleNews-vectors-negative300.bin'\n model = KeyedVectors.load_word2vec_format(fname=model_path, binary=True)\n return model",
"def load_embeddings(glove_path, vocab):\n vocab_size = vocab.get_vocab_size()\n words_to_keep = set(vocab.get_index_to_token_vocabulary().values())\n glove_embeddings = {}\n embedding_dim = None\n\n logger.info(\"Reading GloVe embeddings from {}\".format(glove_path))\n with open(glove_path) as glove_file:\n for line in tqdm(glove_file,\n total=get_num_lines(glove_path)):\n fields = line.strip().split(\" \")\n word = fields[0]\n if word in words_to_keep:\n vector = np.asarray(fields[1:], dtype=\"float32\")\n if embedding_dim is None:\n embedding_dim = len(vector)\n else:\n assert embedding_dim == len(vector)\n glove_embeddings[word] = vector\n\n all_embeddings = np.asarray(list(glove_embeddings.values()))\n embeddings_mean = float(np.mean(all_embeddings))\n embeddings_std = float(np.std(all_embeddings))\n logger.info(\"Initializing {}-dimensional pretrained \"\n \"embeddings for {} tokens\".format(\n embedding_dim, vocab_size))\n embedding_matrix = torch.FloatTensor(\n vocab_size, embedding_dim).normal_(\n embeddings_mean, embeddings_std)\n # Manually zero out the embedding of the padding token (0).\n embedding_matrix[0].fill_(0)\n # This starts from 1 because 0 is the padding token, which\n # we don't want to modify.\n for i in range(1, vocab_size):\n word = vocab.get_token_from_index(i)\n\n # If we don't have a pre-trained vector for this word,\n # we don't change the row and the word has random initialization.\n if word in glove_embeddings:\n embedding_matrix[i] = torch.FloatTensor(glove_embeddings[word])\n return embedding_matrix",
"def make_embedding(path, words, indices):\n #root = '/'.join(path.split('/')[0:-1])\n #all_paths = [root+'/'+x for x in os.listdir(root)] #'/'.join(path.split('/')[0:-1]))\n #for path in all_paths:\n vec_path = 'data/'+path.split('/')[-1]+'_'+mode\n print(vec_path)\n if os.path.exists(vec_path+'.npy'):\n np_vecs = np.load(vec_path+'.npy')\n else:\n words_len = len(words)\n vecs = []\n if mode == 'word':\n f = load_model('wiki.en.bin')\n for i, w in enumerate(words):\n if mode == 'word':\n vec = f.get_word_vector(w)\n else:\n vec = eye[indices[w]]\n vecs.append(vec) \n if i % 10000 == 0:\n print(\"{} / {}\".format(i, words_len))\n np_vecs = np.asarray(vecs, dtype=np.int8)\n np.save(vec_path, np_vecs)\n return np_vecs",
"def set_glove_embedding(self,fpath,embedding_dim):\n\t\temb = np.random.randn(self._count,embedding_dim)\n#\ttf.logging.info(emb[0])\n\t\twith open(fpath) as f: #python 3.x support \n\t\t\tfor k,line in enumerate(f):\n\t\t\t\tfields = line.split()\n\t\t\t\tif len(fields) - 1 != embedding_dim:\n\t\t\t\t\t# Sometimes there are funny unicode parsing problems that lead to different\n\t\t\t\t\t# fields lengths (e.g., a word with a unicode space character that splits\n\t\t\t\t\t# into more than one colum n). We skip those lines. Note that if you have\n\t\t\t\t\t# some kind of long header, this could result in all of your lines getting\n\t\t\t\t\t# skipped. It's hard to check for that here; you just have to look in the\n\t\t\t\t\t# embedding_misses_file and at the model summary to make sure things look\n\t\t\t\t\t# like they are supposed to.\n\t\t\t\t\t#logger.warning(\"Found line with wrong number of dimensions (expected %d, was %d): %s\",\n\t\t\t\t\t\t\t# embedding_dim, len(fields) - 1, line)\n\t\t\t\t\traise Exception(\"Found line with wrong number of dimensions (expected %d, was %d): %s\",\n\t\t\t\t\t\t\t\t\t\t\t embedding_dim, len(fields) - 1, line)\n\t\t\t\t\tcontinue\n\t\t\t\tword = fields[0]\n\t\t\t\tif word in self._word_to_id:\n\t\t\t\t\tvector = np.asarray(fields[1:], dtype='float32')\n\t\t\t\t\temb[self._word_to_id[word]] = vector\n#\t\tif k%1000 == 0:\n#\t\t tf.logging.info('glove : %d',k)\n\t\tself.glove_emb = emb",
"def learn_embeddings(walk_dir,dim,window_size,iter_num,workers,embedding_filename):\r\n logging.debug('begin learning embeddings')\r\n learning_begin_time = time.time()\r\n\r\n walk_files = glob.glob('%s/__random_walks_*.txt' % walk_dir)\r\n sentences = Sentences(walk_files)\r\n model = Word2Vec(sentences, size=dim, window=window_size, min_count=0, sg=1, hs=0, workers=workers, iter=iter_num)\r\n\r\n learning_end_time = time.time()\r\n logging.debug('done learning embeddings')\r\n logging.debug('learning time: {}'.format(learning_end_time - learning_begin_time))\r\n print('learning_time', learning_end_time - learning_begin_time, flush=True)\r\n model.wv.save_word2vec_format(fname=embedding_filename, binary=False)\r\n return model.wv",
"def load_embeddings(embeddings_path):\n\n embeddings_index = {}\n f = open(embeddings_path, encoding='utf-8')\n for line in tqdm(f):\n values = line.rstrip().split(' ')\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n print('Found {} word vectors.'.format(len(embeddings_index)))\n return embeddings_index",
"def get_docs_embedding(docs_tok, model, dim=300):\n all_docs_embedding = []\n for doc in docs_tok:\n all_docs_embedding.append(text2vec(doc, model, dim))\n cols = [str(i) for i in range(dim)]\n embeddings = pd.DataFrame(data=all_docs_embedding)\n embeddings.columns = cols\n embeddings.to_parquet('../model/docs_embeddings.parquet', index=False)\n return np.array(all_docs_embedding)",
"def main():\n for dim in (50, 100, 200, 300):\n data_file = data.FileFinder().get_file('GLOVE_TXT_FILE').format(dim)\n output_file = data.FileFinder().get_file('GLOVE_WORD2VEC_FILE').format(dim)\n print(\"Converting {} to {}\".format(data_file, output_file))\n glove2word2vec(data_file, output_file)",
"def test_gens():\n dataset_path = \"/home/kateryna/Documents\"\n train_gen, test_gen = generate_embeddings_gen(dataset_path)\n img, feature, labels = next(train_gen)\n print(len(img), len(feature), labels)",
"def load_data():\n t = time()\n print 'loading tweets, please wait...'\n trained_tweets = load_tweets('training_dataset')\n eval_tweets = load_tweets('evaluation_dataset')\n print 'Time taken {}'.format(time() - t)\n t = time()\n print 'loading w2v model, please wait...'\n model = w2v_load_model('GoogleNews-vectors-negative300.bin')\n print 'Time taken {}'.format(time() - t)\n return trained_tweets, eval_tweets, model",
"def get_weibo_data(vocab_file, vector_file):\n if os.path.exists(\"word_misc.pkl\"):\n return cPickle.load(open(\"word_misc.pkl\", \"rb\"))\n\n word_misc, word2id, id2word = {}, {}, {}\n word_count = 0\n\n # vocab file\n print \"Building vocabulary ...\"\n for lines in open(vocab_file).readlines():\n word = lines.split()[0]\n if not is_unwanted_words(word, ['', '\\n']):\n word2id[word] = word_count\n id2word[word_count] = word\n word_count += 1\n word2id['_START'] = word_count\n id2word[word_count] = '_START'\n word_count += 1\n word2id['_END'] = word_count\n id2word[word_count] = '_END'\n word_count += 1\n word2id['_UNK'] = word_count\n id2word[word_count] = '_UNK'\n word_count += 1\n word2id['_MASK'] = word_count\n id2word[word_count] = '_MASK'\n word_count += 1\n print \"Vocabulary size:\", word_count\n\n # Initialization is refered to in https://www.tensorflow.org/versions/r0.7/tutorials/word2vec/index.html\n word_emb = (1/np.sqrt(word_count)*(2*np.random.rand(word_count, options['embedding_size']) - 1)).tolist()\n\n # load word vectors\n for lines in open(vector_file).readlines()[1:]:\n word = lines.split()[0]\n #if word == '</s>' or word not in word2id.keys():\n # continue\n if word not in word2id.keys():\n continue\n ids = word2id[word]\n #print ids, lines, len(word_emb)\n word_emb[ids] = [float(w) for w in lines.split()[1:]]\n\n print len(word_emb), \"words have been loaded with\", len(word_emb[0]), \"dimensions\"\n\n # load word misc\n word_misc['id2word'] = id2word\n word_misc['word2id'] = word2id\n word_misc['word_count'] = word_count\n word_misc['word_emb'] = word_emb\n cPickle.dump(word_misc, open(\"word_misc.pkl\", \"wb\"))\n print \"Dump complete.\"\n return word_misc",
"def _extractGloveVects():\n \n embeddings_index = {}\n\n with open(GLOVE_CORPUS_FILE) as f:\n for line in f:\n values = line.split()\n word = values[0].lower()\n if word not in _cachedStopWords:\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n\n return embeddings_index",
"def try2():\n path = '/Users/mayankkejriwal/datasets/heiko-vectors/'\n model = Word2Vec.load(path+'DB2Vec_sg_500_5_5_15_4_500')\n\n print model['http://purl.org/dc/terms/subject']\n print model['dbo:birthPlace']\n print model['http://dbpedia.org/ontology/birthPlace']\n print len(model)\n print 'success'"
] | [
"0.7076395",
"0.64558613",
"0.6354679",
"0.61358154",
"0.61284745",
"0.60184133",
"0.6010079",
"0.59805787",
"0.5972328",
"0.59562606",
"0.5899516",
"0.5830953",
"0.57806957",
"0.5730951",
"0.5724267",
"0.57074106",
"0.5684446",
"0.5642139",
"0.5635222",
"0.5618081",
"0.5613138",
"0.5606697",
"0.5599058",
"0.558562",
"0.5566459",
"0.55639136",
"0.553498",
"0.5500314",
"0.5496083",
"0.54830796"
] | 0.7243812 | 0 |
Runs the Stanza module | def run_stanza (arguments):
if arguments.download:
# Download the full Stanza dataset
result = input("ATTENTION! This will download the full English Stanza corpus (approx. 400 MB). Do you wish to continue (y/n)? ")
if result == "y" or result == "yes":
# For a list of processors, see https://stanfordnlp.github.io/stanza/pipeline.html#processors
stanza.download('en', processors="tokenize,pos,lemma,ner,depparse")
sys.exit(0) # Afterwards exit normally | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run():\n main()",
"def run(self, args):\n pass",
"def main():\n\ttoken = os.getenv(\"BOT_TOKEN\")\n\tapplication = Application.builder().token(token).read_timeout(30).write_timeout(30).build()\n\tload_interactions(application)\n\tprint(\"Simple Media Converter instance started!\")\n\tapplication.run_polling()",
"def run(self, args):\n\n return",
"def run(self):\n print('A simple bot started the process.')\n try:\n self.calculate_before_process()\n\n if self.process == \"Like\":\n self.process_like()\n elif self.process == \"Like-and-follow\":\n self.process_like_and_follow()\n except Exception as e:\n print(e)\n finally:\n self.dump_all()\n print('A simple bot finished the process.')",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def Run():\r\n pass",
"def run(self):\n\t\t\n\t\tpass",
"def run(self, root):\r\n pass",
"def run_script(self):\n pass",
"def run(self):\n\t\t\n\t\tself.connect(self.config[\"server\"])",
"def run(self, parsed):",
"def run(args):\n pub_command = []\n sub_command = []\n\n script_dir = os.path.dirname(os.path.realpath(__file__))\n\n if not os.path.isfile(args.pub):\n print(f'Publisher executable file does not exists: {args.pub}')\n sys.exit(1)\n\n if not os.access(args.pub, os.X_OK):\n print(\n 'Publisher executable does not have execution permissions:'\n f'{args.pub}')\n\n pub_command.append(args.pub)\n\n if not os.path.isfile(args.sub):\n print(f'Subscriber executable file does not exists: {args.sub}')\n sys.exit(1)\n\n if not os.access(args.sub, os.X_OK):\n print(\n 'Subscriber executable does not have execution permissions:'\n f'{args.sub}')\n sys.exit(1)\n\n sub_command.append(args.sub)\n\n if args.xml_pub and args.xml_sub:\n if args.xml_pub:\n xml_file_pub = os.path.join(script_dir, args.xml_pub)\n if args.xml_sub:\n xml_file_sub = os.path.join(script_dir, args.xml_sub)\n else:\n print('Not provided xml configuration files.')\n sys.exit(1)\n\n pub_command.extend(['--xmlfile', xml_file_pub])\n sub_command.extend(['--xmlfile', xml_file_sub])\n\n pub_command.extend(['--seed', str(os.getpid())])\n sub_command.extend(['--seed', str(os.getpid())])\n\n if args.wait:\n pub_command.extend(['--wait', str(args.wait)])\n\n if args.samples:\n pub_command.extend(['--samples', str(args.samples)])\n sub_command.extend(['--samples', str(args.samples)])\n\n if len(args.servers) != len(args.xml_servers):\n print(\n 'Number of servers arguments should be equal to the number of xmls provided.')\n sys.exit(1)\n\n ds_procs = []\n for i in range(0, len(args.servers)):\n server_cmd = []\n\n if not os.path.isfile(args.servers[i]):\n print(f'Discovery server executable file does not exists: {args.servers[i]}')\n sys.exit(1)\n\n if not os.access(args.servers[i], os.X_OK):\n print(\n 'Discovery server executable does not have execution permissions:'\n f'{args.servers[i]}')\n sys.exit(1)\n\n server_cmd.append(args.servers[i])\n server_cmd.extend(['--xml-file', args.xml_servers[i]])\n server_cmd.extend(['--server-id', str(i)])\n\n ds_proc = subprocess.Popen(server_cmd)\n print(\n 'Running Discovery Server - commmand: ',\n ' '.join(map(str, server_cmd)))\n\n ds_procs.append(ds_proc)\n\n sub_proc = subprocess.Popen(sub_command)\n print(\n f'Running Subscriber - commmand: ',\n ' '.join(map(str, sub_command)))\n\n pub_proc = subprocess.Popen(pub_command)\n print(\n 'Running Publisher - commmand: ',\n ' '.join(map(str, pub_command)))\n\n try:\n outs, errs = sub_proc.communicate(timeout=15)\n except subprocess.TimeoutExpired:\n print('Subscriber process timed out, terminating...')\n sub_proc.kill()\n pub_proc.kill()\n [ds_proc.kill() for ds_proc in ds_procs]\n try:\n sys.exit(os.EX_SOFTWARE)\n except AttributeError:\n sys.exit(1)\n\n\n pub_proc.kill()\n ds_proc.kill()\n [ds_proc.kill() for ds_proc in ds_procs]\n try:\n sys.exit(os.EX_OK)\n except AttributeError:\n sys.exit(0)",
"def run(_):\n pass",
"def main():\n\n merch_bot = MerchBot()\n merch_bot.run_bot()",
"def run(self):\n self.run()"
] | [
"0.6028765",
"0.59991264",
"0.5996253",
"0.5902331",
"0.5847812",
"0.58472365",
"0.58472365",
"0.58472365",
"0.58472365",
"0.58472365",
"0.58472365",
"0.58472365",
"0.58472365",
"0.58472365",
"0.58472365",
"0.58472365",
"0.58472365",
"0.58472365",
"0.58472365",
"0.58472365",
"0.5844716",
"0.5842434",
"0.5823516",
"0.5806499",
"0.57956946",
"0.57930464",
"0.5789155",
"0.5777277",
"0.5737962",
"0.56963444"
] | 0.6173642 | 0 |
Runs the GloVe module | def run_glove (arguments):
if arguments.download:
# Download the 6B GloVe dataset
result = input("ATTENTION! This will download approximately 1GB of data. Do you wish to continue (y/n)? ")
if result == "y" or result == "yes":
download_glove()
sys.exit(0) # Afterwards exit normally | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def main():\n obj = VplexStorageview()\n obj.perform_module_operation()",
"def main():\n\timport docopt\n\targs = docopt.docopt(main.__doc__)\n\tv = Ventilator(args[\"--port\"], int(args[\"--speed\"]))\n\tif args[\"--debug\"]:\n\t\tlogging.basicConfig(level=logging.DEBUG)\n\telse:\n\t\tlogging.basicConfig(level=logging.INFO)\n\n\tt = v.kernel(args[\"SOURCE\"], address=int(args[\"--address\"], 16),\n\t\truns=int(args[\"--runs\"]), repeats=int(args[\"--repeats\"]))\n\tasyncio.get_event_loop().run_until_complete(t)",
"def main():\n args = load_args()\n\n perturbation_file = args.perturbation_file\n vm_params = load_yaml(args.vm_params_location)\n processes = args.n_processes\n verbose = args.verbose\n\n if args.perturbation:\n if args.model:\n perturbation_model = pd.read_csv(args.model)\n generate_velocity_model_perturbation_file_from_model(\n vm_params, perturbation_model, perturbation_file, processes, verbose\n )\n elif args.parameter_file:\n common_params, layer_params = load_parameter_file(args.parameter_file)\n generate_velocity_model_perturbation_file_from_config(\n common_params, layer_params, perturbation_file, processes, verbose\n )\n else:\n create_constant_vm_file(\n perturbation_file, vm_params[\"nx\"] * vm_params[\"ny\"] * vm_params[\"nz\"]\n )\n\n if args.fault_damage_zone:\n apply_fault_damage_zone(\n srf_location=args.srf_location,\n vm_params=vm_params,\n pert_f_location=perturbation_file,\n depth_km=args.depth_km,\n max_depth_km=args.max_depth_km,\n width_km=args.width_km,\n max_width_km=args.max_width_km,\n min_damage_velocity=args.max_velocity_drop,\n n_processes=processes,\n )",
"def run():\n logger.info(f\"Process started:\")\n logger.info(f\"Converting Glove file to Word2Vec format\")\n convert_to_word2vec.convert(\n \"./data/source/glove.6B.50d.txt\", \"./data/source/glove.6B.50d.w2vformat.txt\"\n )\n\n logger.info(f\"Extracting Click Stream data\")\n extract_click_stream_data()\n\n logger.info(\"Extracting Wiki articles\")\n extract_wiki_articles()\n\n logger.info(f\"Generating Clickstream dataset\")\n generate_datasets()\n\n logger.info(\"Tokenizing articles\")\n WikiArticlesTokenizer().process()\n\n logger.info(\"Creating dataset with Wiki Articles\")\n create_wiki_articles_dataset()",
"def main():\n GRAPH = lambda_graph()\n GRAPH.save_graph(\"pylon\")\n meshName = \"pylon.mesh\"\n cmd = \"./population/linuxShow \"+meshName\n process = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stdin=subprocess.PIPE)\n process.communicate()\n print \"nodes:\", GRAPH.number_of_nodes()\n print \"edges\", GRAPH.number_of_edges()",
"def main():\n for dim in (50, 100, 200, 300):\n data_file = data.FileFinder().get_file('GLOVE_TXT_FILE').format(dim)\n output_file = data.FileFinder().get_file('GLOVE_WORD2VEC_FILE').format(dim)\n print(\"Converting {} to {}\".format(data_file, output_file))\n glove2word2vec(data_file, output_file)",
"def main():\n \n ## read parameters from command line\n parser = argparse.ArgumentParser() \n parser.add_argument(\"-d\", \"--density\", help=\"Density\")\n parser.add_argument(\"-k\", \"--kappa\", help=\"Bending rigidity\")\n parser.add_argument(\"-t\", \"--time\", help=\"Timestep\") \n parser.add_argument(\"-s\", \"--save\", help=\"Save options\", action=\"store_true\") \n args = parser.parse_args()\n\n ## load data \n database = \"/local/duman/SIMULATIONS/long_filaments\"\n datafolder = database + \"/density_\" + args.density + \"/kappa_\" + args.kappa\n infile = datafolder + \"/VORTEX/data.hdf5\"\n lx, ly, time, xedges, yedges, rho_hist, vx_hist, vy_hist, vorticity \\\n = load_data(infile, int(args.time))\n \n ## plot data\n savebase = \"~/RolfData/many_filaments_5\" \n if args.save:\n sfolder = savebase + \"/plots/VORTEX\" \n os.system('mkdir -p ' + sfolder)\n else:\n sfolder = \"~/Desktop\"\n plot_data(sfolder, lx, ly, time, xedges, yedges, rho_hist, vx_hist, vy_hist, vorticity, args.save)",
"def main():\r\n graphPerformance = False # Built in graphing ability, currently not functional, but mechanism is in place.\r\n trainData = \"2_1000_0_1600_0_0_CV_0_Train.txt\"\r\n testData = \"2_1000_0_1600_0_0_CV_0_Test.txt\"\r\n outProg = \"GH_GALE_ProgressTrack\"\r\n outPop = \"GH_GALE_PopulationOut\"\r\n bitLength = 1 # This implementation is not yet set up to handle other rule representations, or bit encoding lengths.\r\n CVpartitions = 10\r\n trackCycles = 1\r\n \r\n iterInput = '5.10.20' \r\n xdim = 10\r\n ydim = 10\r\n dist = 2\r\n wild = 0.75\r\n prune = 1\r\n \r\n #Figure out the iteration stops for evaluation, and the max iterations.\r\n iterList = iterInput.split('.')\r\n for i in range(len(iterList)):\r\n iterList[i] = int(iterList[i])\r\n lastIter = iterList[len(iterList)-1] \r\n\r\n #Sets up up algorithm to be run.\r\n GALEConstants.setConstants(prune, wild)\r\n e = GALE_Environment(trainData,testData,bitLength)\r\n sampleSize = e.getNrSamples()\r\n gale = GALE(e, outProg, outPop, bitLength, CVpartitions, graphPerformance, xdim, ydim, dist)\r\n \r\n #Set some GALE parameters.\r\n if trackCycles == 'Default':\r\n gale.setTrackingIterations(sampleSize)\r\n else:\r\n gale.setTrackingIterations(trackCycles) \r\n gale.setNumberOfTrials(lastIter, iterList) \r\n \r\n #Run the GALE Algorithm \r\n gale.runGALE()",
"def main():\n parser = argparse.ArgumentParser(description='MergeGVCFs and genotype them using the GATK')\n parser.add_argument('-g', '--gatk', dest='gatk', help=\"Location of the GATK\", required=True)\n parser.add_argument('-x', '--xmx', dest='xmx', help=\"Memory to use with JAVA\", required=True)\n parser.add_argument('-c', '--cores', dest='cores', help=\"Number of cores to use\")\n parser.add_argument('-o', '--output', dest='output', \n help='Final output from the haplotype caller')\n parser.add_argument('-r', '--reference', dest='reference', \n help='Reference FASTA file')\n parser.add_argument('-b','--bed', dest='bed_file',\n help=\"Bed file for limiting the GATK\")\n parser.add_argument('-p', '--ploidy', dest='ploidy', \n help=\"Sample ploidy\", default=2)\n parser.add_argument('-d', '--out_directory', dest='directory', help='Output director')\n parser.add_argument('bams', nargs=\"*\", help='gVCF variant call files output from the GATK')\n args = parser.parse_args()\n args.cores = int(args.cores)\n args.xmx = args.xmx.strip('\"')\n print args.bams\n genovcfs = haplotype_caller(gatk=args.gatk, xmx=args.xmx, cores=args.cores,\n bams=args.bams, reference=args.reference,\n out_directory=args.directory, ploidy=args.ploidy, bed_file=args.bed_file)\n outputs = merge_gvcfs(gatk=args.gatk, xmx=args.xmx, cores=args.cores,\n gvcfs=genovcfs, reference=args.reference)\n genotype_gvcfs(gatk=args.gatk, xmx=args.xmx, cores=args.cores,\n inputs=outputs, output=args.output, reference=args.reference,bed_file=args.bed_file)\n #haplotype_single(gatk=args.gatk, xmx=args.xmx, cores=args.cores,\n # inputs=args.gvcfs, reference=args.reference)",
"def main(argv):\n\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser = parse.parse_agglo_from_labelmask(parser)\n parser = parse.parse_common(parser)\n args = parser.parse_args()\n\n agglo_from_labelmask(\n args.inpufile,\n args.labelvolume,\n args.ratio_threshold,\n args.outputfile,\n args.save_steps,\n args.protective,\n )",
"def runGA(dressCode, color, budget, poplength, generations, boost, error, show, best):\n\n print(\"[-] Running genetic algorithm...\", end=\"\\n\\n\")\n ga = GeneticAlgorithm( \n popSize=poplength, \n eliteSize=2,\n crossoverRate=0.9, \n mutationRate=0.2, \n generations=generations, \n dressCode=dressCode, \n color=color, \n budget=budget,\n boost=boost,\n error=error,\n show=show,\n )\n # start the genetic algorithm \n ga.start()\n if (best != -1):\n ga.showBestOutfit(best)\n ga.plotPerformance()",
"def main():\r\n \r\n world = WorldModel()\r\n #uncomment these lines and comment out the next 2 if you want to use the\r\n #full Baxter model\r\n #print \"Loading full Baxter model (be patient, this will take a minute)...\"\r\n #world.loadElement(os.path.join(model_dir,\"baxter.rob\"))\r\n print \"Loading simplified Baxter model...\"\r\n world.loadElement(os.path.join(model_dir,\"baxter_col.rob\"))\r\n print \"Loading Kiva pod model...\"\r\n world.loadElement(os.path.join(model_dir,\"kiva_pod/model.obj\"))\r\n print \"Loading plane model...\"\r\n world.loadElement(os.path.join(model_dir,\"plane.env\"))\r\n \r\n #shift the Baxter up a bit (95cm)\r\n Rbase,tbase = world.robot(0).getLink(0).getParentTransform()\r\n world.robot(0).getLink(0).setParentTransform(Rbase,(0,0,0.95))\r\n \r\n #translate pod to be in front of the robot, and rotate the pod by 90 degrees \r\n Trel = (so3.rotation((0,0,1),math.pi/2),[1.1,0,0])\r\n T = world.rigidObject(0).getTransform()\r\n world.rigidObject(0).setTransform(*se3.mul(Trel,T))\r\n \r\n #run the visualizer\r\n visualizer = MyGLViewer(world)\r\n visualizer.run()",
"def main():\n # Model setup\n source = np.array([1500, 8, 10, 5]) # assume source concentration and 3D coordinates\n u, pg_stability = 2, 'F' # setup environment\n sample_path = r\"data/ObservedData.csv\"\n # Build model object\n func = GaussianPlumeEAAI(lower=(10, -500, -500, 0), upper=(5000, 500, 500, 10), u=u,\n pg_stability=pg_stability, sample_path=sample_path)\n # Generate sample observed data\n func.generate_observed_data(source[0], source[1], source[2], source[3])\n\n # Reverse search source use observed data and PSO (assume unknown the source)\n pso_search_with_recommended_param(func)\n pso_search_with_optimized_param(func)",
"def main(argv):\n\n parser = argparse.ArgumentParser(\n description=__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter\n )\n parser = parse.parse_agglo_from_labelsets(parser)\n parser = parse.parse_common(parser)\n args = parser.parse_args()\n\n agglo_from_labelsets(\n args.inpufile,\n args.labelset_files,\n args.fwmap,\n args.outputfile,\n args.save_steps,\n args.protective,\n )",
"def run():\n names=[i.__name__ for i in modList]\n res,action=kcs_ui.string_select('fake vitesse generator',\n 'Please select the module you want to generate fake vitesse py.',\n 'Press option to generate for all modules.',\n names)\n if res==kcs_util.ok():\n mod=modList[action-1]\n des=kcs_ui.string_req('Where do you want to place the file?',r'C:\\temp')\n if des[0]==kcs_util.ok():\n# des = os.path.join(os.path.join(os.getcwd(), \"FakeVitesse\"))\n fname = des[1] + \"\\\\\" + mod.__name__ + \".py\"\n GenPy(mod, fname)\n elif res==kcs_util.options():\n des=kcs_ui.string_req('Where do you want to place the file?',r'C:\\temp')\n if des[0]==kcs_util.ok():\n for mod in modList:\n fname = des[1] + \"\\\\\" + mod.__name__ + \".py\"\n GenPy(mod, fname)",
"def main():\n greetings()\n run_jarvis()",
"def main():\n log(\"NGG CLI\", color=\"green\", figlet=True)\n log(\"Welcome to NGG CLI!\", \"yellow\")",
"def run_gvanno(arg_dict, host_directories):\n ## set basic Docker run commands\n output_vcf = 'None'\n output_pass_vcf = 'None'\n uid = ''\n docker_user_id = arg_dict['docker_user_id']\n\n global GENCODE_VERSION, VEP_ASSEMBLY\n if arg_dict['genome_assembly'] == 'grch37':\n GENCODE_VERSION = 'v19'\n VEP_ASSEMBLY = 'GRCh37'\n\n logger = getlogger('gvanno-get-OS')\n if docker_user_id:\n uid = docker_user_id\n elif platform.system() == 'Linux' or platform.system() == 'Darwin' or sys.platform == 'darwin' or sys.platform == 'linux2' or sys.platform == 'linux':\n uid = os.getuid()\n else:\n if platform.system() == 'Windows' or sys.platform == 'win32' or sys.platform == 'cygwin':\n uid = getpass.getuser()\n \n if uid == '':\n logger.warning('Was not able to get user id/username for logged-in user on the underlying platform (platform.system(): ' + str(platform.system()) + ', sys.platform: ' + str(sys.platform) + '), now running gvanno as root')\n uid = 'root'\n \n vepdb_dir_host = os.path.join(str(host_directories['db_dir_host']),'.vep')\n vcf_validation = 1\n if arg_dict['no_vcf_validate']:\n vcf_validation = 0\n data_dir = '/data'\n output_dir = '/workdir/output'\n vep_dir = '/usr/local/share/vep/data'\n input_vcf_docker = 'None'\n \n if host_directories['input_vcf_basename_host'] != 'NA':\n input_vcf_docker = '/workdir/input_vcf/' + str(host_directories['input_vcf_basename_host'])\n \n vep_volume_mapping = str(vepdb_dir_host) + \":/usr/local/share/vep/data\"\n databundle_volume_mapping = str(host_directories['base_dir_host']) + \":/data\"\n input_vcf_volume_mapping = str(host_directories['input_vcf_dir_host']) + \":/workdir/input_vcf\"\n output_volume_mapping = str(host_directories['output_dir_host']) + \":/workdir/output\"\n\n if arg_dict['container'] == 'docker':\n container_command_run1 = \"docker run --rm -t -u \" + str(uid) + \" -v=\" + str(databundle_volume_mapping) + \" -v=\" + str(vep_volume_mapping) + \" -v=\" + str(output_volume_mapping)\n elif arg_dict['container'] == 'singularity':\n container_command_run1 = \"singularity exec \" + \" -B \" + str(databundle_volume_mapping) + \" -B \" + str(vep_volume_mapping) + \" -B \" + str(output_volume_mapping)\n\n if host_directories['input_vcf_dir_host'] != 'NA' and arg_dict['container'] == 'docker':\n container_command_run1 = container_command_run1 + \" -v=\" + str(input_vcf_volume_mapping)\n elif host_directories['input_vcf_dir_host'] != 'NA' and arg_dict['container'] == 'singularity':\n container_command_run1 = container_command_run1 + \" -B \" + str(input_vcf_volume_mapping)\n\n if arg_dict['container'] == 'docker':\n container_command_run1 = container_command_run1 + \" -w=/workdir/output \" + str(DOCKER_IMAGE_VERSION) + \" sh -c \\\"\"\n elif arg_dict['container'] == 'singularity':\n container_command_run1 = container_command_run1 + \" -W /workdir/output \" + 'src/gvanno.sif' + \" sh -c \\\"\"\n\n if arg_dict['container'] == 'docker':\n container_command_run2 = \"docker run --rm -t -u \" + str(uid) + \" -v=\" + str(databundle_volume_mapping) + \" -v=\" + str(output_volume_mapping)\n container_command_run2 = container_command_run2 + \" -w=/workdir/output \" + str(DOCKER_IMAGE_VERSION) + \" sh -c \\\"\"\n docker_command_run_end = '\\\"'\n elif arg_dict['container'] == 'singularity':\n container_command_run2 = \"singularity exec \" + \" -B \" + str(databundle_volume_mapping) + \" -B \" + str(output_volume_mapping)\n container_command_run2 = container_command_run2 + \" -W /workdir/output \" + 'src/gvanno.sif' + \" sh -c \\\"\"\n 
docker_command_run_end = '\\\"'\n\n if arg_dict['debug']:\n logger.info(container_command_run1)\n logger.info(container_command_run2)\n\n ## GVANNO|start - Log key information about sample, options and assembly\n logger = getlogger(\"gvanno-start\")\n logger.info(\"--- Generic variant annotation (gvanno) workflow ----\")\n logger.info(\"Sample name: \" + str(arg_dict['sample_id']))\n logger.info(\"Genome assembly: \" + str(arg_dict['genome_assembly']))\n print()\n\n ## GVANNO|validate - verify input file (contents/format)\n logger = getlogger('gvanno-validate-input')\n logger.info(\"STEP 0: Validate input data\")\n vcf_validate_command = str(container_command_run1) + \"gvanno_validate_input.py \" + str(data_dir) + \" \" + str(input_vcf_docker) + \" \" + \\\n str(vcf_validation) + \" \" + str(arg_dict['genome_assembly']) + docker_command_run_end\n if arg_dict['debug']:\n logger.info(vcf_validate_command)\n\n check_subprocess(vcf_validate_command)\n logger.info('Finished')\n \n if not input_vcf_docker == 'None':\n \n ## Define input, output and temporary file names\n output_vcf = os.path.join(output_dir, str(arg_dict['sample_id']) + '_gvanno_' + str(arg_dict['genome_assembly']) + '.vcf.gz')\n output_tsv = os.path.join(output_dir, str(arg_dict['sample_id']) + '_gvanno_' + str(arg_dict['genome_assembly']) + '.tsv')\n output_pass_vcf = os.path.join(output_dir, str(arg_dict['sample_id']) + '_gvanno_pass_' + str(arg_dict['genome_assembly']) + '.vcf.gz')\n output_pass_tsv = os.path.join(output_dir, str(arg_dict['sample_id']) + '_gvanno_pass_' + str(arg_dict['genome_assembly']) + '.tsv')\n input_vcf_gvanno_ready = os.path.join(output_dir, re.sub(r'(\\.vcf$|\\.vcf\\.gz$)','.gvanno_ready.vcf.gz',host_directories['input_vcf_basename_host']))\n vep_vcf = re.sub(r'(\\.vcf$|\\.vcf\\.gz$)','.vep.vcf',input_vcf_gvanno_ready)\n vep_vcfanno_vcf = re.sub(r'(\\.vcf$|\\.vcf\\.gz$)','.vep.vcfanno.vcf',input_vcf_gvanno_ready)\n vep_vcfanno_annotated_vcf = re.sub(r'\\.vcfanno','.vcfanno.annotated',vep_vcfanno_vcf) + '.gz'\n vep_vcfanno_annotated_pass_vcf = re.sub(r'\\.vcfanno','.vcfanno.annotated.pass',vep_vcfanno_vcf) + '.gz'\n \n ## Path for human genome assembly and human ancestor (FASTA)\n fasta_assembly = os.path.join(vep_dir, \"homo_sapiens\", str(VEP_VERSION) + \"_\" + str(VEP_ASSEMBLY), \"Homo_sapiens.\" + str(VEP_ASSEMBLY) + \".dna.primary_assembly.fa.gz\")\n ancestor_assembly = os.path.join(vep_dir, \"homo_sapiens\", str(VEP_VERSION) + \"_\" + str(VEP_ASSEMBLY), \"human_ancestor.fa.gz\")\n\n ## List all VEP flags used when calling VEP\n loftee_dir = '/opt/vep/src/ensembl-vep/modules'\n plugins_in_use = \"NearestExonJB\"\n vep_flags = \"--hgvs --dont_skip --failed 1 --af --af_1kg --af_gnomade --af_gnomadg --variant_class --domains --symbol --protein --ccds \" + \\\n \"--uniprot --appris --biotype --canonical --format vcf --mane --cache --numbers --total_length --allele_number --no_escape \" + \\\n \"--xref_refseq --plugin NearestExonJB,max_range=50000\"\n vep_options = \"--vcf --quiet --check_ref --flag_pick_allele_gene --pick_order \" + str(arg_dict['vep_pick_order']) + \\\n \" --force_overwrite --species homo_sapiens --assembly \" + str(VEP_ASSEMBLY) + \" --offline --fork \" + \\\n str(arg_dict['vep_n_forks']) + \" \" + str(vep_flags) + \" --dir /usr/local/share/vep/data\"\n \n gencode_set_in_use = \"GENCODE - all transcripts\"\n if arg_dict['vep_gencode_all'] == 0:\n vep_options = vep_options + \" --gencode_basic\"\n gencode_set_in_use = \"GENCODE - basic transcript set (--gencode_basic)\"\n if 
arg_dict['vep_skip_intergenic'] == 1:\n vep_options = vep_options + \" --no_intergenic\"\n if arg_dict['vep_coding_only'] == 1:\n vep_options = vep_options + \" --coding_only\"\n if arg_dict['vep_regulatory'] == 1:\n vep_options = vep_options + \" --regulatory\"\n if arg_dict['vep_lof_prediction'] == 1:\n plugins_in_use = plugins_in_use + \", LoF\"\n vep_options += \" --plugin LoF,loftee_path:\" + loftee_dir + \",human_ancestor_fa:\" + str(ancestor_assembly) + \",use_gerp_end_trunc:0 --dir_plugins \" + loftee_dir\n\n ## Compose full VEP command\n vep_main_command = str(container_command_run1) + \"vep --input_file \" + str(input_vcf_gvanno_ready) + \" --output_file \" + str(vep_vcf) + \\\n \" \" + str(vep_options) + \" --buffer_size \" + str(arg_dict['vep_buffer_size']) + \" --fasta \" + str(fasta_assembly) + docker_command_run_end\n vep_bgzip_command = container_command_run1 + \"bgzip -f -c \" + str(vep_vcf) + \" > \" + str(vep_vcf) + \".gz\" + docker_command_run_end\n vep_tabix_command = str(container_command_run1) + \"tabix -f -p vcf \" + str(vep_vcf) + \".gz\" + docker_command_run_end\n\n ## GVANNO|VEP - run consequence annotation with Variant Effect Predictor\n logger = getlogger('gvanno-vep') \n print()\n logger.info(\"STEP 1: Basic variant annotation with Variant Effect Predictor (v\" + str(VEP_VERSION) + \", GENCODE \" + str(GENCODE_VERSION) + \", \" + str(arg_dict['genome_assembly']) + \")\")\n logger.info(\"VEP configuration - one primary consequence block pr. alternative allele (--flag_pick_allele)\")\n logger.info(\"VEP configuration - transcript pick order: \" + str(arg_dict['vep_pick_order']))\n logger.info(\"VEP configuration - transcript pick order: See more at https://www.ensembl.org/info/docs/tools/vep/script/vep_other.html#pick_options\")\n logger.info(\"VEP configuration - GENCODE set: \" + str(gencode_set_in_use))\n logger.info(\"VEP configuration - buffer size: \" + str(arg_dict['vep_buffer_size']))\n logger.info(\"VEP configuration - skip intergenic: \" + str(arg_dict['vep_skip_intergenic']))\n logger.info(\"VEP configuration - coding only: \" + str(arg_dict['vep_coding_only']))\n logger.info(\"VEP configuration - look for overlap with regulatory regions: \" + str(arg_dict['vep_regulatory']))\n logger.info(\"VEP configuration - number of forks: \" + str(arg_dict['vep_n_forks']))\n logger.info(\"VEP configuration - loss-of-function prediction: \" + str(arg_dict['vep_lof_prediction']))\n logger.info(\"VEP configuration - plugins in use: \" + str(plugins_in_use))\n\n if arg_dict['debug']:\n logger.info(vep_main_command)\n check_subprocess(vep_main_command)\n check_subprocess(vep_bgzip_command)\n check_subprocess(vep_tabix_command)\n logger.info(\"Finished\")\n\n ## GVANNO|vcfanno - annotate VCF against a number of variant annotation resources\n print()\n logger = getlogger('gvanno-vcfanno')\n logger.info(\"STEP 2: Clinical/functional variant annotations with gvanno-vcfanno (Clinvar, ncER, dbNSFP, GWAS catalog)\")\n logger.info('vcfanno configuration - number of processes (-p): ' + str(arg_dict['vcfanno_n_processes']))\n gvanno_vcfanno_command = str(container_command_run2) + \"gvanno_vcfanno.py --num_processes \" + str(arg_dict['vcfanno_n_processes']) + \\\n \" --dbnsfp --clinvar --ncer --gvanno_xref --gwas \" + str(vep_vcf) + \".gz \" + str(vep_vcfanno_vcf) + \\\n \" \" + os.path.join(data_dir, \"data\", str(arg_dict['genome_assembly'])) + docker_command_run_end\n \n if arg_dict['debug']:\n logger.info(gvanno_vcfanno_command)\n check_subprocess(gvanno_vcfanno_command)\n 
logger.info(\"Finished\")\n\n ## GVANNO|summarise - expand annotations in VEP and vcfanno-annotated VCF file\n print()\n logger = getlogger(\"gvanno-summarise\")\n logger.info(\"STEP 3: Summarise gene and variant annotations with gvanno-summarise\")\n logger.info(\"Configuration - oncogenicity classification: \" + str(int(arg_dict['oncogenicity_annotation'])))\n gvanno_summarise_command = str(container_command_run2) + \"gvanno_summarise.py \" + str(vep_vcfanno_vcf) + \".gz \" + \\\n os.path.join(data_dir, \"data\", str(arg_dict['genome_assembly'])) + \" \" + str(int(arg_dict['vep_lof_prediction'])) + \\\n \" \" + str(int(arg_dict['oncogenicity_annotation'])) + \" \" + str(int(arg_dict['vep_regulatory'])) + \" \" + \\\n str(int(arg_dict['debug'])) + docker_command_run_end\n \n if arg_dict['debug']:\n logger.info(gvanno_summarise_command)\n check_subprocess(gvanno_summarise_command)\n logger.info(\"Finished\")\n \n ## GVANNO|clean - move output files and clean up temporary files\n create_output_vcf_command1 = str(container_command_run2) + 'mv ' + str(vep_vcfanno_annotated_vcf) + ' ' + str(output_vcf) + \"\\\"\"\n create_output_vcf_command2 = str(container_command_run2) + 'mv ' + str(vep_vcfanno_annotated_vcf) + '.tbi ' + str(output_vcf) + '.tbi' + \"\\\"\"\n create_output_vcf_command3 = str(container_command_run2) + 'mv ' + str(vep_vcfanno_annotated_pass_vcf) + ' ' + str(output_pass_vcf) + \"\\\"\"\n create_output_vcf_command4 = str(container_command_run2) + 'mv ' + str(vep_vcfanno_annotated_pass_vcf) + '.tbi ' + str(output_pass_vcf) + '.tbi' + \"\\\"\"\n clean_command = str(container_command_run2) + 'rm -f ' + str(vep_vcf) + '* ' + str(vep_vcfanno_annotated_vcf) + ' ' + \\\n str(vep_vcfanno_annotated_pass_vcf) + '* ' + str(vep_vcfanno_vcf) + '* ' + str(input_vcf_gvanno_ready) + \"* \" + docker_command_run_end\n check_subprocess(create_output_vcf_command1)\n check_subprocess(create_output_vcf_command2)\n check_subprocess(create_output_vcf_command3)\n check_subprocess(create_output_vcf_command4)\n if not arg_dict['debug']:\n check_subprocess(clean_command)\n \n print()\n ## GVANNO|vcf2tsv - convert VCF to TSV with https://github.com/sigven/vcf2tsv\n logger = getlogger(\"gvanno-vcf2tsv\")\n logger.info(\"STEP 4: Converting genomic VCF to TSV with https://github.com/sigven/vcf2tsvpy\")\n gvanno_vcf2tsv_command_pass = str(container_command_run2) + \"vcf2tsvpy --input_vcf \" + str(output_pass_vcf) + \" --compress --out_tsv \" + str(output_pass_tsv) + docker_command_run_end\n gvanno_vcf2tsv_command_all = str(container_command_run2) + \"vcf2tsvpy --input_vcf \" + str(output_vcf) + \" --compress --keep_rejected --out_tsv \" + str(output_tsv) + docker_command_run_end\n logger.info(\"Conversion of VCF variant data to records of tab-separated values - PASS variants only\")\n check_subprocess(gvanno_vcf2tsv_command_pass)\n logger.info(\"Conversion of VCF variant data to records of tab-separated values - PASS and non-PASS variants\")\n check_subprocess(gvanno_vcf2tsv_command_all)\n logger.info(\"Finished\")\n \n #return\n \n print",
"def main():\n opt = parse_opts()\n run(opt)",
"def main():\n opt = parse_opts()\n run(opt)",
"def main():\n utils.vip_main(AHUAgent, version=__version__)",
"def main( ):\n\n # Quasi constant\n FrequencyRange = np.logspace( 0, 5, 1000 )\n doc = curdoc()\n\n # ========================== GRAPHICAL PART ================================\n\n # CREATE ALL PLOTS:\n Input = figure( title = \"\",\n tools = \"\",\n width = 500,\n height = 500 )\n\n\n\n Graph = VibroP_GraphObject( [ \"Wave Velocities\",\n \"Wave Velocities plus Limit Frequencies\",\n \"Modes in Band\",\n \"Modal Density\",\n \"Modal Overlap Factor\",\n \"Maximum Element Size (FEM)\"],\n FrequencyRange,\n Width = 950,\n Height = 650)\n\n\n Graph.defineContainers([\"WaveVelocity\",\n \"WaveVElocityLimitFreq\",\n \"ModesInBand\",\n \"ModalDensity\",\n \"ModalOverlapFactor\",\n \"MaxElementSize\"\n \"EigenFrequency\"])\n\n\n\n # CREATE TABLES:\n # ........................ Elastic Modulus table ...........................\n ELASTIC_MODULUS_TITEL = Div( text = \"\"\"ELASTIC MODULUS:\"\"\" )\n ElasticModulus = VibroP_InteractiveTable( TableName = \"ELASTIC MODULUS\",\n Rows = 1,\n Columns = 3 )\n\n ElasticModulus.setTitels( [ [ EMODUL_X, EMODUL_Y, EMODUL_Z ] ] )\n\n OrthotropicData = [ [ \"1.10E+10\", \"3.67E+08\", \"3.67E+08\" ] ]\n IsotropicData = [ [ \"1.10E+10\", \"1.10E+10\", \"1.10E+10\" ] ]\n ElasticModulus.setValues( OrthotropicData )\n ElasticModulus.addBuffer( BufferName = \"DefaultIsotropic\",\n BufferData = IsotropicData )\n\n ElasticModulus.addBuffer( BufferName = \"DefaultOrthotropic\",\n BufferData = OrthotropicData )\n\n ElasticModulus.addBuffer( BufferName = \"GeneralIsotropic\",\n BufferData = IsotropicData )\n\n ElasticModulus.addBuffer( BufferName = \"GeneralOrthotropic\",\n BufferData = OrthotropicData )\n\n ElasticModulus.addBuffer( BufferName = \"Input\",\n BufferData = OrthotropicData )\n\n\n # ........................ Shear Modulus table .............................\n SHEAR_MODULUS_TITEL = Div( text = \"\"\"SHEAR MODULUS:\"\"\" )\n ShearModulus = VibroP_InteractiveTable( TableName = \"SHEAR MODULUS\",\n Rows = 1,\n Columns = 3 )\n\n ShearModulus.setTitels( [ [ EMODUL_XY, EMODUL_XZ, EMODUL_YZ ] ] )\n OrthotropicData = [ [ \"6.90E+08\", \"6.90E+08\", \"6.90E+07\" ] ]\n IsotropicData = [ [ \"6.90E+08\", \"6.90E+08\", \"6.90E+08\" ] ]\n\n ShearModulus.setValues( OrthotropicData )\n\n ShearModulus.addBuffer( BufferName = \"DefaultIsotropic\",\n BufferData = IsotropicData )\n\n ShearModulus.addBuffer( BufferName = \"DefaultOrthotropic\",\n BufferData = OrthotropicData )\n\n ShearModulus.addBuffer( BufferName = \"GeneralIsotropic\",\n BufferData = IsotropicData )\n\n\n ShearModulus.addBuffer( BufferName = \"GeneralOrthotropic\",\n BufferData = OrthotropicData )\n\n ShearModulus.addBuffer( BufferName = \"Input\",\n BufferData = OrthotropicData )\n\n\n # ........................ 
Poissons ratios ................................\n POISSON_RATIO_TITEL = Div( text = \"\"\"POISSON'S RATIOS:\"\"\" )\n PoissonRatios = VibroP_InteractiveTable( TableName = \"POISSON'S RATIOS\",\n Rows = 2,\n Columns = 3 )\n\n PoissonRatios.setTitels( [ [ POISSON_RATIO_XY,\n POISSON_RATIO_XZ,\n POISSON_RATIO_YZ ],\n [ POISSON_RATIO_YX + \"\\t(auto)\",\n POISSON_RATIO_ZX + \"\\t(auto)\",\n POISSON_RATIO_ZY + \"\\t(auto)\" ] ] )\n\n PoissonRatios.setDisabled(1, 0, True)\n PoissonRatios.setDisabled(1, 1, True)\n PoissonRatios.setDisabled(1, 2, True)\n\n DataIsotropic = [ [ \"0.42\", \"0.42\", \"0.42\" ],\n [ \"0.42\", \"0.42\", \"0.42\" ] ]\n\n DataOrthotropic = [ [ \"0.42\", \"0.42\", \"0.3\" ],\n [ \"0.014\", \"0.014\", \"0.3\" ] ]\n\n PoissonRatios.setValues( DataOrthotropic )\n\n\n PoissonRatios.addBuffer( BufferName = \"DefaultIsotropic\",\n BufferData = DataIsotropic )\n\n PoissonRatios.addBuffer( BufferName = \"DefaultOrthotropic\",\n BufferData = DataOrthotropic )\n\n PoissonRatios.addBuffer( BufferName = \"GeneralIsotropic\",\n BufferData = DataIsotropic )\n\n PoissonRatios.addBuffer( BufferName = \"GeneralOrthotropic\",\n BufferData = DataOrthotropic )\n\n PoissonRatios.addBuffer( BufferName = \"Input\",\n BufferData = DataOrthotropic )\n\n\n # ........................ Material Properties table .......................\n MATERIALS_TITEL = Div( text = \"\"\"FURTHER MATERIAL PROPERTIES:\"\"\" )\n MaterialProperties = VibroP_InteractiveTable( TableName = \"MATERIAL PROPERTIES\",\n Rows = 1,\n Columns = 2 )\n\n MaterialProperties.setTitels( [ [ \"Density\", \"Loss Factor\" ] ] )\n\n Data = [ [ \"450.0\", \"0.012\" ] ]\n MaterialProperties.setValues( Data )\n\n MaterialProperties.setValues( Data )\n\n MaterialProperties.addBuffer( BufferName = \"DefaultIsotropic\",\n BufferData = Data )\n\n MaterialProperties.addBuffer( BufferName = \"DefaultOrthotropic\",\n BufferData = Data )\n\n MaterialProperties.addBuffer( BufferName = \"General\",\n BufferData = Data )\n\n MaterialProperties.addBuffer( BufferName = \"Input\",\n BufferData = Data )\n\n\n # ........................ 
Geometry table .......................\n GEOMETRY_TITEL = Div( text = \"\"\"GEOMETRY:\"\"\" )\n GeometryProperties = VibroP_InteractiveTable( TableName = \"GEOMETRY\",\n Rows = 1,\n Columns = 3 )\n\n GeometryProperties.setTitels( [ [ \"Length\", \"Width\", \"Thicknesses of the layers*\" ] ] )\n\n Data = [ [ \"2.5\", \"3.0\", \"0.027\" ] ]\n GeometryProperties.setValues( Data )\n\n GeometryProperties.setValues( Data )\n\n GeometryProperties.addBuffer( BufferName = \"DefaultIsotropic\",\n BufferData = Data )\n\n GeometryProperties.addBuffer( BufferName = \"DefaultOrthotropic\",\n BufferData = Data )\n\n GeometryProperties.addBuffer( BufferName = \"General\",\n BufferData = Data )\n\n GeometryProperties.addBuffer( BufferName = \"Input\",\n BufferData = Data )\n\n\n\n ElasticModulus.fillTableWithBufferData( \"DefaultOrthotropic\" )\n ShearModulus.fillTableWithBufferData( \"DefaultOrthotropic\" )\n PoissonRatios.fillTableWithBufferData( \"DefaultOrthotropic\" )\n MaterialProperties.fillTableWithBufferData( \"DefaultOrthotropic\" )\n GeometryProperties.fillTableWithBufferData( \"DefaultOrthotropic\" )\n\n\n Tables = { \"ElasticModulus\" : ElasticModulus,\n \"ShearModulus\" : ShearModulus,\n \"PoissonRatios\" : PoissonRatios,\n \"MaterialProperties\" : MaterialProperties,\n \"GeometryProperties\" : GeometryProperties }\n\n\n # CREATE BUTTONS:\n SetDefaultButton = Button( label = \"Default\",\n button_type = \"success\",\n width = 100 )\n\n\n ApplyButton = Button( label = \"Apply\",\n button_type = \"success\",\n width = 100 )\n\n\n # PrintReport = Button( label = \"Print Report\",\n # button_type = \"primary\",\n # width = 100 )\n\n\n ShowInput = Button( label = \"Show Input\",\n button_type = \"success\",\n width = 100 )\n\n\n ModeRadioButtons = RadioButtonGroup( labels = [ \"Orthotropic Material\",\n \"Isotropic Material\" ],\n width = 500,\n active = 0 )\n\n\n \n LayersInfo = VibroP_Message( Color = \"black\",\n Size = 2,\n MessageHeader = \"Number of layers: \" )\n\n WarningMessage = VibroP_Message( Color = \"red\",\n Size = 3 ,\n MessageHeader = \"Warning: \" )\n\n\n Info = Div( text = \"*Thicknesses of top to center layer separated by \"\n \"semicolon or space: <br>\"\n\t\t\t\t\t \" Symmetric cross section with odd number of layers\"\n \" and crosswise layup assumed.\",\n render_as_text = False,\n width = 500,\n height = 40 )\n\n\n Scheme = Div( text = \"<img src='/Vibroacoustic_plates/static/images/scheme.png' width=464 height=220>\",\n width = 464,\n height = 220 )\n\n Description = Div( text = \"The application \\\"Vibroacoustics of Plates\\\" can be classified in two steps: <br><br>\"\n \"<b>1.</b> Insert the physical properties of a homogenous plate or of a single layer\"\n \" in the case of a layered plate (default values are given) on the left and press <i>'Apply'</i>. <br><br>\"\n \"<b>Notice</b> that in the case of a layered plate, a symmetric cross section\"\n \" with an odd number of layers and a crosswise layup is assumed (cf. scheme).\"\n \" Therefore, the thicknesses of the top to the center layer have to be inserted.\"\n \" The material properties are homogenized through the thickness.\"\n \" Thus, the input data of the single layer\"\n \" is overwritten by homogenized material parameters of the plate after pressing <i>'Apply'</i>.\"\n \" The input data of the single layers can be checked by pressing the\"\n \" button <i>'Show Input'</i>. <br><br>\"\n \"<b>2.</b> On the right, dynamic properties of the plate and of\"\n \" its wave types are plotted. 
These can be studied\"\n \" using e.g. the zoom function and saved as .png.<br><br>\"\n \" Please refer to the following publication for further explanations and references:<br><br>\"\n \" Winter, C.: Frequency Dependent Modeling for the Prediction of the Sound Transmission in Timber Constructions. (2018).\"\n\n\t\t\t\t\t ,\n render_as_text = False,\n width = 1000,\n height = 50 )\n \n Title = Div ( text = \"<b><h1> Vibroacoustics of Plates</b><h1>\",\n render_as_text = False,\n width = 900,\n height = 80)\n\n # SPECIFY THE LAYOUT:\n Buttons = row( row( Spacer( width = 50),\n ApplyButton,\n Spacer( width = 50),\n ShowInput,\n Spacer( width = 50),\n SetDefaultButton ) )\n\n Headline = row( column( Title, Description ), Spacer( width = 50 ), Scheme )\n\t\n LeftSide = column( ModeRadioButtons,\n Spacer(height=20),\n ELASTIC_MODULUS_TITEL,\n ElasticModulus.Table,\n Spacer(height=20),\n SHEAR_MODULUS_TITEL,\n ShearModulus.Table,\n Spacer(height=20),\n POISSON_RATIO_TITEL,\n PoissonRatios.Table,\n Spacer(height=20),\n MATERIALS_TITEL,\n MaterialProperties.Table,\n Spacer(height=20),\n GEOMETRY_TITEL,\n GeometryProperties.Table,\n LayersInfo.Widget,\n Spacer(height=10),\n Info,\n Spacer( height = 20 ),\n WarningMessage.Widget )\n\n\n RightSide = column( Graph.Widget, Spacer( height = 50 ),\n Buttons,\n Spacer( height = 100 ) )\n\n\n # ========================= COMMUNICATION PART =============================\n\n\n # Set up callback function for the \"Apply\" button\n ApplyButton.on_click( partial( updateData,\n Tables,\n Graph,\n LayersInfo,\n WarningMessage ) )\n\n\n # Set up callback function for all radion buttons that are responsible\n # for changing the mode, namely: Isotropic and Orthotropic material properties\n ModeRadioButtons.on_click( partial( updateMode,\n Tables,\n WarningMessage,\n Graph ) )\n\n\n # Set up callback function for all radion buttons that are responsible\n # for plotting different graphs\n Graph.GraphRadioButtons.on_click( partial( updateGraph, Graph ) )\n\n\n # Set up callback function for all the \"Default\" button that are responsible\n # for assigning the default data to all entries\n SetDefaultButton.on_click( partial( setDefaultSettings,\n Tables,\n Graph,\n LayersInfo,\n WarningMessage ) )\n\n\n ShowInput.on_click( partial( showInput, Tables, LayersInfo ) )\n\n\n # ================= RUN SIMULATION WITH DEFAULT DATA =====================\n updateData( Tables, Graph, LayersInfo, WarningMessage )\n\n\n # RUN ALL WIDGETS\n doc.add_root(Headline)\n doc.add_root( column( Spacer( height = 150 ),\n row( LeftSide,\n Spacer( width = 50 ),\n RightSide,\n Spacer( width = 50 ) ) ) )",
"def main():\n utils.vip_main(ahu_agent, version=__version__)",
"def main():\n run_nutanix_vm_creation_module()",
"def main():\n parser = argparse.ArgumentParser()\n parser.add_argument('--viewer', '-v', type=str, default='interactivemarker',\n help='The viewer to attach (none for no viewer)')\n parser.add_argument('--monitor', action='store_true',\n help='Display a UI to monitor progress of the planner')\n parser.add_argument('--planner', type=str, choices=['dfs', 'restart'], default='restart',\n help='The planner to use')\n parser.add_argument('--robot', type=str, default='herb',\n help='Robot to run the task on')\n\n openravepy.RaveInitialize(True, level=openravepy.DebugLevel.Info)\n openravepy.misc.InitOpenRAVELogging()\n\n args = parser.parse_args()\n\n env, robot = herbpy.initialize()\n\n # Get the desired manipulator\n manipulator = robot.GetManipulator('right')\n\n if args.viewer != 'none':\n env.SetViewer(args.viewer)\n\n monitor = None\n # Create a monitor\n if args.monitor:\n monitor = magi.monitor.ActionMonitor()\n\n def signal_handler(signum, frame):\n \"\"\"Signal handler to gracefully kill the monitor.\"\"\"\n monitor.stop()\n sys.exit(0)\n signal.signal(signal.SIGINT, signal_handler)\n signal.signal(signal.SIGTERM, signal_handler)\n\n # Create a planner\n if args.planner == 'restart':\n planner = RestartPlanner(monitor=monitor)\n elif args.planner == 'dfs':\n planner = DepthFirstPlanner(monitor=monitor, use_frustration=True)\n\n if monitor is not None:\n monitor.reset()\n\n # Detect objects\n table, glass = detect_objects(robot)\n\n try:\n # Create the task.\n action = grasp_glass_action_graph(manipulator, glass, table)\n\n # Plan the task\n with env:\n solution = planner.plan_action(env, action)\n\n # Execute the task\n execute_pipeline(env, solution, simulate=True, monitor=monitor)\n\n except ActionError as err:\n LOGGER.info('Failed to complete planning for task: %s', str(err))\n raise\n\n except ExecutionError as err:\n LOGGER.info('Failed to execute task: %s', str(err))\n raise\n\n IPython.embed()\n\n if monitor:\n monitor.stop()",
"def main():\n args = parameter_parser()\n tab_printer(args)\n trainer = GPNTrainer(args)\n # trainer.fit()\n \"\"\"\n Scoring on the prediction and learning ability.\n \"\"\"\n trainer.score()\n \"\"\"\n Scoring on the subgraph test set.\n \"\"\"\n # trainer.score2()\n \"\"\"\n Scoring on the generalization ability.\n \"\"\"\n # trainer.score3()\n \"\"\"\n Finetuning for downstream tasks.\n \"\"\"\n # model = finetune_GPN(args, trainer.number_of_labels)\n # model.finetune()",
"def run_combpso():\n\n #set environment variables for feature selection examples\n print('Initialize model data ***** \\n')\n start = datetime.now()\n\n SW = []\n for i in range(config.epochs):\n print(f'Processing Run {i} ******************** \\n')\n sw = combpso()\n SW.append(sw)\n\n # Union of all gbests + Local search\n print('\\n Processing local search ****** \\n')\n gbest_u = np.zeros(config.particle_size).astype(np.int)\n for sw in SW:\n gbest_u = np.bitwise_or(gbest_u, sw._gbest_b)\n sw_u = Swarm()\n sw_u._gbest_b = gbest_u\n sw_u._gbest_nbf = gbest_u.sum()\n sw_u._local_search()\n SW.append(sw_u)\n print(sw_u._final_str())\n\n print('Time elapsed: {} '.format(datetime.now() - start))\n\n print('****** Dump results to pickle file ***** \\n')\n g = config.genes[sw_u._gbest_b==1]\n with open(join(config.folder, str(sys.argv[1]) + '.pickle'), 'wb') as p_wb:\n pickle.dump(g, p_wb)",
"def main():\n parser = argparse.ArgumentParser(description=main.__doc__)\n parser.add_argument('--maxsteps', type=int, default=100000)\n parser.add_argument('--seed', type=int, default=0)\n parser.add_argument('--savefile', type=str, required=True)\n nproc = max(cpu_count() - 1, 1)\n parser.add_argument('--maxprocs', type=int, default=nproc)\n args = parser.parse_args()\n\n seed = args.seed\n np.random.seed(seed)\n venv = gen_vectorized_pong_env(args.maxprocs)\n policy = create_random_policy(venv)\n\n num_timesteps = 0\n paths = []\n while num_timesteps < args.maxsteps:\n print('{: 10d} of {: 10d} steps'.format(\n num_timesteps, args.maxsteps))\n new_paths = vsample(venv, policy)\n paths += new_paths\n num_timesteps += sum(len(path.obs) for path in new_paths)\n\n dataset = Dataset.from_paths(venv, paths)\n print('Generated', len(dataset.obs), 'timesteps total')\n dataset.save(args.savefile)",
"def main():\n ##############################\n # Options\n ##############################\n\n bev_generator = 'slices'\n slices_config = \\\n \"\"\"\n slices {\n height_lo: -0.2\n height_hi: 2.3\n num_slices: 5\n }\n \"\"\"\n # Use None for a random image\n #img_idx = None\n img_idx = 6\n\n show_ground_truth = True # Whether to overlay ground_truth boxes\n show_height_maps = False # Whether to show the five height maps\n show_images = False # Whether to show the images\n\n point_cloud_source = 'lidar'\n pre_label_dir = '/home/cecilia/leo_projects/bishe2019/3D-Detection/avod/data/outputs/pyramid_cars_with_aug_rep_loss/predictions/kitti_native_eval/0.1/112000/data/'\n ##############################\n # End of Options\n ##############################\n\n dataset_config = DatasetBuilder.copy_config(DatasetBuilder.KITTI_VAL)\n dataset_config = DatasetBuilder.merge_defaults(dataset_config)\n\n # Overwrite bev_generator\n if bev_generator == 'slices':\n text_format.Merge(slices_config,\n dataset_config.kitti_utils_config.bev_generator)\n else:\n raise ValueError('Invalid bev_generator')\n\n dataset = DatasetBuilder.build_kitti_dataset(dataset_config,\n use_defaults=False)\n\n if img_idx is None:\n img_idx = int(random.random() * dataset.num_samples)\n\n sample_name = \"{:06}\".format(img_idx)\n print('=== Showing BEV maps for image: {}.png ==='.format(sample_name))\n\n # Load image\n image = cv2.imread(dataset.get_rgb_image_path(sample_name))\n image_shape = image.shape[0:2]\n\n kitti_utils = dataset.kitti_utils\n point_cloud = kitti_utils.get_point_cloud(\n point_cloud_source, img_idx, image_shape)\n ground_plane = kitti_utils.get_ground_plane(sample_name)\n bev_images = kitti_utils.create_bev_maps(point_cloud, ground_plane)\n\n height_maps = np.array(bev_images.get(\"height_maps\"))\n density_map = np.array(bev_images.get(\"density_map\"))\n\n # Get groundtruth bev-info\n gt_box_points, gt_box_points_norm = [None, None]\n if show_ground_truth:\n gt_obj_labels = obj_utils.read_labels(dataset.label_dir, img_idx)\n gt_filtered_objs = gt_obj_labels\n\n gt_label_boxes = []\n for gt_label in gt_filtered_objs:\n gt_box = box_3d_encoder.object_label_to_box_3d(gt_label)\n gt_label_boxes.append(gt_box)\n\n gt_label_boxes = np.array(gt_label_boxes)\n gt_box_points, gt_box_points_norm = box_3d_projector.project_to_bev(\n gt_label_boxes, [[-40, 40], [0, 70]])\n\n # Get prediction bev-info\n pre_box_points, pre_box_points_norm = [None, None]\n pre_obj_labels = obj_utils.read_labels(pre_label_dir, img_idx)\n\n pre_filtered_objs = pre_obj_labels\n\n pre_label_boxes = []\n for pre_label in pre_filtered_objs:\n pre_box = box_3d_encoder.object_label_to_box_3d(pre_label)\n pre_label_boxes.append(pre_box)\n\n pre_label_boxes = np.array(pre_label_boxes)\n pre_box_points, pre_box_points_norm = box_3d_projector.project_to_bev(\n pre_label_boxes, [[-40, 40], [0, 70]])\n\n \n rgb_img_size = (np.array((1242, 375)) * 0.75).astype(np.int16)\n img_x_start = 60\n img_y_start = 330\n\n img_x = img_x_start\n img_y = img_y_start\n img_w = 400\n img_h = 350\n img_titlebar_h = 20\n\n # Show images if show_images = True\n if show_images:\n vis_utils.cv2_show_image(\"Image\", image,\n size_wh=rgb_img_size, location_xy=(img_x, 0))\n\n # Height maps if show_height_maps = True\n if show_height_maps:\n\n for map_idx in range(len(height_maps)):\n height_map = height_maps[map_idx]\n\n height_map = draw_boxes(height_map, gt_box_points_norm, pre_box_points_norm)\n vis_utils.cv2_show_image(\n \"Height Map {}\".format(map_idx), height_map, 
size_wh=(\n img_w, img_h), location_xy=(\n img_x, img_y))\n\n img_x += img_w\n # Wrap around\n if (img_x + img_w) > 1920:\n img_x = img_x_start\n img_y += img_h + img_titlebar_h\n\n # Density map (Normal BEV)\n density_map = draw_boxes(density_map, gt_box_points_norm, pre_box_points_norm)\n vis_utils.cv2_show_image(\n \"Density Map\", density_map, size_wh=(\n img_w, img_h), location_xy=(\n img_x, img_y))\n\n cv2.waitKey()",
"def main(_):\n hps = LM.get_default_hparams().parse(FLAGS.hpconfig)\n hps._set(\"num_gpus\", FLAGS.num_gpus)\n print ('*****HYPER PARAMETERS*****')\n print (hps)\n print ('**************************')\n\n vocab = Vocabulary.from_file(os.path.join(FLAGS.datadir, \"vocabulary.txt\"))\n\n if FLAGS.mode == \"train\":\n #hps.batch_size = 256\n dataset = Dataset(vocab, os.path.join(FLAGS.datadir, \"train.txt\"))\n run_train(dataset, hps, os.path.join(FLAGS.logdir, \"train\"), ps_device=\"/gpu:0\")\n elif FLAGS.mode.startswith(\"eval\"):\n data_dir = os.path.join(FLAGS.datadir, \"eval.txt\")\n #predict_model = prediction.Model('/dir/ckpt',os.path.join(FLAGS.datadir, \"vocabulary.txt\"), hps)\n\n dataset = Dataset(vocab, data_dir, deterministic=True)\n prefix_words = \"<brk>\".split()\n predict_model = predict.Model(hps, FLAGS.logdir, FLAGS.datadir)\n print ('start input')\n out = predict_model.predictnextkwords(prefix_words, FLAGS.num_sen)\n for row in out:\n print(' '.join(row) + \"\\n\")\n print(\"len_out: \" + str(len(out)))\n #prediction.topkwords(prefix_words, dataset, hps, FLAGS.logdir, FLAGS.mode)\n #sentence_ppl(prefix_words,dataset, hps, FLAGS.logdir, FLAGS.mode)\n #print vocab\n #dataset = Dataset(vocab, os.path.join(FLAGS.datadir, \"eval.txt\"))\n #run_eval(dataset, hps, FLAGS.logdir, FLAGS.mode, FLAGS.eval_steps)"
] | [
"0.6183873",
"0.61158395",
"0.6082892",
"0.60333556",
"0.5949687",
"0.59383035",
"0.5913861",
"0.58946764",
"0.5856102",
"0.58391666",
"0.58317477",
"0.5813667",
"0.5773393",
"0.5761071",
"0.57519263",
"0.5735813",
"0.5703431",
"0.56931114",
"0.569222",
"0.569222",
"0.5691295",
"0.5681782",
"0.56709695",
"0.565366",
"0.5640452",
"0.5634897",
"0.56184554",
"0.56158644",
"0.5594093",
"0.5592652"
] | 0.6541627 | 0 |
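To make the row layout above concrete, here is a minimal sketch of how one record could be folded into an (anchor, positive, negatives) triplet for contrastive training. The field names and the loader are assumptions based on the visible layout (query | document | metadata | negatives | negative_scores | document_score | document_rank), not a documented API for this dump.

```python
# Hypothetical helper: collapse one dump row into a training triplet.
def to_triplet(row: dict) -> dict:
    return {
        "anchor": row["query"],        # natural-language docstring
        "positive": row["document"],   # the matching code snippet
        "negatives": row["negatives"], # hard-negative code snippets
    }

# Toy row shaped like the records in this section (values truncated).
row = {
    "query": "Runs the Data module",
    "document": "def run_data(arguments): ...",
    "negatives": ["def run(self): ...", "def Run(): pass"],
}
print(to_triplet(row)["anchor"])  # -> Runs the Data module
```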
Runs the Data module | def run_data (arguments):
if arguments.define_labels:
data.define_labels()
elif arguments.preprocess:
# Preprocess from data_raw --> data_preprocessed
data.preprocess()
elif arguments.annotate:
# Annotate from data_preprocessed --> data_annotated
reverse = False # DEBUG
annotator.annotate(reverse)
elif arguments.split:
# Split from data_annotated --> train.txt/valid.txt
restrict = 100 # Default: Keep 100% of all files
splitter.train_valid(restrict_to=restrict) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self):\n\t\tself.print_header_information()\n\n\t\t#self.get_number_of_instances_from_user()\n\n\t\t#self.compile_dataframe(self.number_of_instances)\n\n\t\tprint \"\\n{}\".format(self.data)\n\n\t\t# Uncomment these lines for debugging\n\t\tself.compile_dataframe_default()\n\t\t# print \"\\n{}\".format(self.data)\n\n\t\tself.analysis_of_dataframe(self.data)",
"def run(self):\r\n self.collect_data()",
"def Run():\r\n pass",
"def main():\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n get_user_config()",
"def dataStats(self):\n print (\"Performing statistical analysis of the data\")\n # stuff to do",
"def run(self):\n\n # How to retrieve your input data.\n input_1_data = self.in_data['input_1']\n\n # How to retrieve your params value.\n param_1 = self.param['param_1']\n\n # How to process data.\n # Just write any number of methods you want and use them here.\n sample_out_data = self.sample_method(input_1_data, param_1)\n\n # Go to the definition of this method to see how to log.\n self.demo_log()\n\n # This is how to set output data.\n self.out_data['output_1'] = sample_out_data",
"def run(self):\r\n if not self.coreInst: self._gen_core_instructions()\r\n _core.init(self.coreInst)\r\n ec = _core.run()\r\n self.Data = _core.HDSTRUCT.copy()\r\n _ = self.Data.pop('last')\r\n self.Data = namedDict(**self.Data)\r\n if ec:\r\n print('An error occured. Intermediate data has been saved.')\r\n else:\r\n print('Finished.')\r\n return",
"def load_data(self):\n if self.debug:\n print(\"Loading data\")",
"def main():\n print(dumps(get_data()))\n return 0",
"def run():\n\twrite_fuel_data()",
"def RUN(self):",
"def run(self, data):\n\t\t# no processing here\n\t\treturn data",
"def run(self):\n\t\tself.data_source.connect()\n\t\twhile self.running:\n\t\t\tself.data_source.read()",
"def run(self):\n self.assign_inputs()\n self.execute()\n self.collect_outputs()",
"def run(self):\n\n self.load_file()\n self.cat_to_num()\n self.split()",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass",
"def run(self):\n pass"
] | [
"0.7152246",
"0.6843876",
"0.6765248",
"0.66976744",
"0.66915",
"0.6601861",
"0.6464472",
"0.64639926",
"0.6440317",
"0.6437759",
"0.64203024",
"0.6409332",
"0.6399841",
"0.63861287",
"0.63166547",
"0.6303447",
"0.6303447",
"0.6303447",
"0.6303447",
"0.6303447",
"0.6303447",
"0.6303447",
"0.6303447",
"0.6303447",
"0.6303447",
"0.6303447",
"0.6303447",
"0.6303447",
"0.6303447",
"0.6303447"
] | 0.6965196 | 1 |
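The run_data dispatcher in the record above checks boolean flags on an arguments object (define_labels, preprocess, annotate, split). A minimal argparse driver that would produce such an object could look like the sketch below; the parser itself is an assumption and not part of the original module.

```python
import argparse

def parse_args(argv=None):
    # One mutually exclusive flag per pipeline step the dispatcher handles.
    parser = argparse.ArgumentParser(description="Data pipeline steps")
    group = parser.add_mutually_exclusive_group(required=True)
    group.add_argument("--define-labels", dest="define_labels", action="store_true")
    group.add_argument("--preprocess", action="store_true")
    group.add_argument("--annotate", action="store_true")
    group.add_argument("--split", action="store_true")
    return parser.parse_args(argv)

args = parse_args(["--preprocess"])
print(args.preprocess)  # True
# run_data(args) would then take the data.preprocess() branch shown above.
```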
Tests that symbols are looked up properly | def test_symbol_lookup(self):
def check_lookup(symbol, expected):
op = BaseWhereOperator.get_operator(symbol)
self.assertEqual(op, expected)
check_lookup('EQ', EqualsOperator)
check_lookup('IN', InOperator)
check_lookup('GT', GreaterThanOperator)
check_lookup('GTE', GreaterThanOrEqualOperator)
check_lookup('LT', LessThanOperator)
check_lookup('LTE', LessThanOrEqualOperator) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_symbol_lookup(self):\r\n\r\n def check_lookup(symbol, expected):\r\n op = BaseWhereOperator.get_operator(symbol)\r\n self.assertEqual(op, expected)\r\n\r\n check_lookup('EQ', EqualsOperator)\r\n check_lookup('IN', InOperator)\r\n check_lookup('GT', GreaterThanOperator)\r\n check_lookup('GTE', GreaterThanOrEqualOperator)\r\n check_lookup('LT', LessThanOperator)\r\n check_lookup('LTE', LessThanOrEqualOperator)",
"def testSymbolHash(self):\n gScope = pykd.diaLoadPdb( str(target.module.pdb()) )\n symSet = set([ gScope[\"g_structTest\"], gScope[\"EnumWindowsProc1\"], gScope[\"g_structTest\"] ])\n self.assertEqual( 2, len(symSet) )\n self.assertTrue( gScope[\"g_structTest\"] in symSet )\n self.assertFalse( gScope[\"EnumWindowsProc2\"] in symSet )",
"def test_GetSymbolMapping_no_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\", \"\"]\n self.assertDictEqual({}, stack_utils.GetSymbolMapping(lines))",
"def check_symbols(self):\n # this method has a bug in that it never raises KeyError, it raises \n # ValueError instead.\n \n def is_valid(sym):\n # what symbols are valid? (, ), digits, atoms\n if sym in \"()\": return True\n #if sym.isdigit(): return True\n #if sym in _atomic_mass: return True\n if sym.isalnum(): return True\n return False\n\n for t in self._gettokens():\n if not is_valid(t): raise ValueError(\"bad symbol \" + t)\n if t.isalpha() and t not in _atomic_mass: raise KeyError(\"key error \" + t)\n return True",
"def test_symbol_repr(self):\n a = pybamm.Symbol(\"a\")\n b = pybamm.Symbol(\"b\")\n c = pybamm.Symbol(\"c\", domain=[\"test\"])\n d = pybamm.Symbol(\"d\", domain=[\"test\"])\n hex_regex = r\"\\-?0x[0-9,a-f]+\"\n self.assertRegex(\n a.__repr__(),\n r\"Symbol\\(\" + hex_regex + r\", a, children\\=\\[\\], domain\\=\\[\\]\\)\",\n )\n self.assertRegex(\n b.__repr__(),\n r\"Symbol\\(\" + hex_regex + r\", b, children\\=\\[\\], domain\\=\\[\\]\\)\",\n )\n self.assertRegex(\n c.__repr__(),\n r\"Symbol\\(\" + hex_regex + r\", c, children\\=\\[\\], domain\\=\\['test'\\]\\)\",\n )\n self.assertRegex(\n d.__repr__(),\n r\"Symbol\\(\" + hex_regex + r\", d, children\\=\\[\\], domain\\=\\['test'\\]\\)\",\n )\n self.assertRegex(\n (a + b).__repr__(),\n r\"Addition\\(\" + hex_regex + r\", \\+, children\\=\\['a', 'b'\\], domain=\\[\\]\\)\",\n )\n self.assertRegex(\n (c * d).__repr__(),\n r\"Multiplication\\(\"\n + hex_regex\n + r\", \\*, children\\=\\['c', 'd'\\], domain=\\['test'\\]\\)\",\n )\n self.assertRegex(\n pybamm.grad(a).__repr__(),\n r\"Gradient\\(\" + hex_regex + \", grad, children\\=\\['a'\\], domain=\\[\\]\\)\",\n )\n self.assertRegex(\n pybamm.grad(c).__repr__(),\n r\"Gradient\\(\"\n + hex_regex\n + \", grad, children\\=\\['c'\\], domain=\\['test'\\]\\)\",\n )",
"def test_GetSymbolMapping_parameter_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo?q=hello at /path/to/myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)",
"def test_symbol(self, data, symbol_first, symbol_second):\n layer = Points(data)\n assert layer.symbol == \"disc\"\n\n layer.symbol = symbol_first\n assert layer.symbol == symbol_first\n\n layer = Points(data, symbol=symbol_first)\n assert layer.symbol == symbol_first\n\n layer.symbol = symbol_second\n assert layer.symbol == symbol_second",
"def test_GetSymbolMapping_simple_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)",
"def test_validate_self_input_symbol_subset(self):\n with nose.assert_raises(exceptions.MissingSymbolError):\n self.dtm1.input_symbols.add('2')\n self.dtm1.validate_self()",
"def test_functionallity(self):\n\n pp = Lexpp(external_dict=pkg_resources.resource_filename(\"lexpp\", \"tests/test.dict\"))\n\n test_word = \"キャプテン\"\n entries = list(pp.lookup(test_word))\n\n self.assertEqual(len(entries), 4)\n\n for e in entries:\n self.assertEqual(type(e), Entry)\n rep = pp.get_representative_form(e)\n self.assertEqual(rep, test_word)",
"def test_is_an_element_symbol():\n for el in roentgen.elements['symbol']:\n assert(is_an_element(el))",
"def test_GetSymbolMapping_multiple_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/myapp.mojo/.lM03ws\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/otherapp.mojo at /path/to/otherapp.mojo/.kW07s\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\",\n \"/path/to/otherapp.mojo/.kW07s\": \"libotherapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)",
"def test_all_extra_tokens(self):\n self.helper_test_evaluate_raises(\n '1 or 0',\n expected_exc_type=ExtraSymbolError,\n A=1,\n B=1,\n C=1)",
"def test_syntax_error_nested_symbol_table():\n reader = get_reader(\n \"\"\"\nmodule my_mod\ncontains\nFUNCTION dot_v_mod_2d( )\n REAL :: dot_v_mod_2d\n REAL, DIMENSION(:,:), POINTER, CONTIOUS :: z_msk_i\n dot_v_mod_2d = 0.0_wp\nEND FUNCTION dot_v_mod_2d\nend module my_mod\n\"\"\"\n )\n result = F2003.Module.match(reader)\n # There should be no match and, as a result, there should be no\n # symbol-table entries.\n assert result is None\n assert SYMBOL_TABLES._symbol_tables == {}",
"def test_symbol():\n token = Token(\"+\", TokenInfo(\"<stdin>\", 0, 1, \" ++\"))\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"+\"\n assert token.info.lineno == 0\n assert token.symbol == Symbol.ADD\n assert token.info.line == \" ++\"\n\n token = token()\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"+\"\n assert token.info.lineno == 0\n assert token.symbol == Symbol.ADD\n assert token.info.line == \" ++\"\n\n token += \"+\"\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"++\"\n assert token.info.lineno == 0\n assert token.symbol == Symbol.INC\n assert token.info.line == \" ++\"\n\n token = token()\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"++\"\n assert token.info.lineno == 0\n assert token.symbol == Symbol.INC\n assert token.info.line == \" ++\"\n\n with pytest.raises(LythSyntaxError) as err:\n token += \"5\"\n\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"++\"\n assert token.info.lineno == 0\n assert token.symbol == Symbol.INC\n assert token.info.line == \" ++\"\n\n assert err.value.msg is LythError.MISSING_SPACE_AFTER_OPERATOR\n assert err.value.filename == \"<stdin>\"\n assert err.value.lineno == 0\n assert err.value.offset == 1\n assert err.value.line == \" ++\"\n\n assert repr(token) == \"Token(INC, '++', 0, 1)\"\n assert str(token) == \"INC: '++'\"",
"def symbols(self):\n pass",
"def test_GetSymbolMapping_normalize(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/.//myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)",
"def _want_sym(sym):\n if sym is None or len(sym) < 2:\n return False\n if sym['name'] in extract_ignore_names:\n return False\n bad_types = ['t', 'b', 'r', 'd', 'w']\n return (sym['type'] not in bad_types\n and sym['name'] not in ['__bss_start', '_end', '_edata'])",
"def compilation_test(interp, source):\r\n print '*** Compiling symbols from file: %s ***' % util.within_VCode(source)\r\n interp.cleanup_dictionary()\r\n interp.parse_symbols_from_file(source)\r\n print '\\n\\nParsed symbols are: '\r\n interp.print_symbols()\r\n print 'Unresolved abbreviations are:'\r\n unresolved = interp.peek_at_unresolved()\r\n sorted_unresolved = unresolved.keys()\r\n sorted_unresolved.sort()\r\n for an_abbreviation in sorted_unresolved:\r\n symbol_list = unresolved[an_abbreviation].keys()\r\n symbol_list.sort()\r\n print '\\'%s\\': appears in %s' % (an_abbreviation, str(symbol_list))\r\n \r\n print '\\n*** End of compilation test ***\\n'",
"def __contains__(self, symbol):\n return str(symbol) in self.__alphabet",
"def _is_symbol(s):\n\tif (type(s) == types.StringType and s >= 'A' and s[0] <= 'Z'\n\t\t\tand (len(s) < 2 or s[1] < '0' or s[1] > '9')):\n\t\treturn 1\n\treturn 0",
"def test_override_symbol(self):\n i, j, k, l = dimify('i j k l')\n a = symbol(name='a', dimensions=(i, j, k, l), value=2.)\n a1 = symbol(name='a1', dimensions=(i, j, k, l), value=3.)\n a2 = symbol(name='a2', dimensions=(i, j, k, l), value=4.)\n op = Operator(Eq(a, a + 3))\n op()\n op(a=a1)\n op(a=a2)\n shape = [d.size for d in [i, j, k, l]]\n\n assert(np.allclose(a.data, np.zeros(shape) + 5))\n assert(np.allclose(a1.data, np.zeros(shape) + 6))\n assert(np.allclose(a2.data, np.zeros(shape) + 7))",
"def is_symbol(p):\n return len(p) == 1 and p.isalpha()",
"def test_issue_91():\n assert is_identifier(\"_results_bag\")\n assert is_identifier(\"hello__bag\")",
"def test_get_symbols_as_str(self) -> None:\n tape = TMTape(\n tape=\"abcdef\",\n blank_symbol=\".\",\n current_position=2,\n )\n self.assertEqual(tape.get_symbols_as_str(), \"abcdef\")",
"def test_strings_common_symbols():\n\n common_result = strings_ops.strings_common_symbols(\"hi\", \"hello\")\n assert common_result == \"h\"",
"def true(symbol):\n return True",
"def testCtor(self):\n try: pykd.DiaSymbol()\n except RuntimeError: pass",
"def symbols_details(self):\n pass",
"def checkLookup(self, name):\n if not self.symbols.has_key(name):\n # we don't care\n return None\n # is it one we really care about\n t = self.symbols[name].getType()\n if t == \"typedef\":\n t = self.symbols[name].getAliasType()\n if t == \"general\" or t == \"struct\" or t == \"union\":\n return self.symbols[name]"
] | [
"0.7615396",
"0.68382215",
"0.6708252",
"0.659964",
"0.64436066",
"0.6381638",
"0.63209176",
"0.6301587",
"0.6294034",
"0.62852746",
"0.62724996",
"0.62620026",
"0.61632264",
"0.61437625",
"0.6124064",
"0.61073846",
"0.6088397",
"0.6071878",
"0.60535747",
"0.60304224",
"0.60273457",
"0.6003884",
"0.5995109",
"0.5988009",
"0.59803414",
"0.5965833",
"0.59578043",
"0.59003764",
"0.58924884",
"0.5880775"
] | 0.7542443 | 1 |
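The test in this record relies on BaseWhereOperator.get_operator mapping a symbol string such as 'EQ' to an operator class. The sketch below shows one way such a symbol-to-class registry can be built; it only illustrates the lookup pattern and is not the library implementation the test actually targets.

```python
class BaseWhereOperator:
    symbol = None
    _registry = {}

    def __init_subclass__(cls, **kwargs):
        # Register every subclass that declares a symbol.
        super().__init_subclass__(**kwargs)
        if cls.symbol is not None:
            BaseWhereOperator._registry[cls.symbol] = cls

    @classmethod
    def get_operator(cls, symbol):
        return cls._registry[symbol]

class EqualsOperator(BaseWhereOperator):
    symbol = "EQ"

class InOperator(BaseWhereOperator):
    symbol = "IN"

assert BaseWhereOperator.get_operator("EQ") is EqualsOperator
assert BaseWhereOperator.get_operator("IN") is InOperator
```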
matrix_set_diag operator implemented in numpy. Returns a numpy array with the diagonals of the input array replaced with the provided diagonal values. | def matrix_set_diag(input_np, diagonal, k=0, align="RIGHT_LEFT"):
out = np.array(input_np, copy=True)
cols = input_np.shape[-1]
rows = input_np.shape[-2]
onlyOneDiagonal = True
if isinstance(k, (tuple, list)):
if len(k) < 2 or k[0] == k[1]:
k = k[0]
else:
onlyOneDiagonal = False
if onlyOneDiagonal:
for i in range(diagonal.shape[-1]):
if k >= 0:
out[..., i, i + k] = diagonal[..., i]
else:
out[..., i - k, i] = diagonal[..., i]
else:
for ki in range(k[0], k[1] + 1):
diag_len = min(cols - max(ki, 0), rows + min(ki, 0))
offset = 0
if ki >= 0:
if align[:5] == "RIGHT":
offset = diagonal.shape[-1] - diag_len
else:
if align[-5:] == "RIGHT":
offset = diagonal.shape[-1] - diag_len
for i in range(diag_len):
if ki >= 0:
out[..., i, i + ki] = diagonal[..., k[1] - ki, i + offset]
else:
out[..., i - ki, i] = diagonal[..., k[1] - ki, i + offset]
return out | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_diag(x, new_diag):\n arr_shape = x.shape\n off_diag = (1 - _torch.eye(arr_shape[-1])) * x\n diag = _torch.einsum(\"ij,...i->...ij\", _torch.eye(new_diag.shape[-1]), new_diag)\n return diag + off_diag",
"def set_mat_diag(mat, diag=0, val=0):\n m = mat.shape[0]\n step = m + 1\n start = diag\n end = m ** 2 - diag * m\n mat.flat[start:end:step] = val",
"def set_diag(M,d,idx=0):\n n, m = shape_mat(M)\n if idx >= 0:\n for i, di in enumerate( d ):\n M[i][i+idx] = di\n else:\n for i, di in enumerate( d ):\n M[i-idx][i] = di",
"def diag(self):\n in_diag = (self.rows == self.cols)\n diag = np.zeros(min(self.n, self.n), dtype=np.float64) # default 0.\n diag[self.rows[in_diag]] = self.vals[in_diag]\n return diag",
"def zero_diag(mat):\n\n return replace_diag(mat, np.zeros(mat.shape[0]))",
"def replace_diag(mat, newdiag):\n\n if newdiag.ndim>1: \n raise Exception(\"newdiag should be 1-dimensional\")\n if not (mat.shape[0]==mat.shape[1]==newdiag.size):\n raise Exception(\"Incorrect dimensions.\")\n return mat - np.diag(mat.diagonal()) + np.diag(newdiag)",
"def matDiag(vec):\n ret=matZeros((len(vec),len(vec)))\n for i in range(len(vec)):\n matSet(ret,i,i,vec[i])\n return ret",
"def diag(cls, diagonal, domain, shape=None):\n if shape is None:\n N = len(diagonal)\n shape = (N, N)\n return cls.from_rep(SDM.diag(diagonal, domain, shape))",
"def _modify_diag_with_comm(cov_mat, comm):\n\n modified_cov_mat = np.copy(cov_mat)\n np.fill_diagonal(\n modified_cov_mat,\n comm * np.diag(cov_mat)\n )\n\n return modified_cov_mat",
"def diagM(l):\r\n dim = len(l)\r\n M = np.zeros((dim, dim))\r\n np.fill_diagonal(M, l)\r\n return matrix(M)",
"def diag(diagnoal):\n raise NotImplementedError",
"def create_diagonal(m: NumpyRealArray) -> NumpyRealArray:\n indices = (..., *np.diag_indices(m.shape[-1]))\n retval = np.zeros((*m.shape, m.shape[-1]), dtype=m.dtype)\n retval[indices] = m\n return retval",
"def diag_operator(self, diag_elements, subsystem):\n dim = subsystem.truncated_dim\n index = range(dim)\n diag_matrix = np.zeros((dim, dim), dtype=np.float_)\n diag_matrix[index, index] = diag_elements\n return self.identity_wrap(diag_matrix, subsystem)",
"def AssembleDiagonal(self, diag):\n return _hypre.HypreParMatrix_AssembleDiagonal(self, diag)",
"def diag(x):\r\n xx = as_tensor_variable(x)\r\n if xx.type.ndim == 1:\r\n return alloc_diag(xx)\r\n elif xx.type.ndim == 2:\r\n return extract_diag(xx)\r\n else:\r\n raise TypeError('diag requires vector or matrix argument', x)",
"def from_diag(d, context = FloatContext):\n n = len(d)\n S = zeros(n,n,context)\n set_diag(S,d)\n return S",
"def _set_diag(laplacian, value, norm_laplacian):\n n_nodes = laplacian.shape[0]\n # We need all entries in the diagonal to values\n if not sparse.isspmatrix(laplacian):\n if norm_laplacian:\n laplacian.flat[::n_nodes + 1] = value\n else:\n laplacian = laplacian.tocoo()\n if norm_laplacian:\n diag_idx = (laplacian.row == laplacian.col)\n laplacian.data[diag_idx] = value\n # If the matrix has a small number of diagonals (as in the\n # case of structured matrices coming from images), the\n # dia format might be best suited for matvec products:\n n_diags = np.unique(laplacian.row - laplacian.col).size\n if n_diags <= 7:\n # 3 or less outer diagonals on each side\n laplacian = laplacian.todia()\n else:\n # csr has the fastest matvec and is thus best suited to\n # arpack\n laplacian = laplacian.tocsr()\n return laplacian",
"def diag(diag_elements):\n return tf.diag(tf.reshape(diag_elements, [-1]))",
"def diag(self, X):\n\n raise NotImplementedError(\"base class\")",
"def set_diagonal(self, value = 0):\n for d in range(self.size):\n self.write(d, d, value)",
"def diagonal(matrix):\n if sp.sparse.issparse(matrix):\n diag = np.array(matrix.diagonal())\n else:\n diag = np.diagonal(matrix).copy()\n return diag",
"def diag(self):\n assert len(self.shape) == 1 or len(self.shape) == 2\n if len(self.shape) == 1:\n dim = self.shape[0]\n qim = self.qhape[0]\n shape = [dim, dim]\n qhape = [qim, qim]\n d = self.dirs[0]\n dirs = [d, -d]\n sects = {}\n for k, v in self.sects.items():\n new_k = (k[0], k[0])\n sects[new_k] = np.diag(v)\n res = type(self)(\n shape,\n qhape=qhape,\n qodulus=self.qodulus,\n sects=sects,\n dirs=dirs,\n dtype=self.dtype,\n )\n return res\n else:\n assert self.invar\n assert self.compatible_indices(self, 0, 1)\n d = self.dirs[0]\n if self.dirs[1] + d != 0:\n warnings.warn(\n \"Automatically flipping dir 1 in diag.\", stacklevel=2\n )\n self = self.flip_dir(1)\n dim = self.shape[0]\n qim = self.qhape[0]\n shape = [dim]\n qhape = [qim]\n dirs = [d]\n sects = {}\n for qnum in qim:\n try:\n diag_block = self[(qnum, qnum)]\n sects[(qnum,)] = np.diag(diag_block)\n except KeyError:\n # The diagonal block was not found, so we move on.\n pass\n res = type(self)(\n shape,\n qhape=qhape,\n qodulus=self.qodulus,\n sects=sects,\n dtype=self.dtype,\n dirs=dirs,\n invar=False,\n )\n return res",
"def diag(v, k=0):\n\n if not use_origin_backend(v):\n if not isinstance(v, dparray):\n pass\n else:\n return dpnp_diag(v, k)\n\n return call_origin(numpy.diag, v, k)",
"def fill_diagonal(a, val):\r\n return fill_diagonal_(a, val)",
"def diag(cls, elements, domain):\n return DDM.diag(elements, domain).to_dfm()",
"def diag(v, k=0):\n if isinstance(v, cupy.ndarray):\n if v.ndim == 1:\n size = v.size + abs(k)\n ret = cupy.zeros((size, size), dtype=v.dtype)\n ret.diagonal(k)[:] = v\n return ret\n else:\n return v.diagonal(k)\n else:\n return cupy.array(numpy.diag(v, k))",
"def DiagExpand(A):\n \n G = np.zeros(A.shape + A.shape[-1:])\n Gd = np.diagonal(G, axis1=-2, axis2=-1)\n Gd.setflags(write=True)\n Gd[:] = A\n \n return G",
"def fill_diagonal(x1, val, wrap=False):\n\n x1_desc = dpnp.get_dpnp_descriptor(\n x1, copy_when_strides=False, copy_when_nondefault_queue=False\n )\n if x1_desc:\n if not dpnp.isscalar(val):\n pass\n elif wrap:\n pass\n else:\n return dpnp_fill_diagonal(x1_desc, val)\n\n return call_origin(numpy.fill_diagonal, x1, val, wrap, dpnp_inplace=True)",
"def diagonal(a, offset=0, axis1=0, axis2=1):\n # TODO(okuta): check type\n return a.diagonal(offset, axis1, axis2)",
"def r_diag_dot_sparse(mat, diag):\n return mat @ sp.diags(diag)"
] | [
"0.74522567",
"0.7357435",
"0.71183485",
"0.67566574",
"0.6728996",
"0.6719767",
"0.65984404",
"0.65970296",
"0.6580404",
"0.65264386",
"0.65135664",
"0.64551324",
"0.64260054",
"0.6417673",
"0.63621813",
"0.6301156",
"0.62709117",
"0.62459886",
"0.6244929",
"0.6234028",
"0.62320983",
"0.6208054",
"0.61953586",
"0.6195272",
"0.6190412",
"0.61325777",
"0.61288685",
"0.61011004",
"0.60977656",
"0.60781145"
] | 0.7570219 | 0 |
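For the single-diagonal branch of the reference implementation above, the update rule is simply out[..., i, i + k] = diagonal[..., i] (mirrored for k < 0). The self-contained sketch below reproduces just that branch so the behaviour can be checked with small NumPy arrays; it is an illustration, not the full banded/aligned version.

```python
import numpy as np

def set_single_diag(input_np, diagonal, k=0):
    # Mirrors the single-diagonal branch: out[..., i, i + k] = diagonal[..., i]
    out = np.array(input_np, copy=True)
    for i in range(diagonal.shape[-1]):
        if k >= 0:
            out[..., i, i + k] = diagonal[..., i]
        else:
            out[..., i - k, i] = diagonal[..., i]
    return out

a = np.zeros((3, 3), dtype=int)
print(set_single_diag(a, np.array([1, 2, 3]), k=0))
# [[1 0 0]
#  [0 2 0]
#  [0 0 3]]
print(set_single_diag(a, np.array([7, 7]), k=1))
# [[0 7 0]
#  [0 0 7]
#  [0 0 0]]
```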
The stream effect makes the colors of the LEDs move like a stream, where the color of an LED is given to its neighbor in the next update step. | def stream_handler(args_dict: dict):
color_sequence = args_dict['color_sequence']
color_seq_len = args_dict['color_seq_len']
color_itr = args_dict['color_itr']
n_leds = args_dict['n_leds']
step_sequence = [color_sequence[c % color_seq_len] for c in range(color_itr, n_leds + color_itr)]
# Updating step for the next iteration.
args_dict['color_itr'] = (color_itr + 1) % color_seq_len
return step_sequence | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def show(self):\n\t\tself.processQueue()\n\t\tself.flattenLayers()\n\t\tcount = 0\n\t\tfor v in self.ledsColorBuffer:\n\t\t\tself.strip.setPixelColor(count, v)\n\t\t\tcount += 1\n\t\tself.strip.show()",
"def demo(self):\n self.clear()\n\n white = neo.Color(255, 255, 255)\n black = neo.Color(0, 0, 0)\n red = neo.Color(120, 0, 0)\n green = neo.Color(0, 255, 0)\n blue = neo.Color(0, 0, 255)\n pink = neo.Color(255, 102, 178)\n \n state = [[[0,0,0]] * self.width] * self.height\n stepsize = (1.0/self.n_leds)\n lednr = 0\n for x in range(self.width):\n for y in range(self.height):\n h_start = (0 + lednr * (2*stepsize)) % 1 #* (y*self.width + x)\n lednr = lednr + 1\n s_start = 0\n v_start = 1\n hsv = [h_start,s_start,v_start]\n state[x][y] = hsv\n self.set([x,y], hsv_to_neopixel_color(hsv[0], hsv[1], hsv[2]))\n\n tint = 0\n while(True): \n for x in range(self.width):\n for y in range(self.height):\n hsv = state[x][y]\n\n new_h = (hsv[0] + stepsize/60.0) % 1.0\n new_s = (hsv[1] + stepsize/20.0) % 1.0\n new_v = hsv[2] #+ stepsize/20.0) % 1.0\n\n state[x][y][0] = new_h\n state[x][y][1] = new_h\n state[x][y][2] = new_v\n\n self.set([x,y], hsv_to_neopixel_color(\n (translate(new_h, 0.0, 1.0, 0.0, 0.1) + tint) % 1.0, \n to_sine(new_s), \n new_v))\n \n tint = (tint + stepsize/20.0) % 1\n\n self.draw()\n sleep(1.0/40)",
"def main():\n # color = rb.Color.BLUE.value\n # move_to_color(color)\n infared_sensor()\n\n # WHITE/RED does not work same with the BLUE/GREEN going down",
"def nextLight():\n global light\n pin.setAllOutPinsLow()\n light += 1\n light %= len(traffic_lights)\n print traffic_colors[light]\n pin.setOutPinHigh(traffic_lights[light])",
"def flicker_lights(self):\n print 'Lights Set'",
"def update(): # (12)\n with canvas(device) as draw:\n for led_pos in range(0, len(color_buffer)):\n color = color_buffer[led_pos]\n\n ## If your LED strip's colors are are not in the expected\n ## order, uncomment the following lines and adjust the indexes\n ## in the line color = (rgb[0], rgb[1], rgb[2])\n # rgb = getrgb(color)\n # color = (rgb[0], rgb[1], rgb[2])\n # if len(rgb) == 4:\n # color += (rgb[3],) # Add in Alpha\n\n draw.point((led_pos, 0), fill=color)",
"def transition(red, green, blue, new_red, new_green, new_blue):\n while (red != new_red) or (green != new_green) or (blue != new_blue):\n while red != new_red:\n if red > new_red:\n red = red - 1\n break\n else:\n red = red + 1\n break\n while green != new_green:\n if green > new_green:\n green = green - 1\n break\n else:\n green = green + 1\n break\n while blue != new_blue:\n if blue > new_blue:\n blue = blue - 1\n break\n else:\n blue = blue + 1\n break\n logi_led.logi_led_set_lighting(red, green, blue)\n time.sleep(0.01)",
"def update_leds():\n # zaświeć\n for i in range(current_leds):\n volume_leds[i].configure(background=TURN_ON_COLOR)\n\n # zgaś\n for i in range(current_leds, LEDS):\n volume_leds[i].configure(background=TURN_OFF_COLOR)",
"def led_rainbow(strip, wait_ms=2, iterations=1):\n for j in range(256*iterations):\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, color_wheel((i+j) & 255))\n strip.show()\n gevent.sleep(wait_ms/1000.0)",
"def rainbow_all(self):\n while True:\n for g in range(0, 255, 1):\n self.BridgeObj.send_rgb_value(255, g, 0)\n time.sleep(self.speed)\n\n for r in range(255, 0, -1):\n self.BridgeObj.send_rgb_value(r, 255, 0)\n time.sleep(self.speed)\n\n for b in range(0, 255, 1):\n self.BridgeObj.send_rgb_value(0, 255, b)\n time.sleep(self.speed)\n\n for g in range(255, 0, -1):\n self.BridgeObj.send_rgb_value(0, g, 255)\n time.sleep(self.speed)\n\n for r in range(0, 255, 1):\n self.BridgeObj.send_rgb_value(r, 0, 255)\n time.sleep(self.speed)\n\n for b in range(255, 0, -1):\n self.BridgeObj.send_rgb_value(255, 0, b)\n time.sleep(self.speed)",
"def ControlLights(state):\n for led in (RED,YELLOW,GREEN):\n GPIO.output(LED[led],state[led])\n time.sleep(FLASH_TIME)",
"def control_lights(state):\n for led in (RED, AMBER, GREEN):\n GPIO.output(LED[led],state[led])",
"def led_rainbowCycle(strip, wait_ms=2, iterations=1):\n for j in range(256*iterations):\n for i in range(strip.numPixels()):\n strip.setPixelColor(i, color_wheel((int(i * 256 / strip.numPixels()) + j) & 255))\n strip.show()\n gevent.sleep(wait_ms/1000.0)",
"def apply(self):\n \n i = 0\n for LEDStrip in self._LEDStrips:\n LEDStrip.applyColor(self._colors[i][0], self._colors[i][1], self._colors[i][2])\n i += 1",
"def RedLED(firstPixel, secondPixel):\n led = LPD8806.strand() \n count1 = 250\n count2 = 0\n while count1 != 0:\n \"\"\" Fade green off \"\"\"\n led.set(firstPixel, 0, count1, 0)\n led.set(secondPixel, 0, count1, 0)\n led.update()\n count1 -= 25\n while count2 != 250:\n \"\"\" Fade red on \"\"\"\n led.set(firstPixel, count2, 0, 0)\n led.set(secondPixel, count2, 0, 0)\n led.update()\n count2 += 25\n return",
"def led(red: int, green: int, blue: int, /) -> None:",
"def startColorLoop():\n b.set_group(1, 'on', True)\n b.set_group(1, 'bri', 254)\n b.set_group(1, 'hue', 255)\n b.set_group(1, 'sat', 255)\n b.set_group(1, 'effect', 'colorloop')",
"def led(color: Tuple[int, int, int], /) -> None:",
"def update_color(self):\n self.plot(update_traces=False, update_waveforms=True)",
"def update(self):\n super().update()\n time_since_start = self.time_since_start() \n curr_mod = time_since_start%self.game.time_cycle_secs\n grade = abs(curr_mod - self.game.time_cycle_secs/2) / (self.game.time_cycle_secs/2)\n color_value = grade*(255-self.game.max_darkness) + self.game.max_darkness\n for sprite in self.all_sprites:\n sprite.color = (color_value, color_value, color_value)",
"def change_led_floor_color(update: 'Update', context: 'CallbackContext'):\n args = context.args\n message = \" \".join(args)\n\n try:\n publish.single(\"ledfloorupdates\", message, hostname=\"10.90.154.80\", port=1883, client_id=\"kolabbot\")\n update.message.reply_text('Changing LED floor color to \"{}\".'.format(message))\n except (ConnectionRefusedError, TimeoutError) as err:\n msg = \"Could not connect to LED-floor: {}\".format(err)\n print(msg)\n update.message.reply_text(msg)",
"def changeColor( self ):\n\t\t\n\t\tx, y = self.position.xy\n\t\tself.color = ( int((x / WINDOW_X) * 128), int((x / WINDOW_X) * 128) + int((y / WINDOW_Y) * 128 ), int((y / WINDOW_Y) * 128))",
"def cycle_colors(colors=(\"red\", \"green\", \"blue\"), delay_secs=1):\n set_color('black') # Start with all LED's \"off\"\n\n for c in colors:\n print(\"LEDs are all \" + c)\n set_color(c)\n update()\n sleep(delay_secs)",
"def tween_hsv_at(self, progress, output):\n for cell_id in self.next.keys():\n next_color = self.next[cell_id]\n\n if cell_id in self.last:\n last_color = self.last[cell_id]\n else:\n last_color = color.BLACK\n\n cell_color = color.Color(tween.hsvLinear(last_color, next_color, progress))\n output(cell_id, cell_color)",
"def color_cycle():\n while True:\n for color in colors:\n yield color",
"def led_theaterChase(strip, color, wait_ms=50, iterations=5):\n for j in range(iterations):\n for q in range(3):\n for i in range(0, strip.numPixels()-q, 3):\n strip.setPixelColor(i+q, color)\n strip.show()\n gevent.sleep(wait_ms/1000.0)\n for i in range(0, strip.numPixels()-q, 3):\n strip.setPixelColor(i+q, 0)",
"def update_io(self, dt):\n self.light.change_color(traffic_lights_binary())\n self.seven_segment_display.activate_segments(seven_segment_binary())\n self.ascii.update_ascii_grid()",
"def led(color: int, /) -> None:",
"def led_theaterChaseRainbow(strip, wait_ms=25):\n for j in range(256):\n for q in range(3):\n for i in range(0, strip.numPixels()-q, 3):\n strip.setPixelColor(i+q, color_wheel((i+j) % 255))\n strip.show()\n gevent.sleep(wait_ms/1000.0)\n for i in range(0, strip.numPixels()-q, 3):\n strip.setPixelColor(i+q, 0)",
"def setColourLevels(self):\n minsg = np.min(self.sg)\n maxsg = np.max(self.sg)\n brightness = self.brightnessSlider.value()\n contrast = self.contrastSlider.value()\n colourStart = (brightness / 100.0 * contrast / 100.0) * (maxsg - minsg) + minsg\n colourEnd = (maxsg - minsg) * (1.0 - contrast / 100.0) + colourStart\n for btn in self.picbuttons:\n btn.stopPlayback()\n btn.setImage(self.lut, colourStart, colourEnd, False)\n btn.update()"
] | [
"0.6599297",
"0.65627533",
"0.6331775",
"0.62021",
"0.6179241",
"0.61761546",
"0.6153618",
"0.59917074",
"0.59736294",
"0.59636015",
"0.5950049",
"0.59427375",
"0.5905459",
"0.5854121",
"0.58495724",
"0.5800509",
"0.57976097",
"0.5783879",
"0.577515",
"0.57635885",
"0.5758384",
"0.5709361",
"0.57027143",
"0.56851876",
"0.56773853",
"0.56729347",
"0.56697524",
"0.56690097",
"0.5668358",
"0.56430364"
] | 0.67854285 | 0 |
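The handler in this record just rotates a colour sequence across n_leds positions and advances color_itr by one per frame. The hardware-free sketch below shows the same rotation with a plain list standing in for the strip; the function and variable names here are illustrative only.

```python
def stream_step(color_sequence, color_itr, n_leds):
    # Build one frame, then advance the iterator so the colours shift by one.
    seq_len = len(color_sequence)
    frame = [color_sequence[c % seq_len] for c in range(color_itr, n_leds + color_itr)]
    return frame, (color_itr + 1) % seq_len

colors = ["R", "G", "B"]
itr = 0
for _ in range(3):
    frame, itr = stream_step(colors, itr, n_leds=5)
    print(frame)
# ['R', 'G', 'B', 'R', 'G']
# ['G', 'B', 'R', 'G', 'B']
# ['B', 'R', 'G', 'B', 'R']
```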
Retrieve all cursos from the graph | def get_cursos(request):
if request.method == 'GET':
cursos = Curso.nodes.all()
cursos_list = []
for i in range(0, len(cursos)):
cursos_list.append(cursos[i].__dict__["nombre"])
return JsonResponse({"cursos": cursos_list}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_full_graph(self):",
"def graph(self):\n ...",
"def download_chicago_graph():\n\n\tG = ox.graph_from_place(\"Chicago,IL, United States\", network_type='drive')\n\treturn G",
"def getData(graph, request):\r\n results = list(graph.query(request))\r\n return results",
"def get_graph(self) -> dict:\n response = requests.get(self.channel, params=\"get_graph\")\n return json_to_graph(response.content)",
"def getGraph(self):\n\t\treturn self.graph",
"def graphcall():\n endpoint = config.RESOURCE + config.API_VERSION + '/me'\n http_headers = {'client-request-id': str(uuid.uuid4())}\n graphdata = SESSION.get(endpoint, headers=http_headers, stream=False).json()\n return {'graphdata': graphdata, 'endpoint': endpoint, 'sample': 'ADAL'}",
"def get_graph(**options):\r\n graph = bonobo.Graph()\r\n graph.add_chain(get_stock_list,extract, process, load)\r\n\r\n return graph",
"def get_graph_summary(self):\n\n pass",
"def iograph():\n global _iograph\n if _iograph: return _iograph",
"def _getCadastroCursos(self, id_cadastro):\n return self.execSql(\"select_cadastro_cursos\",\n id_cadastro=int(id_cadastro))",
"def evaluate_graph(dictionary, corpus, texts, limit):\n c_v = []\n lm_list = []\n for num_topics in range(1, limit):\n lm = LdaModel(corpus=corpus, num_topics=num_topics, id2word=dictionary)\n lm_list.append(lm)\n cm = CoherenceModel(model=lm, texts=texts, dictionary=dictionary, coherence='c_v')\n c_v.append(cm.get_coherence())\n\n # Show graph\n x = range(1, limit)\n plt.plot(x, c_v)\n plt.xlabel(\"num_topics\")\n plt.ylabel(\"Coherence score\")\n plt.legend((\"c_v\"), loc='best')\n plt.show()\n\n return lm_list, c_v",
"def getcurso(curso):\n\n dataset = {\n \"curso\": [],\n \"materia\": [],\n \"professor\": [],\n \"horas\": [],\n \"ids\": []\n }\n request_data_get = cursos_collections.find({\"curso\": curso})\n\n for result in request_data_get:\n dataset['curso'].append(result[\"curso\"])\n dataset['materia'].append(result[\"materia\"])\n dataset['professor'].append(result[\"professor\"])\n dataset['horas'].append(result[\"horas\"])\n dataset['ids'].append(str(result[\"_id\"]))\n\n return dataset",
"def components_graph(geo, stereo=True):\n return automol.graph.connected_components(graph(geo, stereo=stereo))",
"def getArcs(self):\n return self.getArcsFrom()",
"def graphs(self):\n return self.__graphs",
"def get_cert_graphs_connection(self):\n return self.m_connection.cert_graphs",
"def graph(self):\n return self._graph",
"def graph(self):\n return self._graph",
"def get(self, *args):\n return _libsbml.ListOfGraphicalObjects_get(self, *args)",
"def graph(self):\n return self.__graph",
"def graph_course(self):\n group = self.__data[\"filted_general_groupby\"]\n graph = {}\n if self.analysis[\"courses\"] is None:\n self.courses_list()\n\n # inicializa o dicionario que vai guardar o grafico\n for course in self.analysis[\"courses\"].index:\n graph[course] = []\n\n for i in range(18):\n min_v = i * 5\n max_v = min_v + 4.99\n self.__calc_graph_mean(group, min_v, max_v, graph)\n\n min_v = 95\n max_v = 100\n self.__calc_graph_mean(group, min_v, max_v, graph)\n\n self.analysis[\"graph_course\"] = graph",
"def cc_visited(ugraph):\r\n\tremaining_node = ugraph.keys()\t\t#The keys are accessible directly.\r\n\t\r\n\tcon_com = [] #connected component\r\n\twhile len(remaining_node) != 0 :\r\n\t\tnode = random.choice(remaining_node)\r\n\t\tvisited = bfs_visited(ugraph,node)\r\n\t\tcon_com.append(visited)\r\n\t\tfor item in visited:\r\n\t\t\tremaining_node.remove(item)\r\n\treturn con_com",
"def get_graph(self, path):\n raise NotImplementedError",
"def edges(self, irc, msg, args, channel):\n pie = self.instances[irc.network]\n irc.reply([str(x) for x in pie.graphs[channel].edges.values()])",
"def get_ecg_graph():\n titles = ['ecg1', 'ecg2', 'ecg3']\n colors = ['rgb(240,0,0)', 'rgb(0,240,0)', 'rgb(0,0,240)']\n update()\n signames_ecg = queries['signames_ecg']\n signals = queries['signals']\n latesthr = queries['latesthr']\n return html.Div(className='ecg', children=[\n html.Div(style={'display': 'flex', 'height': '40vh'},\n children=[dcc.Graph(\n id=titles[i] + signame,\n style={'width': '100%'},\n figure={\n 'data': [\n {'x': signals[signame]['time'],\n 'y': signals[signame][titles[i]],\n 'mode': 'line', 'name': signame, 'line': {'color':colors[i]}}\n ],\n 'layout': {\n 'font': {'color':'#fff'},\n 'title': '{}-{}'.format(signame, titles[i]),\n 'xaxis': {'title': 'time', 'color': '#fff', 'showgrid': 'False'},\n 'yaxis': {'title': 'voltage (mv)', 'color': '#fff', 'showgrid': 'False', 'range': np.linspace(-2.5, 2.5, 10)},\n 'paper_bgcolor':'#000', 'plot_bgcolor':'#000'\n }\n }\n ) for i in range(len(titles))]\n +\n [html.Div(\n style={'justify-content': 'center', 'display': 'flex',\n 'align-items': 'center', 'width': '10vh', 'font-size': '30pt', 'color': 'white'},\n children=['{}'.format(latesthr[signame][0])])\n ]\n ) for signame in signames_ecg])",
"def get_available_cops():\n allIncidents = Incident.get_all()\n cops = []\n \n for i in allIncidents:\n if(inicioAmostragem <= i.reporting_date and i.reporting_date <=terminoAmostragem):\n cops.append(i['operations_center']['id'])\n \n allReports = RelatoDeSituacao.get_all()\n \n for r in allReports:\n if (\n inicioAmostragem <= r.data_hora and \n r.data_hora <=terminoAmostragem and\n 'cop' in r.relator and # todos tem que ter o COP\n 'id' in r.relator['cop'] # todos tem que ter o id \n ):\n cops.append(r.relator['cop']['id'])\n \n return set(cops)",
"def FindClumps_graph(self):\n # IMPORT STUFF\n import string\n # END IMPORT\n \n maxima = self['CL_LOC'].copy()\n maxima = num.where(maxima)\n maxima = (maxima[1],maxima[0])\n detectimg = self['STAMP'].copy()\n \n id = self._getGraphId()\n root = 'FindClumps_%s' % (id,)\n pngname = root + '.png' ; epsname = root + '.eps'\n jpgname = root + '.jpg'\n\n doStamp(detectimg,pngname,format='PNG')\n Convert(pngname,jpgname)\n \n Painted = Paint(jpgname)\n Painted.load()\n Painted.DrawCross(maxima,length=7,color='green')\n \n strpeaks = string.strip('%i'% (self['M_NUM_CL']))\n text = 'NC=%s' % strpeaks \n \n # Painted.Graffiti(text,commtextpos)\n \n Painted.save(jpgname)\n Painted.release()\n \n Convert(jpgname,epsname)\n os.system('rm %s %s' % (pngname,jpgname))\n self['figures']['FindClumps'] = epsname\n self['figcomms']['FindClumps'] = text",
"def neato_graph_from_corpus( corpus, max_nodes ) :\n\n O, row_dois, column_dois = cites_matrix( corpus )\n neato_cooccurrence_graph( O, column_dois )\n return None\n\n \n v = total_occurrences( O ) \n nv = v.astype( float32 ) / v.max()\n C = cooccurrence_matrix ( O )\n nC = normalized_cooccurrence_matrix( O )\n\n # now find our cutoff!\n # find the max number of cocites and start there\n cocite_cutoff = C.max()\n num_nodes = nodes_from_c( C[C >= cocite_cutoff] )\n # then reduce the number until we exceed max_nodes\n while num_nodes < max_nodes :\n cocite_cutoff = cocite_cutoff - 1\n num_nodes = nodes_from_c( C[C >= cocite_cutoff] )\n\n if num_nodes > max_nodes :\n cocite_cutoff = cocite_cutoff + 1\n \n C = C.copy()\n C[ C < cocite_cutoff ]= 0\n\n graph = pydot.Dot( graph_type = 'graph' )\n graph.set_overlap(\"false\")\n coords = zip(*(C >= cocite_cutoff).nonzero())\n\n # make a dict of all nodes which are mentioned in the coords\n nodes = {}\n index = 1\n for coord in set(chain.from_iterable(coords)) :\n if not nodes.has_key( coord ) :\n node = pydot.Node( str(coord) )\n if v != None :\n doi = column_dois[coord]\n node.set_label( str(index) )\n node.set_penwidth( nv[ coord ] )\n node.set_fixedsize(\"true\")\n node.set_width( 1.0 *nv[ coord ] )\n #node.set_shape(\"circle\")\n nodes[ coord ] = node\n graph.add_node( node )\n index = index + 1\n\n for coord in coords :\n \n edge = pydot.Edge( nodes[coord[0]], nodes[coord[1]] )\n edge.set_weight( nC[coord] )\n edge.set_penwidth( nC[coord]*5 )\n #edge.set_label( str(int(m[coord]) ))\n graph.add_edge(edge)\n\n \n legend = pydot.Node( \"legend\" )\n nodelist = nodes.items()\n nodelist.sort( lambda a,b : cmp(node_index(a[1].get_label()),node_index(b[1].get_label())) )\n legend.set_label( \"\\l\".join([x[1].get_label()+\":\"+column_dois[x[0]] for x in nodelist])+\"\\l\" )\n legend.set_shape(\"box\")\n graph.add_node(legend)\n\n print graph.to_string()\n #graph.write_dot('test.dot', prog='neato' )\n #graph.write_png('test.png', prog='neato' )\n #graph.write_pdf('test.pdf', prog='neato' )",
"def get_graph_blogcatalog():\n from scipy.io import loadmat\n\n def sparse2graph(x):\n from collections import defaultdict\n from six import iteritems\n\n G = defaultdict(lambda: set())\n cx = x.tocoo()\n for i, j, v in zip(cx.row, cx.col, cx.data):\n G[i].add(j)\n return {str(k): [str(x) for x in v] for k, v in iteritems(G)}\n\n mat = loadmat('./samples/blogcatalog.mat')\n A = mat['network']\n data = sparse2graph(A)\n\n G = eg.Graph()\n for u in data:\n for v in data[u]:\n G.add_edge(u, v)\n\n return G"
] | [
"0.60265046",
"0.5684899",
"0.5633646",
"0.56242675",
"0.5566034",
"0.5564065",
"0.55356544",
"0.55326086",
"0.5355961",
"0.53407145",
"0.5306177",
"0.5233318",
"0.52299714",
"0.51916367",
"0.51910627",
"0.5189231",
"0.51728517",
"0.5166251",
"0.5166251",
"0.51599175",
"0.51570344",
"0.51521486",
"0.5148673",
"0.5137239",
"0.5123791",
"0.5122635",
"0.5113447",
"0.51015574",
"0.5095254",
"0.50833553"
] | 0.57523143 | 1 |
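The view in this record iterates Curso.nodes.all() and collects the nombre attribute of each node. Assuming a configured Django project with neomodel and a running Neo4j instance, the node class and a slightly tidier version of the view could look like the sketch below; the Curso model definition is an assumption beyond the nombre property the snippet uses.

```python
from django.http import JsonResponse
from neomodel import StructuredNode, StringProperty

class Curso(StructuredNode):
    # Only `nombre` is taken from the snippet above; other fields are unknown.
    nombre = StringProperty(unique_index=True)

def get_cursos(request):
    if request.method == "GET":
        # List comprehension replaces the index-based loop in the original view.
        nombres = [curso.nombre for curso in Curso.nodes.all()]
        return JsonResponse({"cursos": nombres})
```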
Predict the BDEs of each bond in a list of molecules. | def predict(smiles_list, drop_duplicates=True, batch_size=1, verbose=False):
molecule_list = [Molecule(smiles=smiles) for smiles in smiles_list]
smiles_list = [mol.smiles for mol in molecule_list]
pred_df = pd.concat(
(
get_fragments(mol, drop_duplicates=drop_duplicates)
for mol in tqdm(molecule_list, disable=not verbose)
)
)
max_bonds = get_max_bonds(molecule_list)
input_dataset = tf.data.Dataset.from_generator(
lambda: (
get_features(mol.smiles, max_num_edges=2 * max_bonds)
for mol in tqdm(molecule_list, disable=not verbose)
),
output_signature=preprocessor.output_signature,
).cache()
batched_dataset = input_dataset.padded_batch(batch_size=batch_size).prefetch(
tf.data.experimental.AUTOTUNE
)
bdes, bdfes = model.predict(batched_dataset, verbose=1 if verbose else 0)
bde_df = (
pd.DataFrame(bdes.squeeze(axis=-1), index=smiles_list)
.T.unstack()
.reindex(pred_df[["molecule", "bond_index"]])
)
bdfe_df = (
pd.DataFrame(bdfes.squeeze(axis=-1), index=smiles_list)
.T.unstack()
.reindex(pred_df[["molecule", "bond_index"]])
)
pred_df["bde_pred"] = bde_df.values
pred_df["bdfe_pred"] = bdfe_df.values
is_valid = pd.Series(
{
smiles: not validate_inputs(input_)[0]
for smiles, input_ in zip(smiles_list, input_dataset)
},
name="is_valid",
)
pred_df = pred_df.merge(is_valid, left_on="molecule", right_index=True, how="left")
pred_df = pred_df.merge(
bde_dft[["molecule", "bond_index", "bde", "bdfe", "set"]],
on=["molecule", "bond_index"],
how="left",
)
return pred_df | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def predict(smiles_list, drop_duplicates=True, verbose=True):\n\n is_valid = pd.Series({smiles: not check_input(smiles)[0] for smiles in smiles_list}, name='is_valid')\n pred_df = pd.concat([predict_bdes(smiles, draw=False) for smiles in smiles_list])\n pred_df = pred_df.merge(is_valid, left_on='molecule', right_index=True)\n\n if drop_duplicates:\n pred_df = pred_df.drop_duplicates([\n 'fragment1', 'fragment2']).reset_index(drop=True)\n\n return pred_df.sort_values(['molecule', 'bond_index'])",
"def predict(list_of_smiles):\n\tmy_logger.debug(\"Inside the /predict\")\n\tmy_logger.debug(\"Received: \", str(list_of_smiles))\n\tlist_of_smiles = list_of_smiles.split(',')\n\tmy_logger.debug(\"List of smiles splitted : \", list_of_smiles)\n\ttry:\n\t\tmy_logger.debug(\"inside try\")\n\t\tlist_of_molecules = [Chem.MolFromSmiles(x) for x in list_of_smiles]\n\t\tmy_logger.debug(\"molecules parsed\")\n\t\t\n\t\t# use this\n\t\trdkit_desc_for_posted_list_of_mols = np.array([rdkit_descriptors_calculator.CalcDescriptors(mol) for mol in list_of_molecules])\n\t\tmy_logger.debug(\"rdkit calculated\")\n\n\t\tmordred_desc_for_posted_list_of_mols_df = mordred_desc_calculator.pandas(list_of_molecules)\n\t\tmy_logger.debug(\"mordred1\")\n\t\tmordred_desc_for_posted_list_of_mols_df = mordred_desc_for_posted_list_of_mols_df.drop([\"SpAbs_Dt\", \"SpMax_Dt\", \"SpDiam_Dt\", \"SpAD_Dt\", \"SpMAD_Dt\", \"LogEE_Dt\", \n\t \"SM1_Dt\", \"VE1_Dt\", \"VE2_Dt\", \"VE3_Dt\", \"VR1_Dt\", \"VR2_Dt\", \"VR3_Dt\", \n\t \"DetourIndex\"], axis = 1)\n\t\tmy_logger.debug(\"mordred2\")\n\t\tmordred_desc_for_posted_list_of_mols_df = mordred_desc_for_posted_list_of_mols_df.apply(pd.to_numeric, errors='coerce').fillna(0)\n\t\tmy_logger.debug(\"mordred calculated\")\n\n\t\t# and this\n\t\tmordred_desc_for_posted_list_of_mols = mordred_desc_for_posted_list_of_mols_df.values\n\t\tmy_logger.debug(\"mordred values passed\")\n\n\t\t# print(\"\\n\\n\\nThis happens before return\\n\\n\\n\")\n\n\t\t# print(rdkit_366_random_logreg.predict_proba(rdkit_desc_for_posted_list_of_mols)[:,-1]) # being active\n\t\t# print(\"\\n\\n\")\n\t\t# print(rdkit_366_random_svm.predict(rdkit_desc_for_posted_list_of_mols)) # \n\n\t\treturn {\n\t\t\t\"rdkit_366_random_logreg\": \",\".join([str(x) for x in rdkit_366_random_logreg.predict_proba(rdkit_desc_for_posted_list_of_mols)[:,-1]]),\n\t\t\t\"rdkit_366_random_rf\": \",\".join([str(x) for x in rdkit_366_random_rf.predict_proba(rdkit_desc_for_posted_list_of_mols)[:,-1]]),\n\t\t\t\"mordred_366_random_logreg\": \",\".join([str(x) for x in mordred_366_random_logreg.predict_proba(mordred_desc_for_posted_list_of_mols)[:,-1]]),\n\t\t\t\"mordred_366_random_rf\": \",\".join([str(x) for x in mordred_366_random_rf.predict_proba(mordred_desc_for_posted_list_of_mols)[:,-1]]),\n\t\t\t\"rdkit_autoencoder_logreg\": \",\".join([str(x) for x in rdkit_autoencoder_logreg.predict_proba(rdkit_desc_for_posted_list_of_mols)[:,-1]]),\n\t\t\t\"rdkit_autoencoder_rf\": \",\".join([str(x) for x in rdkit_autoencoder_rf.predict_proba(rdkit_desc_for_posted_list_of_mols)[:,-1]]),\n\t\t\t\"mordred_autoencoder_logreg\": \",\".join([str(x) for x in mordred_autoencoder_logreg.predict_proba(mordred_desc_for_posted_list_of_mols)[:,-1]]),\n\t\t\t\"mordred_autoencoder_rf\": \",\".join([str(x) for x in mordred_autoencoder_rf.predict_proba(mordred_desc_for_posted_list_of_mols)[:,-1]]),\n\t\t}\n\texcept:\n\t\tmy_logger.debug(\"inside except\")\n\t\treturn {\n\t\t\t\"error_code\": \"1\"\n\t\t}",
"def derivatives(self):\n self.rdot = self.v\n self.vdot[:,:] = 0.0\n \n for nl in self.nlists: \n nl.separations()\n \n for force in self.forces:\n force.apply()\n\n # Controllers is the new implementation of forces\n for controller in self.controllers:\n controller.apply()",
"def _calcDerivs(self, seq):\n self.module.reset()\n for sample in seq:\n self.module.activate(sample[0])\n error = 0\n ponderation = 0.\n for offset, sample in reversed(list(enumerate(seq))):\n # need to make a distinction here between datasets containing\n # importance, and others\n target = sample[1]\n outerr = target - self.module.outputbuffer[offset]\n if len(sample) > 2:\n importance = sample[2]\n error += 0.5 * dot(importance, outerr ** 2)\n ponderation += sum(importance)\n self.module.backActivate(outerr * importance)\n else:\n error += 0.5 * sum(outerr ** 2)\n ponderation += len(target)\n # FIXME: the next line keeps arac from producing NaNs. I don't\n # know why that is, but somehow the __str__ method of the\n # ndarray class fixes something,\n str(outerr)\n self.module.backActivate(outerr)\n\n return error, ponderation",
"def derivatives(self):\n self.rdot = self.v\n self.vdot[:,:] = 0.0\n self.udot[:] = 0.0\n\n t = time()\n for nl in self.nlists: \n nl.separations()\n #nl.apply_minimum_image()\n self.timing['pairsep time'] = (time() - t)\n\n t = time()\n if SPROPS:\n properties.spam_properties(self,self.nl_default \\\n ,self.h[0:self.n],self.hlr[0:self.n])\n self.timing['SPAM time'] = time() - t\n \n t = time()\n for force in self.forces:\n force.apply()\n self.timing['force time'] = time() - t\n \n if ADVECTIVE:\n self.rdot[:,:] = 0.0",
"def _derivatives(self, state, forces_moments):\n # extract the states\n pn = state[0]\n pe = state[1]\n pd = state[2]\n e0 = state[3]\n e1 = state[4]\n e2 = state[5]\n e3 = state[6]\n u = state[7]\n v = state[8]\n w = state[9]\n # state[6:10] = normalize(state[6:10])\n p = state[10]\n q = state[11]\n r = state[12]\n # extract forces/moments\n fx = forces_moments[0]\n fy = forces_moments[1]\n fz = forces_moments[2]\n l = forces_moments[3]\n m = forces_moments[4]\n n = forces_moments[5]\n\n\n # with warnings.catch_warnings():\n # warnings.filterwarnings('error')\n # try:\n # # position kinematics\n # except Warning as e:\n # pdb.set_trace()\n # print(e)\n\n pn_dot = (e1**2+e0**2-e2**2-e3**2)*u + 2*(e1*e2-e3*e0)*v + 2*(e1*e3+e2*e0)*w\n pe_dot = 2*(e1*e2+e3*e0)*u + (e2**2+e0**2-e1**2-e3**2)*v + 2*(e2*e3-e1*e0)*w\n pd_dot = 2*(e1*e3-e2*e0)*u + 2*(e2*e3+e1*e0)*v + (e3**2+e0**2-e1**2-e2**2)*w\n\n # pn_dot = (e0**2+e1**2-e2**2-e3**2)*u + 2*(e1*e2+e3*e0)*v + 2*(e1*e3-e2*e0)*w\n # pe_dot = 2*(e1*e2-e3*e0)*u + (e0**2-e1**2+e2**2-e3**2)*v + 2*(e2*e3+e1*e0)*w\n # pd_dot = 2*(e1*e3+e2*e0)*u + 2*(e2*e3-e1*e0)*v + (e0**2-e1**2-e2**2+e3**2)*w\n\n # pdb.set_trace()\n\n # position dynamics\n mass = self.mass\n u_dot = (r*v-q*w)+fx/mass\n v_dot = (p*w-r*u)+fy/mass\n w_dot = (q*u-p*v)+fz/mass\n\n # rotational kinematics\n e0_dot = 0.5*(-p*e1-q*e2-r*e3)\n e1_dot = 0.5*(p*e0+r*e2-q*e3)\n e2_dot = 0.5*(q*e0-r*e1+p*e3)\n e3_dot = 0.5*(r*e0+q*e1-p*e2)\n\n # rotatonal dynamics\n p_dot = self.gamma1*p*q - self.gamma2*q*r + self.gamma3*l + self.gamma4*n\n q_dot = self.gamma5*p*r - self.gamma6*(p**2-r**2) + m/self.Jy\n r_dot = self.gamma7*p*q - self.gamma1*q*r + self.gamma4*l + self.gamma8*n\n\n # collect the derivative of the states\n x_dot = np.array([pn_dot, pe_dot, pd_dot, e0_dot, e1_dot, e2_dot, e3_dot,\n u_dot, v_dot, w_dot, p_dot, q_dot, r_dot])\n # pdb.set_trace()\n\n\n # print(x_dot)\n return x_dot",
"def bond_dist_delta(ase_mol1, ase_mol2):\n #convert to molmod\n mol1 = to_molmod(ase_mol1)\n mol2 = to_molmod(ase_mol2)\n\n #get bond distances between neighbouring carbon atoms\n mol1_bdists_inds = bond_distances_v2(mol1)\n #seperate the bond distances and the atom indices the bonds correspond to\n #nb indexes are python_like so start at zero programs (e.g. pyMol/Avogadro) often number atoms starting at 1\n mol1_bdists, mol1_inds = zip(*mol1_bdists_inds)\n\n mol2_bdists_inds = bond_distances_v2(mol2, bonds=mol1_inds)\n mol2_bdists, mol2_inds = zip(*mol2_bdists_inds)\n\n if mol1_inds != mol2_inds:\n raise RuntimeError('Comparison of bond distances for different molecules not yet implemented')\n\n mol1_bdists = np.array(mol1_bdists)\n mol2_bdists = np.array(mol2_bdists)\n\n delta_bdists = mol1_bdists - mol2_bdists\n return np.array([mol1_inds, delta_bdists])",
"def bond_distances_v2(molmod_atoms, bonds=None, ignored_elements=None):\n if not ignored_elements:\n ignored_elements = []\n\n m=molmod_atoms\n\n if not bonds:\n bonds = m.graph.edges\n\n bond_dists = []\n indices = []\n\n for ind1, ind2 in bonds:\n if not m.symbols[ind1] in ignored_elements and not m.symbols[ind2] in ignored_elements:\n bond_dists.append(m.distance_matrix[ind1,ind2]/molmod.angstrom)\n indices.append((ind1, ind2))\n\n #we sort by bond index so that comparison between two bdist_inds objects is possible (without sorting we can get variation in the order)\n bdist_inds = zip(bond_dists, indices)\n bdist_inds.sort(key=lambda e: e[1])\n\n return bdist_inds",
"def _derivatives(self, state, forces_moments):\n # extract the states\n pn = state.item(0)\n pe = state.item(1)\n pd = state.item(2)\n u = state.item(3)\n v = state.item(4)\n w = state.item(5)\n e0 = state.item(6)\n e1 = state.item(7)\n e2 = state.item(8)\n e3 = state.item(9)\n p = state.item(10)\n q = state.item(11)\n r = state.item(12)\n # extract forces/moments\n fx = forces_moments.item(0)\n fy = forces_moments.item(1)\n fz = forces_moments.item(2)\n l = forces_moments.item(3)\n m = forces_moments.item(4)\n n = forces_moments.item(5)\n\n # position kinematics\n pn_dot =\n pe_dot =\n pd_dot =\n\n # position dynamics\n u_dot =\n v_dot =\n w_dot =\n\n # rotational kinematics\n e0_dot =\n e1_dot =\n e2_dot =\n e3_dot =\n\n # rotatonal dynamics\n p_dot =\n q_dot =\n r_dot = \n\n # collect the derivative of the states\n x_dot = np.array([[pn_dot, pe_dot, pd_dot, u_dot, v_dot, w_dot,\n e0_dot, e1_dot, e2_dot, e3_dot, p_dot, q_dot, r_dot]]).T\n return x_dot",
"def differential_coding(blocks: np.ndarray):\n dc_comps = [transform.dc_component(b) for b in blocks]\n return utils.differences(dc_comps)",
"def compute_hydration_energies(molecules, parameters):\n\n energies = dict() # energies[index] is the computed solvation energy of molecules[index]\n\n platform = openmm.Platform.getPlatformByName(\"Reference\")\n\n for molecule in molecules:\n # Create OpenMM System.\n system = openmm.System()\n for atom in molecule.GetAtoms():\n mass = OEGetDefaultMass(atom.GetAtomicNum())\n system.addParticle(mass * units.amu)\n\n # Add nonbonded term.\n # nonbonded_force = openmm.NonbondedSoftcoreForce()\n # nonbonded_force.setNonbondedMethod(openmm.NonbondedForce.NoCutoff)\n # for atom in molecule.GetAtoms():\n # charge = 0.0 * units.elementary_charge\n # sigma = 1.0 * units.angstrom\n # epsilon = 0.0 * units.kilocalories_per_mole\n # nonbonded_force.addParticle(charge, sigma, epsilon)\n # system.addForce(nonbonded_force)\n\n # Add GBVI term\n # gbvi_force = openmm.GBVISoftcoreForce()\n gbvi_force = openmm.GBVIForce() \n gbvi_force.setNonbondedMethod(openmm.GBVIForce.NoCutoff) # set no cutoff\n gbvi_force.setSoluteDielectric(1)\n gbvi_force.setSolventDielectric(78)\n\n # Use scaling method.\n # gbvi_force.setBornRadiusScalingMethod(openmm.GBVISoftcoreForce.QuinticSpline)\n # gbvi_force.setQuinticLowerLimitFactor(0.75)\n # gbvi_force.setQuinticUpperBornRadiusLimit(50.0*units.nanometers)\n\n # Build indexable list of atoms.\n atoms = [atom for atom in molecule.GetAtoms()] \n \n # Assign GB/VI parameters.\n for atom in molecule.GetAtoms(): \n atomtype = atom.GetStringData(\"gbvi_type\") # GBVI atomtype\n charge = atom.GetPartialCharge() * units.elementary_charge\n radius = parameters['%s_%s' % (atomtype, 'radius')] * units.angstroms\n gamma = parameters['%s_%s' % (atomtype, 'gamma')] * units.kilocalories_per_mole \n # gamma *= -1.0 # DEBUG\n lambda_ = 1.0 # fully interacting\n # gbvi_force.addParticle(charge, radius, gamma, lambda_) # for GBVISoftcoreForce\n gbvi_force.addParticle(charge, radius, gamma) # for GBVIForce\n\n # Add bonds.\n for bond in molecule.GetBonds():\n # Get atom indices.\n iatom = bond.GetBgnIdx()\n jatom = bond.GetEndIdx()\n # Get bond length.\n (xi, yi, zi) = molecule.GetCoords(atoms[iatom])\n (xj, yj, zj) = molecule.GetCoords(atoms[jatom])\n distance = math.sqrt((xi-xj)**2 + (yi-yj)**2 + (zi-zj)**2) * units.angstroms\n # Identify bonded atoms to GBVI.\n gbvi_force.addBond(iatom, jatom, distance)\n\n # Add the force to the system.\n system.addForce(gbvi_force)\n \n # Build coordinate array.\n natoms = len(atoms)\n coordinates = units.Quantity(numpy.zeros([natoms, 3]), units.angstroms)\n for (index,atom) in enumerate(atoms):\n (x,y,z) = molecule.GetCoords(atom)\n coordinates[index,:] = units.Quantity(numpy.array([x,y,z]),units.angstroms) \n \n # Create OpenMM Context.\n timestep = 1.0 * units.femtosecond # arbitrary\n integrator = openmm.VerletIntegrator(timestep)\n context = openmm.Context(system, integrator, platform)\n\n # Set the coordinates.\n context.setPositions(coordinates)\n \n # Get the energy\n state = context.getState(getEnergy=True)\n energies[molecule] = state.getPotentialEnergy()\n\n return energies",
"def predict(self, X):\n N, D = X.shape\n\n # init prediction array\n prediction = np.array([-1] * N)\n\n # retrieve the probability of predicting fraud for each model (K models)\n predict_proba_fraud = [-1] * self.K\n\n # we do the computation for all input test examples\n for i, instance in enumerate(X):\n sum_weight = 0\n F_k = 0\n\n # for k in= {1,2.....K} do\n k = -1\n for model in self.models.islice(start=0, stop=self.K, reverse=True):\n k += 1\n clf = model.clf\n sum_weight += model.weight\n\n # (1) compute the corresponding Fk(x)\n # compute one part of Fk(y) with the weights (be careful: sum_weight may be 0)\n F_k = (F_k * sum_weight) / sum_weight if sum_weight != 0 else 0\n\n # if the probability is not initialized we call the predict proba method\n if (type(predict_proba_fraud[k]) is int and predict_proba_fraud[k] == -1) \\\n or (predict_proba_fraud[k].shape[0] != self.S):\n predict_proba_fraud[k] = clf.predict_proba(self.X_chunk)\n\n # if we don't have the probability of predicting fraud --> p = 0, do nothing\n if len(predict_proba_fraud[k][i]) == 2:\n F_k += (model.weight * predict_proba_fraud[k][i][1]) / sum_weight\n\n # (2) we assign Fk value to a bin j\n t_y = instance[-1] # amount of the transaction (in the last column of the features)\n found = False # found: if a label has been decided (deal with 2 for's break)\n j = 0\n eps = len(self.bins)\n\n # while we haven't found the bin AND no prediction has not yet been given\n while j < eps and not found:\n stat = self.bins[j][k]\n\n # find the bin i y belongs to\n if (j / eps) <= F_k < ((j + 1) / eps):\n # (3) apply rule (10) for this bin (What if the amount is 0 ?)\n if t_y != 0:\n if F_k - stat['mean'] - self.t * stat['var'] > (self.cost / t_y): # FRAUD\n found = True\n prediction[i] = 1\n elif F_k + stat['mean'] + self.t * stat['var'] <= (self.cost / t_y): # NON-FRAUD\n found = True\n prediction[i] = 0\n else:\n found = True\n prediction[i] = 0\n\n j = j + 1\n\n if found: # if we found a value we go to the next example\n break\n\n # (4) if no classifier left i.e. we have consulted every classifier without having an answer\n # --> prediction[i] is not yet given\n if prediction[i] == -1:\n if instance[-1] != 0 and F_k > self.cost / instance[-1]: # instance[-1] is just t(y)\n prediction[i] = 1\n else:\n prediction[i] = 0\n\n return prediction",
"def test_assign_fractional_bond_orders(self):\n # TODO: Test only one molecule for speed?\n # TODO: Do we need to deepcopy each molecule, or is setUp called separately for each test method?\n\n # Do not modify the original molecules.\n molecules = copy.deepcopy(mini_drug_bank())\n\n toolkits_to_bondorder_method = {\n (OpenEyeToolkitWrapper,): [\"am1-wiberg\", \"pm3-wiberg\"]\n }\n # Don't test AmberTools here since it takes too long\n # (AmberToolsToolkitWrapper, RDKitToolkitWrapper):['am1-wiberg']}\n for toolkits in list(toolkits_to_bondorder_method.keys()):\n toolkit_registry = ToolkitRegistry(toolkit_precedence=toolkits)\n for bond_order_model in toolkits_to_bondorder_method[toolkits]:\n for molecule in molecules[\n :5\n ]: # Just test first five molecules for speed\n molecule.generate_conformers(toolkit_registry=toolkit_registry)\n molecule.assign_fractional_bond_orders(\n bond_order_model=bond_order_model,\n toolkit_registry=toolkit_registry,\n use_conformers=molecule.conformers,\n )\n # fbo1 = [bond.fractional_bond_order for bond in molecule.bonds]\n # TODO: Now that the assign_fractional_bond_orders function takes more kwargs,\n # how can we meaningfully cache its results?\n # # Call should be faster the second time due to caching\n # molecule.assign_fractional_bond_orders(bond_order_model=bond_order_model,\n # toolkit_registry=toolkit_registry)\n # fbo2 = [bond.fractional_bond_order for bond in molecule.bonds]\n # np.testing.assert_allclose(fbo1, fbo2, atol=1.e-4)",
"def test_assign_fractional_bond_orders(self):\n # TODO: Test only one molecule for speed?\n # TODO: Do we need to deepcopy each molecule, or is setUp called separately for each test method?\n\n # Do not modify the original molecules.\n molecules = copy.deepcopy(mini_drug_bank())\n\n toolkits_to_bondorder_method = {\n (OpenEyeToolkitWrapper,): [\"am1-wiberg\", \"pm3-wiberg\"]\n }\n # Don't test AmberTools here since it takes too long\n # (AmberToolsToolkitWrapper, RDKitToolkitWrapper):['am1-wiberg']}\n for toolkits in list(toolkits_to_bondorder_method.keys()):\n toolkit_registry = ToolkitRegistry(toolkit_precedence=toolkits)\n for bond_order_model in toolkits_to_bondorder_method[toolkits]:\n for molecule in molecules[\n :5\n ]: # Just test first five molecules for speed\n molecule.generate_conformers(toolkit_registry=toolkit_registry)\n molecule.assign_fractional_bond_orders(\n bond_order_model=bond_order_model,\n toolkit_registry=toolkit_registry,\n use_conformers=molecule.conformers,\n )\n fbo1 = [bond.fractional_bond_order for bond in molecule.bonds]\n # TODO: Now that the assign_fractional_bond_orders function takes more kwargs,\n # how can we meaningfully cache its results?\n # # Call should be faster the second time due to caching\n # molecule.assign_fractional_bond_orders(bond_order_model=bond_order_model,\n # toolkit_registry=toolkit_registry)\n # fbo2 = [bond.fractional_bond_order for bond in molecule.bonds]\n # np.testing.assert_allclose(fbo1, fbo2, atol=1.e-4)",
"def n1derivative_clbb(cl_array,bins,n1bins,clpp,norms,cls,cltt,clee,clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out):\n bins=bins-2\n array1001=perturbe_clist(cl_array,bins,1.001)\n array999=perturbe_clist(cl_array,bins,0.999)\n \n N1001=[[],[],[],[],[]] #list of lists containing tt,ee,eb,te,tb\n N0999=[[],[],[],[],[]]\n \n for i in range(len(array1001)):\n print(i)\n a=compute_n1_py(clpp,norms,cls,cltt,clee,array1001[i],clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out)\n b=compute_n1_py(clpp,norms,cls,cltt,clee,array999[i],clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out)\n for j in range(len(N1001)):\n N1001[j].append(a[j])\n N0999[j].append(b[j])\n\n delta=diff_cl(cl_array,bins)\n \n \n \n keys=['TT','EE','EB','TE','TB']\n \n derlist=[]\n for k in range(len(keys)):\n diff=[n1bins]\n for i in range(len(N1001[1])):\n der=((N1001[k][i][:len(n1bins)]-N0999[k][i][:len(n1bins)])*(n1bins*(n1bins+1))**2*0.25)/delta[i]\n diff.append(der)\n der=np.insert(np.transpose(diff),0,np.insert(bins,0,0),axis=0)\n derlist.append(der)\n np.savetxt('../data/n1{}dclbb.txt'.format(keys[k]),der)\n return derlist",
"def n0derivative_clbb(cl_array,bins,n0bins,clpp,norms,cls,cltt,clee,clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out):\n bins=bins-2\n array1001=perturbe_clist(cl_array,bins,1.001)\n array999=perturbe_clist(cl_array,bins,0.999)\n \n N1001=[[],[],[],[],[]] #list of lists containing tt,ee,eb,te,tb\n N0999=[[],[],[],[],[]]\n \n for i in range(len(array1001)):\n print(i)\n a=compute_n1_py(clpp,cls,cltt,clee,array1001[i],clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lmin_out,Lstep)\n b=compute_n1_py(clpp,cls,cltt,clee,array999[i],clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lmin_out,Lstep)\n for j in range(len(N1001)):\n N1001[j].append(a[j])\n N0999[j].append(b[j])\n\n delta=diff_cl(cl_array,bins)\n \n \n \n keys=['TT','EE','EB','TE','TB']\n \n derlist=[]\n for k in range(len(keys)):\n diff=[n0bins]\n for i in range(len(N1001[1])):\n der=((N1001[k][i][:len(n0bins)]-N0999[k][i][:len(n0bins)])*(n0bins*(n0bins+1))**2*0.25)/delta[i]\n diff.append(der)\n der=np.insert(np.transpose(diff),0,np.insert(bins,0,0),axis=0)\n derlist.append(der)\n np.savetxt('../data/n0{}dclbb.txt'.format(keys[k]),der)\n return derlist",
"def compute_bonding(molecule):\n bonder = SimpleBondPerceiver()\n bonder.SetInputData(molecule)\n bonder.SetTolerance(0.1)\n bonder.Update()\n deep_copy_molecule(molecule, bonder.GetOutput())",
"def compute_derivs_matrices(vecs, adv_vecs, dt):\n return (adv_vecs - vecs)/(1.*dt)",
"def gather_derivatives(self):\n self.xdot[0,0:self.n] = self.mdot[0:self.n] \n self.xdot[1,0:self.n] = self.rdot[0:self.n,0]\n self.xdot[2,0:self.n] = self.rdot[0:self.n,1]\n self.xdot[3,0:self.n] = self.rdot[0:self.n,2]\n self.xdot[4,0:self.n] = self.vdot[0:self.n,0]\n self.xdot[5,0:self.n] = self.vdot[0:self.n,1]\n self.xdot[6,0:self.n] = self.vdot[0:self.n,2]\n self.xdot[7,0:self.n] = self.rhodot[0:self.n] \n self.xdot[8,0:self.n] = 0\n self.xdot[9,0:self.n] = 0\n self.xdot[10,0:self.n] = self.udot[0:self.n]\n return self.xdot",
"def getBondVectors(struct,tol,prec): \n \n \n binary_matrix= getDistMat(struct,tol)\n bond_dir = {}\n distance_matrix = struct.distance_matrix\n lattice = np.array(struct.lattice.as_dict()['matrix'])\n iterations = list(itertools.product([1,0,-1],repeat=3))\n # Loop over list of atoms\n for i in range(len(binary_matrix)):\n for j in range(i+1,len(binary_matrix)):\n # Proceed if the entries are listed as \"bonded\" \n if binary_matrix[i][j]==1: \n s1 = struct.species[i]\n s2 = struct.species[j]\n # Organize dictionary so it is always in order of increasing\n # atomic number\n if s1.number>s2.number:\n s1 = struct.species[j]\n s2 = struct.species[i] \n if s1 not in bond_dir:\n bond_dir[s1]={}\n if s2 not in bond_dir[s1]:\n bond_dir[s1][s2]=[]\n valid_vs = []\n \n # Get the vector between atomic positions\n \n bond_vector = np.array(struct.sites[j].coords-\n struct.sites[i].coords) \n \n # The positions of the atoms may not be in the right locations\n # to be the minimum distance from each other. As a result,\n # a translation is applied to the resulting \"bond vector\" \n # (alternatively, one of the atoms is translated)\n for shift in iterations:\n bondShift = bond_vector + np.dot(lattice.T,shift)\n if abs(distance_matrix[i][j]-magni(bondShift))<=prec:\n valid_vs.append(bondShift)\n break\n # See if the vector is already present in the collection of \n # vectors. If so, add the coordinates to the entry. Else,\n # create a new entry for the direction of the bond.\n for v in valid_vs:\n if np.any([magni(v-x[0])<=prec for x in bond_dir[s1][s2]]):\n for k in range(len(bond_dir[s1][s2])):\n if magni(v-bond_dir[s1][s2][k][0])<=prec:\n bond_dir[s1][s2][k][1].append([i,j])\n break\n \n else:\n bond_dir[s1][s2].append([v,[[i,j]]])\n return(bond_dir)",
"def gather_derivatives(self):\n self.xdot[0,0:self.n] = self.mdot[0:self.n] \n self.xdot[1,0:self.n] = self.rdot[0:self.n,0]\n self.xdot[2,0:self.n] = self.rdot[0:self.n,1]\n self.xdot[3,0:self.n] = self.rdot[0:self.n,2]\n self.xdot[4,0:self.n] = self.vdot[0:self.n,0]\n self.xdot[5,0:self.n] = self.vdot[0:self.n,1]\n self.xdot[6,0:self.n] = self.vdot[0:self.n,2]\n return self.xdot",
"def extract_bonds(self):\n atom_types = self.contents['Sub_ID']\n atom_ids = self.contents['ID']\n bond_list = []\n for key, value in self.bonds.items():\n a = value[0]\n b = value[1]\n\n A = np.asarray(atom_types).reshape(-1, 3)\n B = np.asarray(atom_ids).reshape(-1, 3)\n\n D = np.where(A == a, B, np.nan)\n E = np.where(A == b, B, np.nan)\n\n D = D[:, ~np.all(np.isnan(D), axis=0)]\n E = E[:, ~np.all(np.isnan(E), axis=0)]\n\n D_ = np.tile(D, (1, E.shape[1]))\n E_ = np.repeat(E, D.shape[1], axis=1)\n\n F = np.asarray([D_, E_]).T\n\n idd = np.ones((F.shape[1], F.shape[0])) * key\n # g = np.arange(1, )\n fi = np.arange(F.shape[1])\n iff = np.repeat(fi[:,np.newaxis], 2, axis=1)\n\n concate = np.concatenate((iff[:,:,np.newaxis], idd[:,:,np.newaxis], F.swapaxes(0, 1)), axis=-1)\n concate = concate.reshape(-1, 4)\n df = pd.DataFrame(data=concate, columns=['Mol_ID', 'Bond_type', 'Atom_1', 'Atom_2'])\n bond_list.append(df)\n self.bond_df = pd.concat(bond_list)\n self.num_bonds = len(self.bond_df)",
"def _computeDerivative(self,angles, distances):\n slope=[]\n slope.append(0)\n for i in xrange(1,len(angles)):\n der = (distances[i]-distances[i-1])/(angles[i]-angles[i-1])\n slope.append(der)\n #slope.append(0)\n return slope",
"def iter_bonds(self):\n for bond in self.bond_list:\n yield bond",
"def deredden_cepheids(df_variables):\n extinction_coefficients = {'2365-2764-1': np.array([0.2622, 0.844]), '4109-638-1': np.array([0.0524, 0.1576]),\n '2058-56-1': np.array([0.0751, 0.248]), '3642-2459-1': np.array([0.1907, 0.608]),\n '3999-1391-1': np.array([0.3911, 1.2480]), '2607-1448-1': np.array([0.0430, 0.1310])}\n print \"Dereddening Cepheids:\"\n for tyc in extinction_coefficients.keys():\n print \"%s..\" % tyc\n b_minus_v = df_variables[df_variables.tycho2_id == tyc].B_V\n m_v = df_variables[df_variables.tycho2_id == tyc].M_V\n extinc = extinction_coefficients[tyc]\n df_variables.set_value(df_variables.tycho2_id == tyc, 'B_V', b_minus_v - extinc[0])\n df_variables.set_value(df_variables.tycho2_id == tyc, 'M_V', m_v - extinc[1])\n print \"..Done\\n----------\"\n\n return df_variables",
"def get_bessel_derivative(self):\n return np.array([t.der_bessel for t in self._trc])",
"def n0derivative_clee(cl_array,bins,n0bins,clpp,norms,cls,cltt,clee,clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lstep,Lmin_out):\n \n bins=bins-2\n array1001=perturbe_clist(cl_array,bins,1.001)\n array999=perturbe_clist(cl_array,bins,0.999)\n \n N1001=[[],[],[],[],[]] #list of lists containing tt,ee,eb,te,tb\n N0999=[[],[],[],[],[]]\n \n for i in range(len(array1001)):\n print(i)\n a=compute_n0_py(clpp,cls,cltt,array1001[i],clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lmin_out,Lstep)\n b=compute_n0_py(clpp,cls,cltt,array999[i],clbb,clte,NOISE_LEVEL,polnoise,lmin,LMAXOUT,LMAX_TT,Lmin_out,Lstep)\n for j in range(len(N1001)):\n N1001[j].append(a[j])\n N0999[j].append(b[j])\n\n delta=diff_cl(cl_array,bins)\n\n \n keys=['TT','EE','EB','TE','TB']\n \n derlist=[]\n for k in range(len(keys)):\n diff=[n0bins]\n for i in range(len(N1001[1])):\n der=((N1001[k][i][:len(n0bins)]-N0999[k][i][:len(n0bins)])*(n0bins*(n0bins+1))**2*0.25)/delta[i]\n diff.append(der)\n der=np.insert(np.transpose(diff),0,np.insert(bins,0,0),axis=0)\n derlist.append(der)\n np.savetxt('../data/n0{}dclee.txt'.format(keys[k]),der)\n print(derlist)\n return derlist",
"def Decoupler(data,decoupled_name,list_to_decouple=None,decimals=False):\n list_dec = parameters.outputs if list_to_decouple is None else copy.copy(list_to_decouple)\n # Get the arrays of mH, mA ordered as in the outputs\n list_rest = [i for i in data.columns if i not in list_dec] # All but outputs\n n_weights = len(list_dec)\n mHmA = np.empty((0,2))\n for ol in list_dec:\n if decimals:\n arr = np.array([[float(re.findall(r\"\\d*\\.\\d+|\\d+\", ol)[0]),float(re.findall(r\"\\d*\\.\\d+|\\d+\", ol)[1])]])\n else:\n arr = np.array([[int(re.findall(r'_\\d+', ol)[0].replace('_','')),int(re.findall(r'_\\d+', ol)[1].replace('_',''))]])\n mHmA = np.append(mHmA,arr,axis=0)\n # Get the numpy arrays #\n decouple = data[list_dec].values\n repeat = data[list_rest].values\n\n # Repeat and decouple #\n repeat = Repeater(repeat,n_weights)\n masses = np.tile(mHmA,(data.shape[0],1))\n decouple = decouple.flatten()\n\n # Concatenate and make DF #\n new_arr = np.c_[repeat,masses,decouple]\n df = pd.DataFrame(new_arr,columns=list_rest+['mH_MEM','mA_MEM',decoupled_name])\n\n return df",
"def identify_bonds(chosen_atom, atom_list):\n list_of_hydrogens = ['H15', 'H14', 'H13', 'H12', 'H11', 'H10', 'H9', 'H8', 'H7', 'H6', 'H5', 'H4', 'H3', 'H2', 'H1'] \n if ((chosen_atom.atom_name not in list_of_hydrogens) and (chosen_atom.residue_name != \"P1A\")):\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 2) and (abs(chosen_atom.y - atom.y) <= 2) and (abs(chosen_atom.z - atom.z) <= 2))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 2)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n elif ((chosen_atom.atom_name not in list_of_hydrogens) and (chosen_atom.residue_name == \"P1A\")):\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 2) and (abs(chosen_atom.y - atom.y) <= 2) and (abs(chosen_atom.z - atom.z) <= 2))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 1.8)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n else:\n nearby_atoms_crude = [atom for atom in atom_list if ((abs(chosen_atom.x - atom.x) <= 1.6) and (abs(chosen_atom.y - atom.y) <= 1.6) and (abs(chosen_atom.z - atom.z) <= 1.6))]\n nearby_atoms = [atom for atom in nearby_atoms_crude if (0 < calculate_3D_distance_2_atoms(chosen_atom,atom) <= 1.6)]\n identified_bonds = [[atom, calculate_3D_distance_2_atoms(chosen_atom, atom)] for atom in nearby_atoms if (check_bond(chosen_atom, atom) == True)] \n for elements in nearby_atoms:\n if (check_if_no_bond(chosen_atom, elements, bond_list, bond_list_3) == True):\n nearby_atoms.remove(elements)\n if (len(nearby_atoms) == len(identified_bonds)):\n return identified_bonds\n else:\n return []",
"def _evalAndDer(self, x):\n m = len(x)\n fx = np.zeros((m, self.funcCount))\n for j in range(self.funcCount):\n fx[:, j] = self.functions[j](x)\n i = self.argcompare(fx, axis=1)\n y = fx[np.arange(m), i]\n dydx = np.zeros_like(y)\n for j in range(self.funcCount):\n c = i == j\n dydx[c] = self.functions[j].derivative(x[c])\n return y, dydx"
] | [
"0.6377305",
"0.57994026",
"0.56501853",
"0.52041197",
"0.5169816",
"0.5126491",
"0.51183426",
"0.51093304",
"0.50972986",
"0.50279194",
"0.5017863",
"0.49950963",
"0.49750766",
"0.4968196",
"0.49513084",
"0.4942393",
"0.49264514",
"0.49236074",
"0.4922351",
"0.4922289",
"0.49147126",
"0.49139398",
"0.4909021",
"0.4896904",
"0.4887673",
"0.48779267",
"0.4876267",
"0.48718554",
"0.4859709",
"0.4854957"
] | 0.58677953 | 1 |
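The objective block above declares a triplet of (query, document, negatives), with negative_scores giving one similarity value per negative. As an illustration only, the sketch below shows one way records shaped like these could be turned into training triplets; the `records` iterable and its field names are assumptions inferred from the columns in this dump, not a loader shipped with the dataset.

```python
# Illustrative only: turn records shaped like the ones in this dump into
# (query, positive, hard negatives) triplets. Field names are assumed from
# the columns shown here; the dataset itself provides no such helper.
def to_triplets(records, hard_k=5):
    for rec in records:
        query = rec["query"]              # natural-language description of the code
        positive = rec["document"]        # the code snippet the query describes
        negatives = rec["negatives"]      # non-matching snippets
        scores = rec["negative_scores"]   # similarity scores, same order as negatives
        # rank negatives by score so the hardest (most similar) come first
        ranked = sorted(zip(scores, negatives), key=lambda pair: float(pair[0]), reverse=True)
        yield query, positive, [neg for _, neg in ranked[:hard_k]]
```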
This function makes a list of all the songs in an album | def songs_list(name_of_album):
    songs = ""
    data = dbase()
    data = data[name_of_album][0]
    for song in data.keys():
        songs += song
        songs += ", "
    return songs[:-2] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def simple_songs_list(name_of_album):\r\n songs = []\r\n data1 = dbase()\r\n data1 = data1[name_of_album][0]\r\n for song in data1.keys():\r\n songs += [song]\r\n return songs",
"def album_list(self):\n\n artist_id = self.addon_args[\"artist_id\"][0]\n\n xbmcplugin.setContent(self.addon_handle, \"albums\")\n\n for album in self.connection.walk_artist(artist_id):\n self.add_album(album)\n\n xbmcplugin.addSortMethod(\n self.addon_handle, xbmcplugin.SORT_METHOD_UNSORTED)\n xbmcplugin.addSortMethod(\n self.addon_handle, xbmcplugin.SORT_METHOD_ALBUM)\n xbmcplugin.addSortMethod(\n self.addon_handle, xbmcplugin.SORT_METHOD_ARTIST)\n xbmcplugin.addSortMethod(\n self.addon_handle, xbmcplugin.SORT_METHOD_VIDEO_YEAR)\n\n xbmcplugin.endOfDirectory(self.addon_handle)",
"def get_album_list():\n\n # TODO: Paginate this, etc\n entities = PhotoAlbum.query().order(-PhotoAlbum.title).fetch(1000)\n\n return entities",
"def simple_album_list():\r\n album_list = []\r\n data = dbase()\r\n for album in data.keys():\r\n album_list += [album]\r\n return album_list",
"def get_albums(self):\n self.artist = self.artists_list.currentText()\n self.c_albums = [x['album'] for x in dmlc.list_albums(self.artist)\n if [x['album'] in self.albums_map[self.artist]]]\n self.albums_list.clear()\n self.albums_list.addItems(self.c_albums)\n self.update_navigation_buttons()",
"def get_songs_by_album(self, album_id):\n return self.__get('song', album_id)",
"def get_album_songs(self, album_id):\n url = get_album_url(album_id)\n result = self.get_request(url)\n\n return result['album']['songs']",
"def get_albums():\n return query_multiple(request.args, album_search, \\\n album_filter, Album, albums_schema)",
"def search_for_album(album_name):\n\n print(f'Searching for album: {album_name}')\n\n search_result = spotifyObject.search(q=f'\"{album_name}\"', limit=20, type='album')\n\n items = search_result['albums']['items']\n\n results = []\n\n for item in items:\n if len(item['artists']) > 1:\n artists = tuple(art['name'] for art in item['artists'])\n else:\n artists = item['artists'][0]['name']\n\n results.append((artists, item['name'], item['id']))\n\n return results",
"def get_all_songs():\r\n return [Song.song_json(song) for song in Song.query.all()]",
"def read_album_tracks(id, artist_name, album_name):\n list_a = [x.name for x in dmla.list_tracks(id)]\n list_c = [x['title'] for x in dmlc.list_tracks_for_album(artist_name, album_name)\n if x['track'] != -1]\n return list_a, list_c",
"def get_albums(playlist_name):\n\n playlist_id = find_playlist(playlist_name)\n \n items = get_playlist_tracks(playlist_id=playlist_id)\n \n track_values = []\n \n for item in items:\n track = item['track']\n album = track['album']\n artists = tuple(artist['name'] for artist in album['artists'])\n \n track_values.append((album['name'], artists[0]))\n \n album_details = namedtuple('AlbumDetails', 'album artist')\n \n for tup in dict.fromkeys(track_values):\n yield album_details(*tup)",
"def getSongsFromAlbum(albumLink):\n albumLink = str(albumLink)\n try:\n html = urllib.request.urlopen(albumLink).read()\n soup = bs4.BeautifulSoup(html, 'html.parser')\n table = soup.findAll(\"a\")[5:]\n songLinks = []\n for entry in table:\n text = str(re.findall(\"\\\".*\\\"\", str(entry)))\n text = re.sub(\"[\\]\\['\\\"]\", \"\", text)\n link = albumLink + str(text)\n songLinks.append(link)\n except:\n return []\n return songLinks",
"def get_albums(self):\n artist = self.get_request_arg(\"artist\")\n if artist:\n lib = self.ctrl.library\n lst = sorted(self.ctrl.library.get_albums(artist))\n albums = [{\"artist\": artist,\n \"album\": album,\n \"path\": lib.get_path(artist, album)} for album in lst]\n if lst:\n return self.resp_from_data(albums)\n return self.resp_from_data(\n {\"message\": f\"No album found for artist={artist}\"}, 400)",
"def get_albums_alpha(session_):\n artists = session_.query(Album).order_by(Album.title.asc()).all()\n return artists",
"def get_album_tracks(self):\n track_list = self.soup.findAll('div', class_='chart_row')\n number_of_tracks = 0\n titles = []\n urls = []\n track_numbers = []\n \n for track in track_list:\n track_title = re.sub(' Lyrics', '', \" \".join(track.h3.text.split()))\n lyrics_url = track.a['href']\n track_number = track.span.span.text.strip()\n \n if track_number == '':\n # Sometimes there are additional urls that are not a song's lyrics. Skip these.\n continue\n else:\n track_number = int(track_number)\n \n number_of_tracks += 1\n titles.append(track_title)\n urls.append(lyrics_url)\n track_numbers.append(track_number)\n \n if self.song_order:\n # Check that order values are okay.\n for number in self.song_order:\n if number > number_of_tracks:\n raise SongOrderValueError(f'Track number given ({number}) exceeds number of tracks ({number_of_tracks})')\n \n for title, url, number in zip(titles, urls, track_numbers):\n if self.song_order:\n if number not in self.song_order:\n print(f'Skipping song: {number:02d} {title}')\n continue\n \n lyrics = self.get_single_lyrics(url)\n self.album.add_song(Song(title=title, track_number=number, lyrics=lyrics))\n\n self.album.number_of_tracks = number_of_tracks",
"def selectSongs():\n\tsql =\"select songs.title, artist.name, album.name from songs, album, \" \\\n\t+ \"artist join songs_album on songs.id=songs_album.songs_id \" \\\n\t+ \"join songs_artist on songs.id=songs_artist.songs_id \" \\\n\t+ \"where album.id=songs_album.album_id \" \\\n\t+ \"and artist.id=songs_artist.artist_id\"\n\tc, conn = connect()\n\tretr = c.execute(sql)\n\tsongs = []\n\tfor entry in retr:\n\t\tsongs.append(music.song(title=entry[0], artist=entry[1], album=entry[2]))\n\treturn songs",
"def get_song_list(self):\n return self.song_list",
"def get_albums_by_artist(albumtype, search_for, sort_on):\n return list(dmla.list_albums_by_artist(albumtype, search_for, sort_on))",
"def song_album(ans):\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n if ans == song:\r\n return album",
"def tracked_albums():\n print('Your Google Photos Albums ([X] = tracked):')\n albums = get_albums(service)\n for i, a in enumerate(albums):\n check = 'X' if a.id in library.get_album_ids() else ' '\n print('[{}] {}. {}'.format(check, i+1, a.title))\n return albums",
"def album_list_for_user():\r\n answer = \"\"\r\n data = dbase()\r\n for album in data.keys():\r\n answer += album + \", \"\r\n return answer[:-2]",
"def get_songs(library):\n songs = []\n for song in library:\n title, artist, album = song['title'], song['artist'], song['album']\n seconds = int(song['durationMillis']) // 1000\n songs.append({'artist': artist, 'title': title, 'album': album, 'seconds': seconds})\n return songs",
"def get_songs(self, song_list):\n self.songs = [[s.name, s.movie_name] for s in song_list\n if s.raga == self.name]",
"def read_artist_albums(id, name):\n list_a = [(x.name, str(x.release_year), str(x.id))\n for x in dmla.list_albums_by_artist('', id, 'Jaar')]\n list_c = [(x['album'], x['year']) for x in dmlc.list_albums(name)]\n return list_a, list_c",
"def getAllSongs(self):\n return self.__songDictionary",
"def get_songs(self):\n search_object = {\"size\":25000,\n 'query': {'term': {FIELD_FINGERPRINTED: True}}, \"fields\": [FIELD_SONGNAME, FIELD_FILE_SHA1,\n FIELD_TOTAL_HASHES]}\n response = self.cursor.search(index = SONGS_INDEXNAME, body=search_object)\n #print(\"get_songs response: \",response)\n arr = []\n for hit in response[\"hits\"][\"hits\"]:\n dct = {\"song_name\":hit['_source'][FIELD_SONGNAME],\"total_hashes\":hit['_source'][FIELD_TOTAL_HASHES],\n \"file_sha1\":hit['_source'][FIELD_FILE_SHA1]}\n arr.append(dct)\n return arr",
"def get_albums(entity_url: str) -> list:\n entity_url = entity_url.rstrip(\"/\")\n response = requests.get(entity_url)\n soup = BeautifulSoup(response.text, \"html.parser\")\n albums = []\n for link in soup.find_all('a'):\n url = link.get('href')\n if url is not None and \"/album/\" in url:\n if url.startswith(\"http\"):\n albums.append(url)\n else:\n albums.append(f\"{entity_url}{url}\")\n return albums",
"def albums_by_genre_list(self):\n\n genre = self.addon_args[\"foldername\"][0].decode(\"utf-8\")\n\n xbmcplugin.setContent(self.addon_handle, \"albums\")\n\n for album in self.connection.walk_album_list_genre(genre):\n self.add_album(album, show_artist=True)\n\n xbmcplugin.endOfDirectory(self.addon_handle)",
"def all_titles(our_data):\n return [album['album'] for album in our_data]"
] | [
"0.8295144",
"0.78646994",
"0.78634316",
"0.7836907",
"0.7691762",
"0.76080465",
"0.75683284",
"0.7421076",
"0.738293",
"0.7337441",
"0.7323385",
"0.7289032",
"0.7279224",
"0.72707593",
"0.7217044",
"0.7155472",
"0.71446615",
"0.7133181",
"0.71268225",
"0.7097782",
"0.70707077",
"0.70656383",
"0.70617455",
"0.70434916",
"0.70361507",
"0.6992326",
"0.69890827",
"0.69252425",
"0.6912125",
"0.68963075"
] | 0.81815284 | 1 |
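For context, a hedged usage sketch of the positive document in the record above. The `dbase()` helper it calls is not part of the record, so the stub below only assumes the nested shape these snippets index into (album name -> list whose first element maps song titles to a tuple, with the lyrics at index 2 as the neighbouring `get_len`/`song_lyrics` records read it); the album and song values are invented placeholders.

```python
# Hypothetical stand-in for the dbase() helper shared by these snippets; the
# real implementation is not included in the dataset. Shape assumed from how
# the functions index into it: {album: [{song: (field0, field1, lyrics)}]}.
def dbase():
    return {
        "Demo Album": [{
            "First Song": ("3:41", "1999", "la la la la"),
            "Second Song": ("2:58", "2001", "na na na"),
        }]
    }

def songs_list(name_of_album):  # copied from the record above
    songs = ""
    data = dbase()
    data = data[name_of_album][0]
    for song in data.keys():
        songs += song
        songs += ", "
    return songs[:-2]

print(songs_list("Demo Album"))  # -> "First Song, Second Song"
```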
This function calculates the number of words in one song | def get_len(song, album):
    length = 0
    words = dbase()[album][0][song]
    words = words[2]
    words = words.split()
    for word in words:
        length += 1
    return str(length) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def song_length(ans):\r\n length = 0\r\n flag = 1\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n if ans == song:\r\n words = dbase()[album][0][song]\r\n words = words[2]\r\n words = words.split()\r\n for word in words:\r\n length += 1\r\n flag = 1\r\n return str(length)\r\n\r\n elif ans != song and flag == 0:\r\n return \"song not found!\"",
"def get_number_of_words(self):\n filename = f'{self.path}/{self.filename}'\n # word_counter = {}\n # w_cnt = 0\n # x = 0\n file = open(filename, 'r', encoding='utf-8')\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n # for word in word_list:\n # w_cnt += 1\n # if word not in word_counter:\n # word_counter[word] = 1\n # else:\n # word_counter[word] = word_counter[word] + 1\n\n # for word in word_list:\n # x += 1\n # print(word, word.isalpha(), x)\n\n w_cnt = sum([a[0].isalpha() for a in word_list])\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'number_of_words', w_cnt)\n print(datetime.now(), '-', 'number_of_words for', self.filename, 'calculated =', w_cnt)\n return None",
"def count_words(filename):",
"def word_count(self):\n print(self.words())\n return len(self.words())\n #count = 0\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # count += len(wordslst)\n #return count\n #joined_string = ''.join(self.lines)\n #for word in joined_string:\n # if word != ' ' and word != '\\n' and word != '\\t':\n # count += 1\n #print('READ ME ––––––––––', self.lines)\n #print(joined_string)\n #print(line)\n #print(wordslst)\n #print(count)",
"def wordCount(document):\n return float(len(document.split(None)))",
"def count_words_and_dublicates(novel):",
"def count_words_in_file(file_name):\n\n\treturn len(get_words_in_file(file_name))",
"def count_word(doc):\n count = count = 0\n for w in document.split(\" \"):\n count = count + 1\n return count",
"def count(self, word):\n pass",
"def word_frequencies(url):\n\ttexts = get_all_texts(url)\n\tcount = count_words_in_sentence_list(texts)\n\treturn count",
"def count_all_words(file_name):\n\n return len(separate_words(file_name))",
"def count(word):\n\n return len(word)",
"def count_words_per_sentence(doc):\n s = 0\n for sentence in document.split(\".\"):\n s = s + 1\n w = count_word(doc) \n return w/s",
"def how_many_vocals(word):\n\n word= word.lower()\n result1 = word.count('a')\n result2 = word.count('e')\n result3 = word.count('i')\n result4 = word.count('o')\n result5 = word.count('u')\n\n total_result = result1 +result2+result3+result4+result5\n return total_result",
"def num_words():\n # Load the GT.\n df = pd.read_csv(config.META_FQN, sep=\"\\t\")\n stats = {\n \"T\": {\"words\": [], \"duration\": []},\n \"P\": {\"words\": [], \"duration\": []},\n \"sess\": {\"words\": [], \"duration\": []},\n }\n\n for _, row in df.iterrows():\n if row[\"asr_test\"]:\n stats[\"P\"][\"words\"].append(float(row[\"gt_patient_num_words\"]))\n stats[\"T\"][\"words\"].append(float(row[\"gt_therapist_num_words\"]))\n stats[\"P\"][\"duration\"].append(float(row[\"gt_patient_time_spoken\"]))\n stats[\"T\"][\"duration\"].append(\n float(row[\"gt_therapist_time_spoken\"])\n )\n stats[\"sess\"][\"duration\"].append(float(row[\"sess_dur\"]))\n n_words = (\n row[\"gt_therapist_num_words\"] + row[\"gt_patient_num_words\"]\n )\n stats[\"sess\"][\"words\"].append(n_words)\n\n for speaker in stats:\n for metric in stats[speaker]:\n print(f\"------ {speaker} | {metric} ------\")\n print_stats(stats[speaker][metric])",
"def total_words(target_text):\n\n splited_text = target_text.split()\n nbwords = len(splited_text)\n return nbwords",
"def word_count(text, word):\n \n #answer\n word_list = text.split(\" \")\n return (word_list.count(word))\n \n #return (text.count(word)) - deoesn't work",
"def freq(word, document):\n return document.split(None).count(word)",
"def word_frequency():\n\n song = open(\"data/yellow_submarine.txt\")\n d = dict()\n for line in song:\n line = line.strip()\n line = line.lower()\n punctuations = \"\"\"!()-[]{};:'\"\\,<>./?@#$%^&*_~\"\"\" # remove punctuation https://www.programiz.com/python-programming/examples/remove-punctuation\n no_punct = \"\" # remove punctuation\n for char in line: # remove punctuation\n if char not in punctuations: # remove punctuation\n no_punct = no_punct + char # remove punctuation\n words = line.split(\" \")\n for word in words:\n d[word] = d.get(word, 0) + 1\n return d",
"def count_words(self, clean_func=clean_up):\n return (\n len(clean_func(self.transcript_file.text()).split())\n if self.validate()\n else 0\n )",
"def count_words(input): \n return len(input.split())",
"def num_of_syllables(self, word):\n\n if word.lower() in self.cmu_dict:\n return len([phoneme for phoneme in self.cmu_dict[word.lower()][0]\n if phoneme[-1].isdigit()])\n # If word is unknown, assume 1 syllable/3 letters (average for English)\n else:\n return len(word)//3",
"def test_run():\r\n print(count_words(\"cat bat mat cat bat cat\", 3))\r\n print(count_words(\"betty bought a bit of butter but the butter was bitter\", 3))",
"def num_of_words(line, context):\n return [('num_of_word', len(line.txt.split()))]",
"def find_words_no_e():\n f = open('session09/words.txt')\n num_no_e = 0\n num_words = 0\n for line in f:\n num_words += 1\n word = line.strip()\n if has_no_e(word):\n # print(word)\n num_no_e += 1\n # print(num_no_e, num_words)\n return num_no_e/num_words",
"def wc(filename):\n f = open(filename, 'rt')\n data = f.readlines()\n f.close()\n word_count_tot = 0\n for s in data:\n words = s.split()\n word_count = len(words)\n word_count_tot = word_count_tot+word_count\n return word_count_tot",
"def words(self, path, filemoving, parser):\n root = parser.parsing_xml(path, filemoving)\n root_tag = root.tag[0:(root.tag.find('}') + 1)]\n number_of_words = 0\n for i in root.iter(root_tag+'p'):\n if str(type(i.text)) == \"<class 'str'>\":\n number_of_words = number_of_words + len(list(i.text.split()))\n return number_of_words",
"def count_words(stream):\n return sum(len(line.split()) for line in stream)",
"def average_word_length(self):\n len_words_only = [len(s) if s.isalpha() else 0 for s in self.text]\n if (len_words_only == 0):\n print('Input file contains no words.')\n return 0, 0, 0\n else:\n return sum(len_words_only) / len(len_words_only), median(len_words_only), mode(len_words_only)",
"def count_words(sentence):\n\tblob = tb.TextBlob(sentence.decode('utf-8','ignore'))\n\tword_list = [w for w in blob.words if '\\'' not in w]\n\treturn len(word_list)"
] | [
"0.7641895",
"0.74189013",
"0.7375139",
"0.7181407",
"0.71441495",
"0.70609474",
"0.70113283",
"0.6958899",
"0.6921988",
"0.6921822",
"0.6886437",
"0.6875009",
"0.6845606",
"0.68437195",
"0.68406963",
"0.6803877",
"0.67994726",
"0.67805064",
"0.6773257",
"0.67365235",
"0.6724534",
"0.6717556",
"0.6714447",
"0.668457",
"0.66709393",
"0.66696763",
"0.66659576",
"0.6643951",
"0.66283727",
"0.662353"
] | 0.764421 | 0 |
This function calculates how many words there are in all of the songs and albums, using the "get_len" function | def song_length(ans):
    length = 0
    flag = 1
    albums = simple_album_list()
    for album in albums:
        songs = simple_songs_list(album)
        for song in songs:
            if ans == song:
                words = dbase()[album][0][song]
                words = words[2]
                words = words.split()
                for word in words:
                    length += 1
                flag = 1
                return str(length)
            elif ans != song and flag == 0:
                return "song not found!" | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_len(song, album):\r\n length = 0\r\n words = dbase()[album][0][song]\r\n words = words[2]\r\n words = words.split()\r\n for word in words:\r\n length += 1\r\n return str(length)",
"def common():\r\n full_song = \"\"\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n full_song += str(song_lyrics(song))\r\n split_lyrics = full_song.lower().split()\r\n counter = collections.Counter(split_lyrics)\r\n most_words = counter.most_common(50)\r\n return most_words",
"def count_song(self):\n return len(self.playlist)",
"def song_lyrics(ans):\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n if ans == song:\r\n words = dbase()[album][0][song]\r\n words = words[2]\r\n return words",
"def get_number_of_words(self):\n filename = f'{self.path}/{self.filename}'\n # word_counter = {}\n # w_cnt = 0\n # x = 0\n file = open(filename, 'r', encoding='utf-8')\n data = file.read()\n head, sep, tail = data.partition('<binary')\n head = re.sub('\\\\s\\\\s*', ' ', (re.sub('\\\\W|\\\\d', ' ', re.sub('<.*?>', '', head))))\n word_list = head.split()\n # for word in word_list:\n # w_cnt += 1\n # if word not in word_counter:\n # word_counter[word] = 1\n # else:\n # word_counter[word] = word_counter[word] + 1\n\n # for word in word_list:\n # x += 1\n # print(word, word.isalpha(), x)\n\n w_cnt = sum([a[0].isalpha() for a in word_list])\n sqlite_for_ht.CreateTable.update_table(f_1, self.filename, 'number_of_words', w_cnt)\n print(datetime.now(), '-', 'number_of_words for', self.filename, 'calculated =', w_cnt)\n return None",
"def makeWordLengths(self):\r\n clean_s = self.cleanString(self.text)\r\n LoW = clean_s.split() \r\n for x in LoW: \r\n if len(x) not in self.wordlengths: \r\n self.wordlengths[len(x)] = 1\r\n else: \r\n self.wordlengths[len(x)] += 1\r\n return self.wordlengths",
"def makeSentenceLengths(self):\r\n count = 0\r\n LoW = self.text.split()\r\n list = []\r\n for x in range(len(LoW)): \r\n if '.' in LoW[x] or '?' in LoW[x] or '!' in LoW[x] : \r\n length = x\r\n list += [len(LoW[count: x+1])]\r\n count = length + 1\r\n for x in list:\r\n if x not in self.sentencelengths :\r\n self.sentencelengths[x] = 1\r\n else:\r\n self.sentencelengths[x] += 1",
"def count_words_and_dublicates(novel):",
"def __len__(self):\n return len(self.words)",
"def get_song_length_milliseconds(result):\n return int(result['metadata']['music'][0]['duration_ms'])",
"def test_song_counts(self):\n self.assertEqual(self.show.total_song_count, 19)\n self.assertEqual(self.show.set1_song_count, 9)\n self.assertEqual(self.show.set2_song_count, 8)\n self.assertEqual(self.show.set3_song_count, 0)\n self.assertEqual(self.show.encore_song_count, 1)\n self.assertEqual(self.show.encore2_song_count, 1)",
"def how_many_vocals(word):\n\n word= word.lower()\n result1 = word.count('a')\n result2 = word.count('e')\n result3 = word.count('i')\n result4 = word.count('o')\n result5 = word.count('u')\n\n total_result = result1 +result2+result3+result4+result5\n return total_result",
"def count_words(filename):",
"def count_words(all_articles):\n total_words = 0\n for title in all_articles:\n total_words += all_articles[title]['word-count']\n print(f\"There are {total_words} words written.\")",
"def average_word_length(self):\n len_words_only = [len(s) if s.isalpha() else 0 for s in self.text]\n if (len_words_only == 0):\n print('Input file contains no words.')\n return 0, 0, 0\n else:\n return sum(len_words_only) / len(len_words_only), median(len_words_only), mode(len_words_only)",
"def word_count(self):\n print(self.words())\n return len(self.words())\n #count = 0\n #for lines in self.lines:\n # line = lines.strip(os.linesep)\n # wordslst = line.split()\n # count += len(wordslst)\n #return count\n #joined_string = ''.join(self.lines)\n #for word in joined_string:\n # if word != ' ' and word != '\\n' and word != '\\t':\n # count += 1\n #print('READ ME ––––––––––', self.lines)\n #print(joined_string)\n #print(line)\n #print(wordslst)\n #print(count)",
"def count_all_words(file_name):\n\n return len(separate_words(file_name))",
"def _raw_word_count(self, job):\n return sum(len(sentence.words) for sentence in job)",
"def word_frequencies(url):\n\ttexts = get_all_texts(url)\n\tcount = count_words_in_sentence_list(texts)\n\treturn count",
"def count_words_in_file(file_name):\n\n\treturn len(get_words_in_file(file_name))",
"def count(word):\n\n return len(word)",
"def num_words():\n # Load the GT.\n df = pd.read_csv(config.META_FQN, sep=\"\\t\")\n stats = {\n \"T\": {\"words\": [], \"duration\": []},\n \"P\": {\"words\": [], \"duration\": []},\n \"sess\": {\"words\": [], \"duration\": []},\n }\n\n for _, row in df.iterrows():\n if row[\"asr_test\"]:\n stats[\"P\"][\"words\"].append(float(row[\"gt_patient_num_words\"]))\n stats[\"T\"][\"words\"].append(float(row[\"gt_therapist_num_words\"]))\n stats[\"P\"][\"duration\"].append(float(row[\"gt_patient_time_spoken\"]))\n stats[\"T\"][\"duration\"].append(\n float(row[\"gt_therapist_time_spoken\"])\n )\n stats[\"sess\"][\"duration\"].append(float(row[\"sess_dur\"]))\n n_words = (\n row[\"gt_therapist_num_words\"] + row[\"gt_patient_num_words\"]\n )\n stats[\"sess\"][\"words\"].append(n_words)\n\n for speaker in stats:\n for metric in stats[speaker]:\n print(f\"------ {speaker} | {metric} ------\")\n print_stats(stats[speaker][metric])",
"def __sent_len(self, title, text):\n total = 0\n text_sent = nltk.sent_tokenize(text)\n for sent in text_sent:\n total += len(nltk.word_tokenize(sent))\n return (len(nltk.word_tokenize(title)), total / len(text_sent))",
"def count_ngrams(self, corpus):\n \n self.unigramcounts = {} # might want to use defaultdict or Counter instead\n self.bigramcounts = {} \n self.trigramcounts = {} \n\n self.total = 2\n ##Your code here\n\n for sentence in corpus:\n temp_1 = get_ngrams(sentence,1)\n temp_2 = get_ngrams(sentence,2)\n temp_3 = get_ngrams(sentence,3)\n for i in range(len(temp_1)):\n if temp_1[i] in self.unigramcounts:\n self.unigramcounts[temp_1[i]] += 1\n else:\n self.unigramcounts[temp_1[i]] = 1\n self.total += 1\n\n for i in range(len(temp_2)):\n if temp_2[i] in self.bigramcounts:\n self.bigramcounts[temp_2[i]] += 1\n else:\n self.bigramcounts[temp_2[i]] = 1\n\n for i in range(len(temp_3)):\n if temp_3[i] in self.trigramcounts:\n self.trigramcounts[temp_3[i]] += 1\n else:\n self.trigramcounts[temp_3[i]] = 1\n return",
"def total_length():\n return",
"def num_of_syllables(self, word):\n\n if word.lower() in self.cmu_dict:\n return len([phoneme for phoneme in self.cmu_dict[word.lower()][0]\n if phoneme[-1].isdigit()])\n # If word is unknown, assume 1 syllable/3 letters (average for English)\n else:\n return len(word)//3",
"def wordCount( aList ):\n return len( aList )",
"def __len__(self):\n if self._words is None:\n return 0\n return len(self._words)",
"def count_ngrams(self, corpus):\n \n self.unigramcounts = defaultdict(int)\n self.bigramcounts = defaultdict(int)\n self.trigramcounts = defaultdict(int)\n\n self.sentence_counts = 0\n self.word_count = 0\n\n for line in corpus:\n sequence = line\n self.sentence_counts +=1\n\n unigrams = get_ngrams(sequence, n=1)\n for gram in unigrams:\n self.word_count += 1\n self.unigramcounts[gram] +=1\n\n bigrams = get_ngrams(sequence, n=2)\n for gram in bigrams:\n self.bigramcounts[gram] +=1\n\n trigrams = get_ngrams(sequence, n=3)\n for gram in trigrams:\n self.trigramcounts[gram] +=1\n\n #self.unigramcounts[('START')] = self.sentence_counts *2\n self.bigramcounts[('START', 'START')] = self.sentence_counts\n\n #return self",
"def get_length(self):\r\n check_mixer()\r\n frequency, format, channels = (ffi.new('int*'), ffi.new('uint16_t*'),\r\n ffi.new('int*'))\r\n sdl.Mix_QuerySpec(frequency, format, channels)\r\n if format == sdl.AUDIO_S8 or format == sdl.AUDIO_U8:\r\n mixerbytes = 1.0\r\n else:\r\n mixerbytes = 2.0\r\n numsamples = self.chunk.alen / mixerbytes / channels[0]\r\n return numsamples / frequency[0]"
] | [
"0.8198582",
"0.66658145",
"0.64683807",
"0.6397444",
"0.6326289",
"0.6298644",
"0.623144",
"0.62150586",
"0.62072754",
"0.61455053",
"0.6131659",
"0.61273545",
"0.6073577",
"0.60692155",
"0.6067708",
"0.60635406",
"0.60574627",
"0.59762967",
"0.5966263",
"0.59660995",
"0.5954424",
"0.59510195",
"0.59242725",
"0.5914967",
"0.58997446",
"0.58873224",
"0.5886054",
"0.5858932",
"0.58366907",
"0.58247346"
] | 0.81630695 | 1 |
This function returns the lyrics of a specific song | def song_lyrics(ans):
    albums = simple_album_list()
    for album in albums:
        songs = simple_songs_list(album)
        for song in songs:
            if ans == song:
                words = dbase()[album][0][song]
                words = words[2]
                return words | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def lyrics(self):\n return get_lyrics(self.artist, self.title,'')",
"def get_lyrics(artist, song, language='', linesep='\\n', timeout=None):\n return get_all_lyrics(artist, song, language, linesep, timeout)[0]",
"def get_lyrics(self):\n url = 'http://api.lyricsnmusic.com/songs?api_key=[5358b25688164e6c2f771954f17460&q]=' + self.artist + '%20' + self.name\n r = requests.get(url)\n r_text = r.text\n for (old, new) in [('false', 'False'), ('true', 'True'), ('null', 'None')]:\n r_text = r_text.replace(old, new)\n r_text_as_data = eval(r_text)\n if len(r_text_as_data) != 0:\n r_text_dict = r_text_as_data[0]\n return r_text_dict['snippet']\n else:\n return ''",
"def get_lyrics(self):\n\t\treturn self._lyrics_list",
"def get_single_lyrics(self, url):\n r = requests.get(url)\n soup = BeautifulSoup(r.text, 'html.parser')\n lyrics = ''\n \n all_divs = soup.findAll('div')\n filtered_divs = [x for x in all_divs if x.has_attr('class') and ('Lyrics__Container-sc' in x['class'][0] or x['class'] == ['lyrics'])]\n filtered_divs_classes = [x['class'] for x in filtered_divs if x.has_attr('class')]\n \n if len(filtered_divs) == 0:\n lyrics = ''\n elif len(filtered_divs) == 1 and filtered_divs_classes[0][0] == 'lyrics':\n lyrics = filtered_divs[0].text\n else:\n for part in filtered_divs:\n for e in part.descendants:\n if isinstance(e, str):\n lyrics += e.strip()\n elif e.name == 'br' or e.name == 'p':\n lyrics += '\\n'\n \n return lyrics",
"def lyrics(self) -> str:\n if self._lyrics == None:\n response = self._web_session.request(\"GET\", self.path)\n soup = BeautifulSoup(response.content, \"html.parser\")\n lyrics_div = soup.find(\"div\", class_=re.compile(\"^lyrics$|Lyrics__Root\"))\n if lyrics_div:\n self._lyrics = lyrics_div.get_text(separator=\"\\n\")\n else:\n self._lyrics = \"\"\n\n return self._lyrics",
"def get_lyrics(self, artist, song):\n\n # Disable lyrics display\n self.status_bar.hide()\n self.lyrics_view.hide()\n self.scroll.hide()\n\n lyrics = None\n in_database = False\n\n if self.database.status: # Testing connection to database\n lyrics = self.database.retrieve_lyrics(artist, song)\n if lyrics: # False if not found in database\n in_database = True\n\n if not lyrics: # Try next to retrieve from web\n url = self.make_url(artist, song)\n try:\n lyrics = self.fetch_lyrics(url)\n except:\n self.display_message('Internet Connection Problem') # Could not connect to internet\n return\n\n if not lyrics: # Not available in database or on web\n self.display_message('Lyrics Not Available')\n else:\n # Set the display\n lyrics_buffer = self.lyrics_view.get_buffer()\n lyrics_buffer.set_text(lyrics)\n\n if not in_database: # Save if not in database\n self.database.save(artist, song, lyrics)\n\n # Re-enable lyrics display\n self.scroll.show()\n self.lyrics_view.show()\n self.display_message('Lyrics Extracted Successfully')",
"async def lyrics(\n self, ctx: commands.Context, query: str = None\n ) -> Optional[Tuple[str, str, str]]:\n\n query = await self.now_playing(ctx) if query is None else query\n if not query:\n return\n\n url = f\"https://some-random-api.ml/lyrics?title={query}\"\n\n async with aiohttp.ClientSession() as session:\n request = await session.get(url)\n request_json = await request.json(content_type=None)\n\n authors = request_json.get(\"author\")\n title = request_json.get(\"title\")\n lyrics = request_json.get(\"lyrics\")\n\n return (title, authors, lyrics) if lyrics else None",
"def getLyricsFromSong(songLink):\n try:\n html = urllib.request.urlopen(songLink).read()\n soup = bs4.BeautifulSoup(html, 'html.parser')\n soup = soup.find(\"pre\")\n text = soup.contents[0].strip().split(\"\\n\")[5:]\n except:\n return \".\\n.\"\n\n clean_text = \"\"\n lyrics_list=[]\n for line in text:\n lyrics_list.append(line)\n clean_text += line + \"\\n\"\n return clean_text",
"def get_existing_lyrics(self, song_id):\n\t\tlyrics = self.db.lyrics.find_one({'song_id': song_id})['lyrics']\n\t\treturn lyrics",
"async def _lyrics(self, ctx: commands.Context):\n if not ctx.voice_state.is_playing:\n raise commands.CommandError('Nothing being played at the moment.')\n\n # Get song name listed on youtube\n song_title = ctx.voice_state.current.source.track\n if not song_title:\n return await ctx.send(\"Couldn't find lyrics for this track!\")\n\n song_title = re.sub(\"[(\\[].*?[)\\]]\", \"\", song_title).strip() # Remove parenthesis from song title\n # Get artist name listed on youtube\n artist_name = ctx.voice_state.current.source.artist\n # Instance of GeniusSong class using the Genius API\n genius_song = GeniusSong(song_title, artist_name)\n # Try getting the lyrics using the lyricsgenius library\n lyrics = genius_song.fastlyrics()\n\n # In case of no lyrics found. Use the other (slower) method\n if not lyrics:\n res = genius_song.get_response() # Generate a response using the Genius API to get the songs\n if res:\n # Find the most similar artist comparing the artist on YouTube and Genius\n artist_name = genius_song.return_similar_artist(res)\n # Artist didn't match\n if not artist_name:\n await ctx.send(\"Couldn't find similar artists. The lyrics might not be the expected.\")\n\n # Get the lyrics using the lyricsgenius library with the new artist\n lyrics = genius_song.fastlyrics(artist_name)\n\n else:\n return await ctx.send(\n \"**Error!**\\nThere is a problem with Genius.\\nTry again in a few minutes. \"\n \"\\nYou can also try the command `fastlyrics`.\")\n\n if lyrics:\n # Split lyrics into fields\n fields = genius_song.split_lyrics(lyrics)\n # Create an embed message\n embed = embed_msg(\n title=song_title.capitalize() + \"\\n{}\".format(artist_name),\n description=\"\",\n footer=\"Lyrics provided by Genius.\",\n field_values=fields,\n inline=False\n )\n return await ctx.send(embed=embed)\n\n return await ctx.send(\"Lyrics couldn't be found.\")",
"def song_lyrics(message):\n spaceless_message = message.partition(' ')[2]\n if \":\" in spaceless_message:\n # Try and find the lyrics.\n band_name = spaceless_message[0]\n song_title = spaceless_message[2]\n\n response = requests.get(\"https://api.lyrics.ovh/v1/{}/{}\".format(band_name, song_title))\n\n if response.status_code == 404:\n return response.json()['error']\n return response.json()['lyrics']\n else:\n return (\"Message not formatted correctly. Please try again using this format:\\nlyrics band name:song title\")",
"def get_lyrics(self, html):\n #gets lyricks trough a css selector\n raw_lyrics = html.select('div[class*=\"Lyrics__Container\"]')\n if len(raw_lyrics) == 0:\n raw_lyrics = html.select('div[class=\"lyrics\"]')\n\n lyrics = [] \n for lyric in raw_lyrics:\n temp_lyrics = re.sub(r'[\\(\\[].*?[\\)\\]]', '', lyric.get_text()).strip()\n temp_lyrics = re.sub('\\n+', '', temp_lyrics)\n lyrics.append(re.findall('[A-Z][^A-Z]*', temp_lyrics))\n\n all_words = ''\n # Format lyrics \n for section in lyrics:\n if len(section) == 0:\n continue\n\n for verse in section:\n all_words += verse.strip() + \"[*]\"\n \n return all_words",
"def lyrics_plaintext(song):\n output = \"\"\n\n song = validate_song(song)\n\n output += song.default_arrangement\n output += \"\\n\\n\\n\\n\"\n output += song.composer\n output += \"\\n\"\n output += song.copyright\n output += \"\\n\\n\"\n\n for section, lyrics in song.lyrics.items():\n output += section\n output += \"\\n\"\n output += lyrics\n output += \"\\n\\n\"\n return output",
"def scrape_song(url):\n soup = scrapekit.handle_url(url)\n\n contents = scrape_id_to_div(soup, \"Lyrics\")\n if not contents:\n return None\n\n filetext = ''.join(c.text for c in contents)\n\n # Check if there is a reprise\n REPRISE = 'Reprise'\n\n reprise = soup.find(id=REPRISE)\n if reprise:\n filetext += '\\n\\n'\n filetext += REPRISE + ':\\n\\n'\n\n contents = scrape_id_to_div(soup, REPRISE)\n filetext += ''.join(c.text for c in contents)\n\n # Get song title, fix blank spaces for file name\n songtitle = soup.title.text.split('|')[0]\n\n song_text = ''\n song_text += 'Song: {}\\n'.format(songtitle)\n song_text += get_infobox_info(soup)\n song_text += '\\n\\n'\n song_text += filetext\n\n return song_text",
"def get_lyrics(self, name: str, artists: List[str], **_) -> Optional[str]:\n\n # Join every artist by comma in artists\n artist_str = \", \".join(artist for artist in artists if artist)\n\n song_name = name.replace(\" \", \"+\").lower()\n song_artists = artist_str.replace(\" \", \"+\").lower()\n song_artists = song_artists.replace(\",\", \"%2C\")\n\n url = f\"https://search.azlyrics.com/search.php?q={song_name}+{artists}\"\n\n response = requests.get(url, headers=self.headers)\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n td_tags = soup.find_all(\"td\")\n if len(td_tags) == 0:\n return None\n\n result = td_tags[0]\n\n a_tags = result.find_all(\"a\", href=True)\n if len(a_tags) != 0:\n lyrics_url = a_tags[0][\"href\"]\n else:\n return None\n\n if lyrics_url.strip() == \"\":\n return None\n\n response = requests.get(lyrics_url, headers=self.headers)\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n # Find all divs that don't have a class\n div_tags = soup.find_all(\"div\", class_=False, id_=False)\n\n # Find the div with the longest text\n lyrics_div = sorted(div_tags, key=lambda x: len(x.text))[-1]\n\n lyrics = lyrics_div.get_text()\n\n # Remove the 3 first new lines\n lyrics = lyrics[3:]\n\n return lyrics",
"def lyrics_by_word(ans):\r\n songs_list = \"\"\r\n ans = ans.lower()\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n x = song_lyrics(song)\r\n song = str(song)\r\n if ans in x:\r\n songs_list += song + \", \"\r\n return songs_list[:-2]",
"def azlyrics(song, artist):\n song = song.replace(\" \", \"\")\n artist = artist.replace(\" \", \"\")\n url = 'http://www.azlyrics.com/lyrics/' + artist + '/' + song + '.html'\n html_text = urllib.urlopen(url).read()\n soup = BeautifulSoup(html_text, \"lxml\")\n find_lyrics = soup.find_all(\"div\")\n div = [x for x in find_lyrics if str(x).find(\"class=\") == -1]\n if(len(div) > 1):\n return div[1]\n else:\n return -1",
"def get_lyrics_by_language(artist, song, language, linesep='\\n', timeout=None):\n return get_all_lyrics(artist, song, language, linesep, timeout)[0]",
"def get_all_lyrics(artist, song, language='', linesep=' \\n ', timeout=None):\n url = create_url(artist, song, language)\n response = _requests.get(url, timeout=timeout)\n soup = _BeautifulSoup(response.content, \"html.parser\")\n lyricboxes = soup.findAll('div', {'class': 'lyricbox'})\n\n if not lyricboxes:\n raise LyricsNotFound('Cannot download lyrics')\n\n for lyricbox in lyricboxes:\n for br in lyricbox.findAll('br'):\n br.replace_with(linesep)\n\n return [lyricbox.text.strip() for lyricbox in lyricboxes]",
"def get_lyrics(self) -> Optional[str]:\n return self.lyrics",
"def get_existing_lyrics_of_artist(self, artist_name=None, artist_id=None):\n\t\tif artist_name:\n\t\t\tsongs = self.db.artists.find_one({'name': str(artist_name).lower()})\n\t\t\tlyrics = []\n\t\t\tfor song in songs:\n\t\t\t\tlyrics.append((song, self.get_existing_lyrics(song)))\n\t\t\treturn lyrics\n\t\tif artist_id:\n\t\t\tsongs = self.db.artists.find_one({'id': artist_id})['songs']\n\t\t\tprint(len(songs))\n\t\t\tlyrics = []\n\t\t\tfor song in songs:\n\t\t\t\ttry:\n\t\t\t\t\tlyrics.append((song, self.get_existing_lyrics(song)))\n\t\t\t\texcept:\n\t\t\t\t\tcontinue\n\t\t\treturn lyrics",
"def get_lyrics_mouritz(artist, song_title, use_spotify_api, rapidapi_key):\n\n # \"featuring\" makes the string messy and Spotify API can find the song\n # without this info\n artist = artist.lower().split(\"feat\", 1)[0].strip()\n song_title = song_title.lower().strip()\n\n url = \"https://mourits-lyrics.p.rapidapi.com\"\n\n headers = {\n \"x-rapidapi-host\": \"mourits-lyrics.p.rapidapi.com\",\n \"x-rapidapi-key\": rapidapi_key,\n }\n\n if use_spotify_api:\n payload = {\"q\": artist + \" \" + song_title}\n else:\n payload = {\"a\": artist, \"s\": song_title}\n try:\n r = requests.get(url, params=payload, headers=headers)\n lyric = r.json()[\"result\"][\"lyrics\"]\n\n return lyric, \"mourits\"\n\n except Exception:\n return None, None",
"def get_lyrics_or_chords(url):\n html = ''\n\n if 'cifraclub' in url:\n if url.startswith('https://m.'):\n url = 'https://www.' + url[10:] # So we don't have to deal with mobile URLs\n url += 'imprimir.html#columns=false' # Printer Friendly page (it's cleaner)\n soup = getsoup(url)\n sections = soup.find_all('pre')\n for s in sections:\n html += str(s)\n\n if 'letras.mus.br' in url:\n if url.startswith('https://m.'):\n url = 'https://www.' + url[10:] # So we don't have to deal with mobile URLs\n soup = getsoup(url)\n article = soup.find('article')\n html = str(article)\n\n if 'e-chords' in url:\n soup = getsoup(url)\n pre = soup.find('pre', id='core')\n # Remove Tab Div, keep raw tab\n div = pre.find('div')\n if div is not None:\n tab = div.find('div', class_='tab')\n html = '<pre>' + tab.text + '</pre>'\n div.extract()\n html += str(pre)\n\n if 'freak' in url:\n soup = getsoup(url)\n content = soup.find('div', id='content_h')\n html = str(content)\n\n return html",
"def fetch_lyrics(self, url):\n # data=requests.get(url,proxies=proxyDict) # will be used when internet is accessed via proxy server\n page = requests.get(url) # for accessing internet without proxy server\n # Regex out the lyrics from the html content\n lyrics = re.search(b'<!-- start of lyrics -->(?:\\r\\n)+(.+)(?:\\r\\n)+<!-- end of lyrics -->', page.content, re.DOTALL)\n if lyrics:\n # Strip html tags from decoded lyrics\n return re.sub(r'<.+>', '', lyrics.group(1).decode('utf8'))\n else:\n return None",
"def song_has_lyrics():\n pass",
"def get_lyrics_text(lyrics_url):\n\n headers = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n 'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',\n 'Accept-Encoding': 'none',\n 'Accept-Language': 'en-US,en;q=0.8',\n 'Connection': 'keep-alive'\n }\n\n page_html = utils.get_request(lyrics_url, headers=headers)\n if page_html:\n bsoup = BeautifulSoup(page_html, \"lxml\")\n lyrics_text = bsoup.find('div', {'class': 'lyrics'}).text\n return utils.ascii_string(lyrics_text)\n else:\n return None",
"def get_lyrics_for_all_languages(artist, song, linesep='\\n', timeout=None):\n url = create_url(artist, song, '')\n response = _requests.get(url, timeout=timeout)\n soup = _BeautifulSoup(response.content, \"html.parser\")\n lyricboxes = soup.find('table', {'class': 'banner banner-song'})\n result = dict()\n result['default'] = get_lyrics_by_language(artist, song, '', linesep='\\n', timeout=None)\n \n for a in lyricboxes.findAll('a', href=True):\n result[a.getText()] = get_lyrics_by_language(artist, song, a['href'].split('/')[-1], linesep='\\n', timeout=None)\n \n return result",
"def fetch_lyrics(self) -> None:\n if self.artist is None or self.title is None:\n return\n Logger.Logger.log('Looking for song lyrics...')\n finder = LyricsFinder.LyricsFinder(self)\n finder.fetch()\n self.lyrics = finder.get_lyrics()\n self.lyrics_writer = finder.get_lyrics_writer()\n if not self.lyrics:\n Logger.Logger.log('No lyrics found for this song.')",
"def get_genius_page(self, artist: str, song: str) -> str:\n\n artist = self.just_replace_strings_with_dashes(artist)\n song = self.just_replace_strings_with_dashes(song)\n\n url = self.gen_url + artist + '-' + song + '-lyrics'\n\n resp = requests.get(url)\n\n if resp.status_code == 200:\n try:\n content = bs4.BeautifulSoup(resp.content)\n lyrics = content.text[content.text.rindex(\n '[Verse 1]'):content.text.index('Embed')]\n lyrics = self.clean_lyrics_response(lyrics)\n return lyrics\n\n except (ValueError, IndexError) as e:\n print('Lyrics not found {}, due to error {}'.format(song, e))\n\n try:\n lyrics = content.text[content.text.rindex(\n '[Verse]'):content.text.index('Embed')]\n lyrics = self.clean_lyrics_response(lyrics)\n return lyrics\n\n except ValueError as e:\n print(\n 'Lyrics not found {}, due to error {}, single verse song'.format(song, e))"
] | [
"0.82025915",
"0.7692463",
"0.760172",
"0.72425085",
"0.71974313",
"0.71780723",
"0.7152949",
"0.71083266",
"0.70352906",
"0.7034073",
"0.7004775",
"0.6972529",
"0.69618744",
"0.6934143",
"0.6913037",
"0.68426704",
"0.68249315",
"0.6794879",
"0.6770472",
"0.67184097",
"0.6686466",
"0.6625731",
"0.6570719",
"0.6562009",
"0.6555208",
"0.6540905",
"0.6511219",
"0.64437807",
"0.6421905",
"0.6397985"
] | 0.81326777 | 1 |
This function finds what album the song is in | def song_album(ans): 
albums = simple_album_list()
for album in albums:
songs = simple_songs_list(album)
for song in songs:
if ans == song:
return album | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_album_playlist(data):\n\n return data['album'].lower() + '.m3u'",
"def search_for_album(album_name):\n\n print(f'Searching for album: {album_name}')\n\n search_result = spotifyObject.search(q=f'\"{album_name}\"', limit=20, type='album')\n\n items = search_result['albums']['items']\n\n results = []\n\n for item in items:\n if len(item['artists']) > 1:\n artists = tuple(art['name'] for art in item['artists'])\n else:\n artists = item['artists'][0]['name']\n\n results.append((artists, item['name'], item['id']))\n\n return results",
"def find_album(self):\n item = self.clementine_albums.currentItem()\n if not item:\n self.focus_albums()\n item = self.clementine_albums.currentItem()\n if item.text(0) in self.albums_map[self.c_artist]:\n ok = qtw.QMessageBox.question(self, self.appname, 'Album already has a '\n 'match - do you want to reassign?',\n qtw.QMessageBox.Yes | qtw.QMessageBox.No,\n qtw.QMessageBox.Yes)\n if ok == qtw.QMessageBox.No:\n return\n self.albums_map[self.c_artist].pop(item.text(0))\n # select albums for self.a_artist and remove the ones that are already matched\n albums = dmla.list_albums_by_artist('', self.a_artist, 'Titel')\n album_list = []\n for album in albums:\n test = album.id\n found = False\n for a_item in self.albums_map[self.c_artist].values():\n if a_item[1] == test:\n found = True\n break\n if not found:\n album_list.append((build_album_name(album), album.id))\n if album_list:\n albums = [x[0] for x in album_list]\n selected, ok = qtw.QInputDialog.getItem(self, self.appname, 'Select Album',\n albums, editable=False)\n if ok:\n a_item = self.albums_albums.findItems(\n str(album_list[albums.index(selected)][1]),\n core.Qt.MatchFixedString, 2)[0]\n c_year = str(item.data(0, core.Qt.UserRole))\n if c_year:\n a_year = a_item.text(1)\n if c_year != a_year:\n ask = f\"Clementine year ({c_year}) differs from Albums year ({a_year})\"\n ok = qtw.QMessageBox.question(self, self.appname, f\"{ask}, replace?\",\n qtw.QMessageBox.Yes | qtw.QMessageBox.No,\n qtw.QMessageBox.Yes)\n if ok == qtw.QMessageBox.Yes:\n a_item.setText(1, c_year)\n\n self.albums_to_update[self.c_artist].append(\n (a_item.text(0), a_item.text(1), int(a_item.text(2)), False, []))\n self.update_item(a_item, item)\n return\n self.add_album()",
"def album(self, q, page=None):\r\n return self.get('album', q, page)",
"def search_albums(self, needle):\n return self._album_search.search(searchable(needle))",
"def find_by_name(our_data,name):\n for album in our_data:\n if album['album'] == name:\n return album\n return None",
"def get_albums(self):\n self.artist = self.artists_list.currentText()\n self.c_albums = [x['album'] for x in dmlc.list_albums(self.artist)\n if [x['album'] in self.albums_map[self.artist]]]\n self.albums_list.clear()\n self.albums_list.addItems(self.c_albums)\n self.update_navigation_buttons()",
"async def search_song(album_name):\n async with aiohttp.ClientSession() as session:\n async with session.get('https://bandcamp.com/api/fuzzysearch/1/autocomplete?q=' + album_name) as resp:\n response = await resp.json()\n\n results = response.get('auto', {}).get('results', [])\n results = [res for res in results if res.get('type') == 't']\n if not results:\n raise NotFound\n result = results[0]\n async with session.get(result.get('url', 'https://bandcamp.com/')) as resp:\n response = await resp.text()\n try:\n result['release_date'] = response.split('album_release_date: \"')[-1].split('\",')[0].split(':')[0]\n except:\n result['release_date'] = '01 Jan 1970 00'\n result['TrackAlbum'] = bs4.BeautifulSoup(response, 'html.parser').find('span', itemprop='inAlbum').text.strip()\n\n return BandcampSong(result)",
"def media_album_name(self):\n return self.coordinator.data.nowplaying[self.zone.SourceID].CurrSong.Album",
"def get_songs_by_album(self, album_id):\n return self.__get('song', album_id)",
"def get_album(self):\n return self._album",
"def test_get_songs_by_album(self, track_elms, service_config, request):\n album_id = uuid.UUID(avalon.compat.to_uuid_input('f83fdec7-510f-44a5-87dc-61832669a582'))\n service_config.track_store.get_by_album.return_value = track_elms\n service_config.id_cache.get_album_id.return_value = album_id\n request.args['album'] = 'Album'\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_songs(params)\n\n assert results == track_elms, 'Expected matching tracks returned'\n service_config.track_store.get_by_album.assert_called_with(album_id)",
"def get_album(self) -> Optional[str]:\n return self.album",
"def song_lyrics(ans):\r\n albums = simple_album_list()\r\n for album in albums:\r\n songs = simple_songs_list(album)\r\n for song in songs:\r\n if ans == song:\r\n words = dbase()[album][0][song]\r\n words = words[2]\r\n return words",
"def get_albums_by_artist(albumtype, search_for, sort_on):\n return list(dmla.list_albums_by_artist(albumtype, search_for, sort_on))",
"def get_albums(self):\n artist = self.get_request_arg(\"artist\")\n if artist:\n lib = self.ctrl.library\n lst = sorted(self.ctrl.library.get_albums(artist))\n albums = [{\"artist\": artist,\n \"album\": album,\n \"path\": lib.get_path(artist, album)} for album in lst]\n if lst:\n return self.resp_from_data(albums)\n return self.resp_from_data(\n {\"message\": f\"No album found for artist={artist}\"}, 400)",
"def get_album(album_id):\n return query_single(album_id, Album, album_schema)",
"def get_album_by_id(self, album_id):\n self.app.curs.execute('select * from album where alid=%s', (album_id,))\n if self.app.curs.rowcount == 1:\n return self.app.curs.fetchone()\n else: # pragma: no cover\n return None",
"def read_album_tracks(id, artist_name, album_name):\n list_a = [x.name for x in dmla.list_tracks(id)]\n list_c = [x['title'] for x in dmlc.list_tracks_for_album(artist_name, album_name)\n if x['track'] != -1]\n return list_a, list_c",
"def album(self):\n return self.getItunesAttribute('Album')",
"def simple_songs_list(name_of_album):\r\n songs = []\r\n data1 = dbase()\r\n data1 = data1[name_of_album][0]\r\n for song in data1.keys():\r\n songs += [song]\r\n return songs",
"def album_assignment(self):\n log.debug(\"Called album_assignment for %s.\" % self.name)\n self.success = False\n for splitter in splitters:\n if splitter in self.name:\n self.artist, self.album = self.name.split(splitter, 1) # May need to look at this again, can be more than 1!\n self.success = True\n break\n if self.success:\n results = self.sp.search(q='artist: ' + self.artist + 'album: ' + self.album, type='album', limit=1)\n if results['albums']['total'] >= 1:\n for items in results['albums']['items']:\n self.album = items['name']\n self.album_uri = items['uri']\n for artist in items['artists'][0]:\n self.artist = artist['name']\n self.artist_uri = artist['uri']\n else:\n self.success = False",
"async def search_album(album_name):\n async with aiohttp.ClientSession() as session:\n async with session.get('https://bandcamp.com/api/fuzzysearch/1/autocomplete?q=' + album_name) as resp:\n response = await resp.json()\n\n results = response.get('auto', {}).get('results', [])\n results = [res for res in results if res.get('type') == 'a']\n if not results:\n raise NotFound\n result = results[0]\n async with session.get(result.get('url', 'https://bandcamp.com/')) as resp:\n response = await resp.text()\n try:\n result['release_date'] = response.split('album_release_date: \"')[-1].split('\",')[0].split(':')[0]\n except:\n result['release_date'] = '01 Jan 1970 00'\n result['track_list'] = [getattr(aa.find('span'), 'text', '') for aa in bs4.BeautifulSoup(response, 'html.parser').find('table', {'class':'track_list'}).find_all('tr')]\n\n return BandcampAlbum(result)",
"def tracked_albums():\n print('Your Google Photos Albums ([X] = tracked):')\n albums = get_albums(service)\n for i, a in enumerate(albums):\n check = 'X' if a.id in library.get_album_ids() else ' '\n print('[{}] {}. {}'.format(check, i+1, a.title))\n return albums",
"def search_album_art(artist, title, select_index=0, return_all=False):\r\n # TODO: add soundcloud search as well if spotify comes up with no results.\r\n # Soundcloud has it disabled\r\n artist, title = parse.quote(artist), parse.quote(title)\r\n header = {'Authorization': 'Bearer ' + get_spotify_access_token()}\r\n # TODO: search through playlists too\r\n links = []\r\n links_set = set()\r\n for code in COUNTRY_CODES:\r\n url = f'https://api.spotify.com/v1/search?q={title}+artist:{artist}&type=track&market={code}'\r\n r = requests.get(url, headers=header).json()\r\n if 'tracks' in r:\r\n links_from_country = [item['album']['images'][0]['url'] for item in r['tracks']['items']]\r\n for link in links_from_country:\r\n if link not in links_set:\r\n links.append(link)\r\n links_set.add(link)\r\n if return_all: return links\r\n return links[0]",
"def test_get_songs_by_album_id(self, track_elms, service_config, request):\n album_id = uuid.UUID(avalon.compat.to_uuid_input('37cac253-2bca-4a3a-be9f-2ac655e04ad8'))\n service_config.track_store.get_by_album.return_value = track_elms\n request.args['album_id'] = six.text_type(album_id)\n params = avalon.web.request.Parameters(request)\n\n service = avalon.web.services.AvalonMetadataService(service_config)\n results = service.get_songs(params)\n\n assert results == track_elms, 'Expected matching tracks returned'\n service_config.track_store.get_by_album.assert_called_with(album_id)",
"def get_albums_by_artist(self, artist_id):\n return self.__get('album', artist_id)",
"def album_detection(user):\n seen_tracks = user.seen_tracks\n\n list_of_albums = {}\n album_number_of_tracks = {}\n for track in seen_tracks:\n if \"album\" not in track:\n continue\n\n if track[\"name\"] == \"sanjake\":\n continue\n\n album_name = track[\"album\"][\"name\"]\n if album_name not in list_of_albums:\n list_of_albums[album_name] = 0\n album_number_of_tracks[album_name] = track[\"album\"][\"total_tracks\"]\n list_of_albums[album_name] += 1\n\n if list_of_albums[album_name] > 1 and list_of_albums[album_name] == album_number_of_tracks[album_name]:\n print(f\"Album search detected: {album_name}, number of tracks: {album_number_of_tracks[album_name]}\")\n print(f\"User: {user.email_address}\")",
"def add_songs(self, name, year, title):\n\n album_found = find_object(name, self.album)\n if album_found is None:\n print(\"Not Found \" + name)\n album_found = Album(name, year, self.name)\n self.add_album(album_found)\n else:\n print(\"Found album \"+name)\n\n album_found.add_songs(title)",
"def get_albums_by_text(albumtype, search_type, search_for, sort_on):\n if albumtype == 'studio':\n search_on = {0: '*', 2: 'name', 3: 'produced_by', 4: 'credits', 5: 'bezetting'}[search_type]\n elif albumtype == 'live':\n search_on = {0: '*', 2: 'name', 3: 'name', 4: 'produced_by', 5: 'bezetting'}[search_type]\n return list(dmla.list_albums_by_search(albumtype, search_on, search_for, sort_on))"
] | [
"0.7356128",
"0.72707754",
"0.7060411",
"0.7050987",
"0.69334686",
"0.6912387",
"0.67691773",
"0.66861814",
"0.66782176",
"0.66549325",
"0.66268027",
"0.6598319",
"0.65646094",
"0.6564256",
"0.6559331",
"0.6554879",
"0.65491766",
"0.65131474",
"0.6505864",
"0.64808756",
"0.64737844",
"0.6442853",
"0.6438412",
"0.6435556",
"0.6430831",
"0.6395764",
"0.63881505",
"0.63713944",
"0.63334763",
"0.6317665"
] | 0.83801645 | 0 |
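Aside: the song_album document above relies on helpers (dbase, simple_album_list, simple_songs_list) that never appear in the record itself. The sketch below is a minimal, self-contained illustration of one plausible data layout — inferred from the sibling snippets simple_songs_list and song_lyrics, where dbase() maps an album name to a list whose first element is a dict of song -> tuple — and is an assumption for illustration, not code from the corpus.

def dbase():
    # Hypothetical stand-in: {album: [{song: (artist, year, lyrics)}]}
    return {
        "Album A": [{"Song 1": ("Artist", 1999, "la la la"),
                     "Song 2": ("Artist", 1999, "na na na")}],
        "Album B": [{"Song 3": ("Artist", 2001, "doo doo doo")}],
    }

def simple_album_list():
    return list(dbase().keys())

def simple_songs_list(name_of_album):
    return list(dbase()[name_of_album][0].keys())

def song_album(ans):
    # Walk every album's track list; return the first album that contains the song.
    for album in simple_album_list():
        if ans in simple_songs_list(album):
            return album

print(song_album("Song 3"))  # -> "Album B"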
This function makes a list of the top 50 most common words across all songs | def common(): 
full_song = ""
albums = simple_album_list()
for album in albums:
songs = simple_songs_list(album)
for song in songs:
            full_song += str(song_lyrics(song)) + " "  # separator so the last word of one song doesn't merge with the next
split_lyrics = full_song.lower().split()
counter = collections.Counter(split_lyrics)
most_words = counter.most_common(50)
return most_words | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def top_k_frequent(top_k, words, list_of_texts):\n dict_top_freq = {}\n for word in words:\n dict_top_freq[word.lower()] = 0\n for string in list_of_texts:\n if word.lower() in string.lower():\n counter = string.lower().count(word.lower())\n dict_top_freq[word.lower()] += counter\n\n list_top_sorted = sorted(dict_top_freq.items(), key=lambda item: item[1], reverse=True)\n print(list_top_sorted)\n\n list_k = []\n for i in list_top_sorted:\n list_k.append(i[0])\n\n return list_k[:top_k]",
"def topCommonwords(self,value=5):\n out=self.df.withColumn('word', explode(split(col('name'), ' '))) \\\n .withColumn('norm_word',trim(regexp_replace('word','[^a-zA-Z0-9 ]', ''))) \\\n .filter(col('norm_word') !='')\\\n .groupBy('norm_word')\\\n .count()\\\n .sort('count', ascending=False)\\\n .select('norm_word').limit(value)\n out.withColumnRenamed('norm_word','Top english name in pubname').write \\\n .mode(\"overwrite\").csv('{}pubname/'.format(self.target))\n\n return out.rdd.map(lambda l:l.norm_word).collect()",
"def test_get_top_n_words_same_frequency(self):\n expected = ['happy', 'man']\n actual = get_top_n_words({'happy': 2, 'man': 2}, 2)\n self.assertEqual(expected, actual)\n expected = ['happy']\n actual = get_top_n_words({'happy': 2, 'man': 2}, 1)\n self.assertEqual(expected, actual)",
"def most_common(self, number=10):\n\n words_full_list = []\n\n for string in self.__corpora:\n words_full_list += string.split()\n\n print(Counter(words_full_list).most_common(number))",
"def find_frequent_words(words, most_frequent): \n \n # common_words = Counter(sorted(words))\n # print common_words\n common_words = Counter(sorted(words)).most_common(most_frequent)\n print (common_words )\n most_common_words = [w for w, w_count in common_words]\n return most_common_words",
"def top_50():\r\n file_read = read_file()\r\n vacabulary_list = []\r\n for key in file_read:\r\n vacabulary_list.extend(file_read[key])\r\n top_50 = Counter(vacabulary_list).most_common(50)\r\n return (top_50)",
"def display_top_n_words(total_count__of_words, n): # Considering n=10 here as specified in the requirements\n return sorted(total_count__of_words.items(), key=lambda i: i[1], reverse=True)[:n]",
"def most_common(filename,n):\n\tfreq_dict = dictionary_creation(filename)\n\tt = []\n\tfor key, value in freq_dict.items():\n\t\tt.append((value,key))\n\t\tt.sort(reverse=True)\n\twordlist = []\n\tfreqlist = []\n\tprint n, 'most common words:'\n\tfor freq,word in t[0:n]:\n\t\tprint word,'\\t', freq\n\t\twordlist.append(word)\n\t\tfreqlist.append(freq)\n\treturn wordlist,freqlist",
"def get_top_words(input_string):\n # count the words\n top_words = Counter(input_string)\n # order the words in descending order\n top_words_ordered = sorted(top_words.items(), key=operator.itemgetter(1), reverse=True)\n # keep the top twenty elements\n top_twenty = top_words_ordered[0:20]\n print(top_twenty)\n return top_twenty",
"def top_sentences(query, sentences, idf, n):\n ll=[]\n for s in sentences:\n st=sentences[s]\n st=[word.lower() for word in st]\n found_word=0\n total_idf=0\n\n for word in query:\n if word in st:\n total_idf+=idf[word]\n found_word+=1 \n ll.append((total_idf,found_word/len(st),s))\n ll.sort(reverse=True)\n #print(ll)\n ans=[]\n for i in range(n):\n ans.append(ll[i][2])\n #print(\"answer is : \",*ans)\n return ans",
"def getTopKCounter(a, K):\n # r = []\n # for i in a:\n # r.extend(i)\n c = Counter(a)\n words = [i[0] for i in c.most_common(K)]\n return words",
"def most_common_words(counts, n=-1):\n\n result = sorted(list(counts.items()), key=lambda x: x[1], reverse=True)\n\n if n == -1:\n return result\n else:\n return result[:n]",
"def get_top_n_words(column, n):\r\n frequencies = Counter()\r\n column.str.lower().str.split().apply(frequencies.update)\r\n return frequencies.most_common(n)",
"def most_similar(self, words: [str], top_n=3, metric='cosine') -> [(str, float)]:\n if len(words) == 0:\n return []\n\n vec = self.mean(words)\n if numpy.count_nonzero(vec) == 0:\n return []\n\n return [w for w, sim in self.most_similar_vec(vec=vec, top_n=top_n, exclude_words=words, metric=metric)]",
"def get_top_n_words(word_list, n):\n words = []\n\n # Change all words to lowercase\n for word in word_list:\n word = str.lower(word)\n if word not in words:\n words.append(word)\n\n # Calculate frequency of each word\n frequency = []\n for word in words:\n word_count = 0\n for test in word_list:\n if word == test:\n word_count += 1\n frequency.append(word_count)\n\n dic = dict()\n for i, word in enumerate(words):\n dic[frequency[i]] = word\n\n # Sort dictionary to return ranks\n keys = dic.keys()\n keys = sorted(keys)\n words_ranked = []\n for key in keys:\n words_ranked.append(dic.get(key))\n words_ranked = words_ranked[::-1]\n words_ranked = words_ranked[:n]\n return words_ranked",
"def get_top_n_words(word_list, n):\n\tfreq_dict = make_freq_dict (word_list) # get a dictionary\n\tordered_by_frequency = sorted(freq_dict, key=freq_dict.get, reverse=True) # sort\n\tprint ordered_by_frequency[0:n] # print\n\treturn ordered_by_frequency[0:n]",
"def get_main_words(idioms_set):\r\n main_words = Counter([idiom.split()[-1] for idiom in idioms_set])\r\n print('main words:', '\\n', main_words)\r\n print('top 50 main words:', '\\n', main_words.most_common(50)) \r\n return list(main_words)",
"def get_top_words(self, label, n):\n score_list = []\n if('sod' in label):\n for term in self.vocab:\n score = self.cond_prob_sod[term] / self.cond_prob_pop[term]\n score_list.append((score,term)) \n else:\n for term in self.vocab:\n score = self.cond_prob_pop[term] / self.cond_prob_sod[term]\n score_list.append((score,term))\n score_list = sorted(score_list, key=lambda x:x[0],reverse=True)[:n]\n return score_list \n pass",
"def get_10_most_frequent_words(tokens):\n\n return FreqDist(word.lower() for word in tokens).most_common(10)",
"def find_frequent_words(word_frequencies, amount=50):\n alphabetically_sorted = sorted(word_frequencies.most_common(amount), key=lambda tup: tup[0])\n final_sorted = sorted(alphabetically_sorted, key=lambda tup: tup[1], reverse=True)\n list1 = [i[0] for i in final_sorted]\n\n list2 = [i[1] for i in final_sorted]\n return list1, list2",
"def get_top_n_words(word_list, n):\n\t\n\t#Uses Counter function to create tuples of words and number of instances of word\n\twordCount = Counter(word_list)\n\ttopWords = []\n\n\torderedByFrequency = sorted(wordCount, key=wordCount.get, reverse=True)\n\n\t#create list of inputted 'n' top words\n\tfor i in range (0 , n):\n\t\ttopWords.append(orderedByFrequency[i])\n\n\treturn topWords",
"def most_common_words(n):\n with open(os.path.join('visualization', 'vocab.tsv')) as fd:\n words = fd.readlines()[:n]\n words = [word for word in words]\n save_path = os.path.join('visualization', 'vocab_' + str(n) + '.tsv')\n with open(save_path, 'w') as fd:\n for word in words:\n fd.write(word)",
"def top_three_letters(string):\n print(Counter(string))\n print(Counter(string).most_common(3))",
"def most_similar(self, article: str, topn: int = 5):\n return [article[0] for article in self._model.similar_by_word(article, topn)]",
"def test_get_top_n_words_ideal(self):\n expected = ['man']\n actual = get_top_n_words({'happy': 2, 'man': 3}, 1)\n self.assertEqual(expected, actual)",
"def get_top_n_words(word_list, n):\n d = dict()\n for w in word_list:\n d[w] = d.get(w, 0) + 1\n ordered_by_frequency = sorted(d, key=d.get, reverse=True)\n return ordered_by_frequency[0:n]",
"def commonWords(self):\n #utilize similar code used in stats.py\n exclude = set(('!', '.', '?'))\n freq = Stats()\n fullText = []\n #Parse email\n for x in range(self.getSCount()):\n #Simplify email into string of words separated by single space\n sString = self[x].lower()\n sString = ''.join(char for char in sString if char not in exclude)\n sString = sString.split()\n fullText = fullText + sString\n\n #Call findFreqDic() to find frequencies of words\n freqDict = freq.findFreqDic(fullText)\n\n #Analyze 10 words\n numTopic = 10\n \n #Find most and least common calling topNSort and bottomNSort\n mostCommon = freq.topNSort(freqDict, numTopic)\n leastCommon = freq.bottomNSort(freqDict, numTopic)\n \n most = list(mostCommon.keys())\n least = list(leastCommon.keys())\n \n return most, least",
"def get_top_n_words(word_list, n):\n word_counts = dict()\n\n for word in word_list:\n freq = word_counts.get(word, 1)\n word_counts[word] = freq + 1\n\n ordered_by_frequency = sorted(word_counts, key=word_counts.get, reverse=True)\n\n return ordered_by_frequency[0:n]",
"def count_words(s, n):\r\n list_of_words=get_listOfWords(s)\r\n res=wrap_with_freq_toList(list_of_words)\r\n res=sortit(res)\r\n top_n=res[0:n]\r\n return top_n\r\n \r\n # TODO: Count the number of occurences of each word in s\r\n # TODO: Sort the occurences in descending order (alphabetically in case of ties)\r\n # TODO: Return the top n words as a list of tuples (<word>, <count>)\r",
"def get_top_n_words(word_list, n):\n\tfreqs = get_word_frequencies(word_list)\n\tfreq_words = sorted(freqs, key=freqs.get, reverse=False)\n\treturn freq_words[:n]"
] | [
"0.738273",
"0.7270854",
"0.72609174",
"0.6993908",
"0.6986561",
"0.69415885",
"0.6875806",
"0.6855121",
"0.684043",
"0.6821874",
"0.6817871",
"0.68166554",
"0.67563236",
"0.6738581",
"0.6699482",
"0.6695034",
"0.6693428",
"0.66674244",
"0.6641728",
"0.66324925",
"0.66190416",
"0.6612445",
"0.6582773",
"0.65825444",
"0.65676993",
"0.65594697",
"0.6545609",
"0.6536612",
"0.6493325",
"0.64930385"
] | 0.7426751 | 0 |
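Aside: the common() document above returns whatever collections.Counter.most_common(50) yields, so a tiny generic example of that standard-library call clarifies the return shape — a list of (word, count) tuples in descending count order. The sample text is invented for illustration.

import collections

words = "na na na na hey hey hey goodbye".split()
counter = collections.Counter(words)
print(counter.most_common(2))  # -> [('na', 4), ('hey', 3)]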
Points: list of tuples, where each tuple has the (x, y) coords of a point. numLines: number of pairs of points to be randomly sampled. numIter: number of iterations for which the probability estimates should be refined. e_tilde: critical distance for 50% probability of membership in a line. gamma_tilde: critical fraction of valid points for 50% probability of validity of a line. beta_tilde: critical fraction of valid lines for 50% probability of validity of a point | def iterative_function(points, numLines, numIter, e_tilde, gamma_tilde, beta_tilde): 
numPoints = len(points)
# Randomly sample pairs and get the corresponding rho and theta parameters for a line fitted to the pair:
# Returns a list of tuples - Each tuple has the rho and theta parameters for the line:
lines ,idxs = info_rand_sample_fit_lines(points, numLines)
# Compute normal distance of each point from each line: Store in a 2-D numpy array:
# Points along 1st axis - Rows - axis= 0
# Lines along 2nd axis - Columns - axis=1
# Initialize the 2-D array:
normDist = np.zeros((numPoints, numLines))
# Indices for the 2-D array:
j,k = 0,0
# Loop through points:
for point in points:
k = 0
# Loop through the lines:
for line in lines:
normDist[j,k] = get_normal_dist(line,point)
# Increment the column (line) index:
k+=1
#Increment the row (point) index
j += 1
# Transform the Normal Distance matrix to the Probability of Membership matrix:
Pr_C = log_dec(normDist,e_tilde)
## Iteratively refine estimates of Prob of Validity - Points and Lines:
iterCount = 0
# Initialize Probability of Validity of points and lines:
initProb =1
Pr_A = initProb*np.ones((numPoints,1))
Pr_V = np.zeros((numLines,1))
# Initialize gamma and beta: Fractions of valid points and lines respectively:
gamma = np.zeros_like(Pr_V)
beta = np.zeros_like(Pr_A)
while iterCount < numIter:
# For each line:
for k in range(numLines):
# Compute expected fraction of valid points:
gamma[k] = np.dot(Pr_A.T, Pr_C[:,k])/np.sum(Pr_A)
#print (gamma[k], end=" ->")
# Compute Probability of Validity:
Pr_V = log_inc(gamma, gamma_tilde)
# For each point:
for j in range(numPoints):
# Compute expected fraction of valid lines in which it is a member:
beta[j] = np.dot(Pr_V.T, Pr_C[j,:])/np.sum(Pr_V)
#print (beta[j], end=" ->")
#print (" ")
# Compute Probability of Validity:
Pr_A = log_inc(beta, beta_tilde)
iterCount +=1
# Sort the lines according to Probability of Validity:
idx_sort = np.argsort(Pr_V, axis=0)
print (" The equations of candidate lines and their probability of validity are: ")
for idx in idx_sort:
print (lines[int(idx)] , end = '-- >')
print (Pr_V[idx])
return lines, Pr_A, Pr_V | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def generateData(numPoints,x,y):\n\tfor i in range(0,numPoints):\n\t\tif (i % 2 == 0):\n\t\t\tx.append(random.normalvariate(25, 15))\n\t\t\ty.append(random.normalvariate(25, 15))\n\t\t\t \n\t\t\t\n\t\telse:\n\t\t\tx.append(random.normalvariate(75, 15))\n\t\t\ty.append(random.normalvariate(75, 15))",
"def generate_points(num_points):\n for i in xrange(0, num_points):\n pass",
"def create_points(self):\n v1 = 0.0\n v2 = 0.5\n v3 = 0.25\n v4 = 0.2 # only used for hexgrid\n\n points = []\n\n points.append((v1, v1, v1)) # 0\n points.append((v2, v1, v1)) # 1\n points.append((v2, v2, v1)) # 2\n points.append((v1, v2, v1)) # 3\n\n points.append((v1, v1, v2)) # 4\n points.append((v2, v1, v2)) # 5\n points.append((v2, v2, v2)) # 6\n points.append((v1, v2, v2)) # 7\n\n points.append((v3, v1, v1)) # 8\n points.append((v2, v3, v1)) # 9\n points.append((v3, v2, v1)) # 10\n points.append((v1, v3, v1)) # 11\n\n points.append((v1, v1, v3)) # 12\n points.append((v2, v1, v3)) # 13\n points.append((v2, v2, v3)) # 14\n points.append((v1, v2, v3)) # 15\n\n points.append((v3, v1, v2)) # 16\n points.append((v2, v3, v2)) # 17\n points.append((v3, v2, v2)) # 18\n points.append((v1, v3, v2)) # 19\n\n points.append((v4, v1, v1)) # 20\n points.append((v1, v4, v1)) # 21\n points.append((v1, v1, v4)) # 22\n\n return points",
"def points_generator(self):\n rows, cols = self.game.board.board_size\n points = [Point(i, j) for i, j in product(range(rows), range(cols))]\n for point in points:\n yield point",
"def _create_examples(self, lines, set_type):\n examples = []\n for (i, line) in enumerate(tqdm(lines)):\n if i == 0:\n continue\n gold_label_id = self.label2id(line[0])\n several_labels = [self.label2id(line[-5]), self.label2id(line[-4]), self.label2id(line[-3]),\n self.label2id(line[-2]), self.label2id(line[-1])]\n\n pair_id = line[-6]\n premise = line[5]\n hypothesis = line[6]\n premise_bp = line[1]\n hypothesis_bp = line[2]\n premise_p = line[3]\n hypothesis_p = line[4]\n # premise_length = len(premise)\n # hypothesis_length = len(hypothesis)\n guid = \"%s-%s\" % (set_type, i)\n\n ex = dict()\n for k in self.data_keys:\n ex[k] = eval(k)\n examples.append(ex)\n logger.info(\" {} examples\".format(len(examples)))\n return examples",
"def create_random_points(n):\n\n\treturn [(random.randint(0,n),random.randint(0,n)) for i in range(n)]",
"def create_points(number): \n\n # generate x and y coordinates:\n x = np.random.permutation(2*number)[:number] - number\n y = np.random.permutation(2*number)[:number] - number\n\n points = [ { 0 : float(x[i]), 1 : float(y[i]), \"index\" : i} for i in range(len(x)) ]\n\n return points\n\n # generate points as coordinate pairs of floats.\n # return zip(map(float,x),map(float,y))",
"def generatePoints(centre: Point, radius: float, numPoints: int, jitterRatio: float = 0) -> List[Point]:\n def jitter() -> float:\n diamiter = radius * math.pi * 2\n jitterSize = jitterRatio * diamiter / numPoints\n return random.random() * 2 * jitterSize - jitterSize\n\n points: List[Point] = []\n angle_segment = math.pi * 2 / numPoints\n angle = 0\n\n while angle < math.pi * 2:\n point = (centre[0] + radius * math.cos(angle) + jitter(),\n centre[1] + radius * math.sin(angle) + jitter())\n points.append(point)\n angle += angle_segment\n\n return points",
"def points(self, width, height):\n\n num_lines = int(math.sqrt(self.num_points))\n\n if num_lines ** 2 != self.num_points:\n raise RuntimeError(\"Number of points must be a perfect square\")\n\n chunk_width = int(width / (num_lines + 1))\n chunk_height = int(height / (num_lines + 1))\n\n x_list = []\n y_list = []\n for i in range(1, num_lines + 1):\n x_list.append(chunk_width * i)\n y_list.append(chunk_height * i)\n\n xy_list = []\n for y in y_list:\n for x in x_list:\n xy_list.append((x, y))\n\n return xy_list",
"def read_test_tuples():\n lines = read_input(25, True)\n point_sets = list(parse_points(lines))\n expected_counts = [4, 3, 8]\n\n return zip(point_sets, expected_counts)",
"def iterative_function_vect(points, numLines, numIter, e_tilde, gamma_tilde, beta_tilde):\n \n numPoints = len(points)\n \n # Randomly sample pairs and get the corresponding rho and theta parameters for a line fitted to the pair: \n # Returns a list of tuples - Each tuple has the rho and theta parameters for the line: \n lines ,idxs = info_rand_sample_fit_lines(points, numLines)\n \n \n # Compute normal distance of each point from each line: Store in a 2-D numpy array: \n # Points along 1st axis - Rows - axis= 0\n # Lines along 2nd axis - Columns - axis=1\n \n # Initialize the 2-D array: \n normDist = np.zeros((numPoints, numLines))\n \n \n # Indices for the 2-D array: \n j,k = 0,0\n \n # Loop through points:\n for point in points: \n \n k = 0\n \n # Loop through the lines: \n for line in lines:\n \n normDist[j,k] = get_normal_dist(line,point)\n \n # Increment the column (line) index:\n k+=1\n \n \n #Increment the row (point) index\n j += 1\n \n # Transform the Normal Distance matrix to the Probability of Membership matrix: \n Pr_C = log_dec(normDist,e_tilde)\n \n \n ## Iteratively refine estimates of Prob of Validity - Points and Lines: \n iterCount = 0\n \n # Initialize Probability of Validity of points and lines: \n initProb =1\n Pr_A = initProb*np.ones((numPoints,1))\n Pr_V = np.zeros((numLines,1))\n \n \n # Initialize gamma and beta: Fractions of valid points and lines respectively: \n gamma = np.zeros_like(Pr_V)\n beta = np.zeros_like(Pr_A)\n \n \n while iterCount < numIter: \n \n # For each line: Compute Gamma:\n \n # Compute expected fraction of valid points: \n gamma = np.dot(Pr_A.T, Pr_C)/np.sum(Pr_A) # Hope the broadcasting works here:\n \n \n # Compute Probability of Validity: \n Pr_V = log_inc(gamma, gamma_tilde) \n \n \n # For each point: Compute beta:\n \n # Compute expected fraction of valid lines in which it is a member: \n beta = np.dot(Pr_C, Pr_V.T)/np.sum(Pr_V)\n \n \n # Compute Probability of Validity: \n Pr_A = log_inc(beta, beta_tilde)\n \n \n iterCount +=1\n \n # Sort the lines according to Probability of Validity:\n idx_sort = np.argsort(Pr_V, axis=1)\n \n print (\" The equations of candidate lines and their probability of validity are: \") \n\n \n for idx in idx_sort: \n print (lines[int(idx)] , end = '-- >')\n print (Pr_V[idx])\n \n return lines, Pr_A, Pr_V",
"def get_points(self, npoints: int):\n\n R = sorted(np.random.rand(npoints) * 2. * np.pi)\n\n xx = self.cx + self.a * np.cos(R) * np.cos(self.angle_rad) - self.b * np.sin(R) * np.sin(\n self.angle_rad)\n\n yy = self.cy + self.a * np.cos(R) * np.sin(self.angle_rad) + self.b * np.sin(R) * np.cos(\n self.angle_rad)\n\n return R, xx, yy",
"def drawPoints(self, points, color):\n for p in points:\n Point\n p.color = color\n p.radius = self.points_radius\n p.conversion = False\n p.show(self.context)",
"def MakePoints(xStart, xEnd, numPoints):\n if len(xStart) != 3 or len(xEnd) != 3:\n raise Exception(\"Start and end point must be 3-dimensional vectors\")\n if numPoints < 2:\n raise Exception(\"At least two points are required\")\n \n # Starting Points\n pt_list = []\n x = xStart[0]\n y = xStart[1]\n z = xStart[2]\n\n # How much we add/subtract between each interpolated point\n x_steps = (xEnd[0] - xStart[0])/(numPoints-1)\n y_steps = (xEnd[1] - xStart[1])/(numPoints-1)\n z_steps = (xEnd[2] - xStart[2])/(numPoints-1)\n\n # Incrementally add to each point until the end point is reached\n for i in range(numPoints):\n point_i = [x,y,z] # create a point\n #append the point to the list\n pt_list.append(point_i)\n x = x + x_steps\n y = y + y_steps\n z = z + z_steps\n return pt_list",
"def get_dots(self):\n logging.debug('Generate dots to draw')\n gc = self.coordinates\n coords = []\n zmin = ymin = xmin = self.fmin = 999999\n self.fmax = 0\n for line in gc:\n temp = [None, None, None, None] # X, Y, Z, Feedrate\n for c in line:\n if c.startswith('X'):\n temp[0] = float(c[1:])\n xmin = min(xmin, temp[0])\n elif c.startswith('Y'):\n temp[1] = float(c[1:])\n ymin = min(ymin, temp[1])\n elif c.startswith('Z'):\n temp[2] = float(c[1:])\n zmin = min(zmin, temp[2])\n elif c.startswith('F'):\n temp[3] = int(float(c[1:]))\n self.fmin = min(self.fmin, temp[3])\n self.fmax = max(self.fmax, temp[3])\n if ((temp[0] is not None) or (temp[1] is not None) or\n (temp[2] is not None) or (temp[3] is not None)):\n if coords:\n if temp[0] is None:\n temp[0] = coords[-1][0]\n if temp[1] is None:\n temp[1] = coords[-1][1]\n if temp[2] is None:\n temp[2] = coords[-1][2]\n if temp[3] is None:\n temp[3] = coords[-1][3]\n coords.append(temp)\n\n if (self.fmin == 999999) or (self.fmax == 0):\n raise GcodeError('Please check feedrate')\n if (xmin == ymin == zmin == 999999):\n raise GcodeError('Please check coordinates')\n if xmin == 999999:\n xmin = 0\n if ymin == 999999:\n ymin = 0\n if zmin == 999999:\n zmin = 0\n\n for i in coords: # if something is still 0\n if i[0] is None:\n i[0] = xmin\n if i[1] is None:\n i[1] = ymin\n if i[2] is None:\n i[2] = zmin\n if i[3] is None:\n i[3] = self.fmin\n i[0] -= xmin\n i[1] -= ymin\n i[2] -= zmin\n i[3] -= self.fmin\n\n self.fmax -= self.fmin\n self.colors_list = grad(MIN_COLOR, MAX_COLOR, self.fmax+1)\n\n dots = []\n for i in range(len(coords)):\n temp = []\n if i != len(coords)-1:\n temp = self.getColorLine(coords[i], coords[i+1])\n if temp:\n dots.extend(temp)\n\n return dots",
"def _gather_points(self):\n # This is just a stub for now. We should really find the lines only\n # inside the screen range here.\n\n x = self.index.get_data()\n y = self.value.get_data()\n rad= min(self.width/2.0,self.height/2.0)\n sx = x*rad+ self.x + self.width/2.0\n sy = y*rad+ self.y + self.height/2.0\n\n points = transpose(array((sx,sy)))\n self._cached_data_pts = points\n self._cache_valid = True\n return",
"def __init__(self, lines):\n\t\tself.lines = lines\n\t\tself.points = set()\n\t\tfor l in lines:\n\t\t\tif not l.a in self.points:\n\t\t\t\tself.points.add(l.a)\n\t\t\tif not l.b in self.points:\n\t\t\t\tself.points.add(l.b)",
"def lines(self):\n for pair in pairs(self.points):\n yield Line(pair, shape=self)",
"def fill_points_list(filename):\n f = open(input_file_test(filename), \"r\")\n\n dimension = find_dimesion(filename)\n points = list()\n line_count = 1\n flag = False\n for line in f:\n current_point = line.split()\n\n if dimension == len(current_point):\n check_if_number(current_point)\n point = Point(points=current_point, line=line_count)\n points.append(point)\n\n line_count += 1\n else:\n flag=True\n break\n\n if flag:\n print PointError()\n sys.exit()\n\n if len(points) ==1:\n print NotEnoughPointError()\n sys.exit()\n\n f.close()\n\n return points",
"def gen_test_points(n=50, extent=(0,0,100,100), rand_seed=None):\n if rand_seed:\n random.seed(rand_seed)\n return [(random.randint(extent[0], extent[2]), random.randint(extent[1], extent[3]))\n for i in xrange(n)]",
"def __init__(self, points):\n self.points = points\n self.lines = []\n\n orientation = 1\n for i, point in enumerate(self.points):\n try:\n if points[i+1].x > point.x:\n orientation = orientation\n else:\n orientation = - 1\n point.orientation = orientation\n self.points[i+1].orientation = orientation\n self.lines.append(Line(point, self.points[i+1]))\n except IndexError:\n point.orientation = orientation\n self.lines.append(Line(point, self.points[0]))",
"def give_rand_points(n_points, xmin, xmax, ymin, ymax, n_dim=2):\n random_points = np.random.rand(n_points, n_dim)\n random_points[:, 0] = random_points[:, 0]*(xmax-xmin)+xmin\n random_points[:, 1] = random_points[:, 1]*(ymax-ymin)+ymin\n\n return random_points",
"def DrawPointList(self, points, pens=None):\n if pens is None:\n pens = []\n elif isinstance(pens, wx.Pen):\n pens = [pens]\n elif len(pens) != len(points):\n raise ValueError('points and pens must have same length')\n return self._DrawPointList(points, pens, [])",
"def scatter_points(n):\r\n P1 = np.random.randn(int(np.ceil(n/2)), 2) - 4\r\n P2 = 3 * np.random.rand(int(np.ceil(n/4)), 2) - np.array([10, 0])\r\n P3 = np.random.randn(int(np.ceil(n/4)), 2) + 3\r\n \"\"\"\r\n P1=np.floor(P1)\r\n P2=np.floor(P2)\r\n P3=np.floor(P3)\r\n \"\"\"\r\n L = list(np.concatenate((P1,P2,P3), axis=0))\r\n \r\n return L \r\n #return no_dupli(L)\r",
"def read_points():\n\tpoints = []\n\tf = open(r'sample_points.txt')\n\twhile True:\n\t\tnstr = f.readline()\n\t\tif len(nstr) == 0:\n\t\t\tbreak\n\t\tline = nstr.rstrip('\\n').split(', ')\n\t\t# print(line)\n\n\t\tpoints.append((round(float(line[0]),3),round(float(line[1]),3))) \n\n\tprint(points)\n\treturn points",
"def generate_interpolated_points(point1, point2):\n points = connect(np.array([point2, point1]))\n return set(map(tuple, points))",
"def vertices_from_lines(lines):\n count = len(lines)\n print(\"Getting vertices 1/3\")\n pb = pbar.ProgressBar(count)\n vertices = []\n# print(\"getting vertices from line\")\n for line in lines:\n pb +=1\n vertices.extend(list(line.coords))\n del pb\n return [Point(p) for p in set(vertices)]",
"def parse_points(lines):\n lines = deque(lines)\n current = []\n while lines:\n line = lines.popleft().strip()\n if line:\n current.append(Point.parse(line))\n else:\n yield current\n current = []\n\n if current:\n yield current",
"def test_points_calculation(self):\n\n assert self.test_shape.points == [\n (1030.0, 525.0),\n (1030.0, 475.0),\n (970.0, 475.0),\n (970.0, 525.0),\n ]",
"def _getHLinesPoints(self, character, size):\n if character in _HORIZONTAL_MAP.keys():\n hline_map = _HORIZONTAL_MAP[character] #(3 dimension tuple)\n else:\n hline_map = _HORIZONTAL_MAP['*']\n \n # i*(size+1) fit the position series in the rows\n hline_points = [(i*(size+1) ,1) for i in range(3) if hline_map[i] == 1]\n return hline_points"
] | [
"0.6171076",
"0.61004645",
"0.60166216",
"0.5971953",
"0.5899086",
"0.5896352",
"0.5832605",
"0.58060277",
"0.5772896",
"0.57310075",
"0.57118297",
"0.5706741",
"0.5691438",
"0.5668014",
"0.566632",
"0.564936",
"0.5631067",
"0.56161624",
"0.56137055",
"0.55902743",
"0.55628055",
"0.5561844",
"0.55462",
"0.55447054",
"0.5517833",
"0.55101854",
"0.5478493",
"0.5452622",
"0.54497",
"0.5447443"
] | 0.6206707 | 0 |
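Aside: iterative_function above calls four helpers that never appear in the record (info_rand_sample_fit_lines, get_normal_dist, log_dec, log_inc). The sketch below gives plausible implementations consistent with the call sites, assuming the membership/validity transforms are logistic curves that equal 0.5 at the critical values named in the query; the steepness constant and the exact line parameterisation are assumptions for illustration only.

import numpy as np

def info_rand_sample_fit_lines(points, numLines):
    # Sample random point pairs and return (rho, theta) line parameters plus the index pairs.
    pts = np.asarray(points, dtype=float)
    lines, idxs = [], []
    for _ in range(numLines):
        i, j = np.random.choice(len(pts), size=2, replace=False)
        (x1, y1), (x2, y2) = pts[i], pts[j]
        theta = np.arctan2(x1 - x2, y2 - y1)           # direction of the line normal
        rho = x1 * np.cos(theta) + y1 * np.sin(theta)  # signed distance of the line from the origin
        lines.append((rho, theta))
        idxs.append((i, j))
    return lines, idxs

def get_normal_dist(line, point):
    # Perpendicular distance of a point from a line in (rho, theta) form.
    rho, theta = line
    x, y = point
    return abs(x * np.cos(theta) + y * np.sin(theta) - rho)

def log_dec(x, x_tilde, steepness=5.0):
    # Decreasing logistic: equals 0.5 exactly at the critical value x_tilde.
    return 1.0 / (1.0 + np.exp(steepness * (x - x_tilde) / x_tilde))

def log_inc(x, x_tilde, steepness=5.0):
    # Increasing logistic: equals 0.5 exactly at the critical value x_tilde.
    return 1.0 / (1.0 + np.exp(-steepness * (x - x_tilde) / x_tilde))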
This function places a call from a contact. | def call_from_contact(self): 
log_test_case(self.name, 'call_from_contact')
    #click_textview_by_text(SC.PRIVATE_CONTACT_NUMBER)
click_textview_by_id('primary_action_view')
sleep(1)
goback()
sleep(3)
return | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def call(self):",
"def force_contact(self, *args, **kwargs) -> Any:\n pass",
"def call(self, callee: \"SIPPhoneTemplate\") -> None:",
"def call(self) -> global___Snippet.ClientCall:",
"def call(self) -> global___Snippet.ClientCall:",
"def receiveContactList(self, contactList):",
"def moment_contact(self, *args, **kwargs) -> Any:\n pass",
"def test_get_contact(self):\n pass",
"def on_contact(self, update, context):\n user = update.effective_user\n chat_id = update.effective_chat.id\n phone = update.message.contact.phone_number\n log.info(\n \"TEL from %s, %s, @%s, %s\", user.username, user.full_name, chat_id, phone,\n )\n\n # Here's an example of what else you can find in update['message'].contact.to_dict()\n # {'phone_number': '+4500072470000', 'first_name': 'Alex', 'user_id': 253150000}\n # And some user-related details in update.effective_user.to_dict()\n # {'first_name': 'Alex', 'id': 253150000, 'is_bot': False, 'language_code': 'en', 'username': 'ralienpp'}\n\n # Tell the backend about it, such that from now on it knows which chat_id corresponds to this user\n known_user = self.backend.link_chatid_to_volunteer(\n user.username, update.effective_chat.id, phone\n )\n\n if known_user:\n # Mark the user as available once onboarding is complete\n context.user_data[\"state\"] = c.State.AVAILABLE\n # Acknowledge receipt and tell the user that we'll contact them when new requests arrive\n update.message.reply_text(c.MSG_STANDBY)\n return\n\n # If we got this far, this is a completely new person who initiated the registration process via the bot, it is\n # time to ask them a few things and build a profile\n self.build_profile(update, context, phone=phone)",
"def phone_start(self) -> None:",
"def showEditContact(self):",
"def __call__(self):\n\t\treturn",
"def call(self, *args, **kwargs):",
"def __call__( self ):\n pass",
"def before_send(self):",
"def make_phone_call(self):\n client = Client(account_sid, auth_token)\n\n call = client.calls.create(\n url='http://demo.twilio.com/docs/classic.mp3',\n to=self.emergency_number,\n from_='+16505499680'\n )\n\n print(call.sid)",
"def call_cell_phone(self, sender, message):\n if self.cell_phone:\n phone_call.call(sender, message, self.cell_phone)",
"def Run(self):\n return self.ListAllContacts()",
"def after_send(self):",
"def on_success(self) -> None:",
"async def on_call(message, client):\n pass",
"def __call__():",
"def __call__():",
"def __call__():",
"def __call__():",
"def __call__():",
"def call_home_phone(self, sender, message):\n if self.home_phone:\n phone_call.call(sender, message, self.home_phone)",
"def call(self):\n self.call() # Call a function",
"def run(self): \r\n return",
"def on_success(self):\n pass"
] | [
"0.68409646",
"0.660355",
"0.6514977",
"0.6372869",
"0.6372869",
"0.6274491",
"0.6248815",
"0.61637676",
"0.61566913",
"0.60917664",
"0.5976085",
"0.59733593",
"0.59509075",
"0.58956426",
"0.5874206",
"0.58698475",
"0.585964",
"0.58154577",
"0.580931",
"0.58043754",
"0.5796055",
"0.57956487",
"0.57956487",
"0.57956487",
"0.57956487",
"0.57956487",
"0.5699766",
"0.5699522",
"0.5697437",
"0.5690947"
] | 0.7567253 | 0 |
Assign slots for organizations within a program. Gets the slot assignment data as a JSON string from the program and enqueues a task to process the slot assignments. | def assignProgramSlots(request, *args, **kwargs): 
program = None
params = request.REQUEST
# Query the program entity
try:
program = program_logic.getFromKeyName(params["programkey"])
except KeyError:
logging.error("programkey not in params")
return responses.terminateTask()
if not program:
logging.error("no such program '%s'" % params["programkey"])
return responses.terminateTask()
if not program.slots_allocation:
logging.error("empty slots_allocation")
return responses.terminateTask()
# Enqueue a task to assign the slots
taskqueue.add(
url = "/gsoc/tasks/assignslots/assign",
params = {
'programkey': params["programkey"],
})
# Return successful
return responses.terminateTask() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def assignSlots(request, *args, **kwargs):\n\n # Setup an artifical request deadline\n timelimit = int(request.REQUEST.get(\"timelimit\", 20000))\n timekeeper = Timekeeper(timelimit)\n\n program_key = request.REQUEST.get(\"programkey\")\n last_key = request.REQUEST.get(\"lastkey\", \"\")\n program = program_logic.getFromKeyName(program_key)\n\n # Copy for modification below\n params = request.POST.copy()\n params[\"timelimit\"] = timelimit\n\n # Parse the JSON org:slots dictionary\n slots = simplejson.loads(program.slots_allocation)\n org_keys = [i for i in sorted(slots.keys()) if i > last_key]\n logging.info(org_keys)\n\n # Assign slots for each organization\n try:\n for clock, org_key in timekeeper.iterate(org_keys):\n logging.info(\"%s %s %s\", request.path, clock, org_key)\n\n org_slots = slots[org_key]\n # Get the organization entity\n org = org_logic.getFromKeyFields({\n 'link_id': org_key,\n 'scope_path': program_key,\n })\n\n if not org:\n logging.error(\"no such org '%s'/'%s'\" % (program_key, org_key))\n continue\n\n # Count proposals and mentors\n org.slots = int(org_slots['slots'])\n org.nr_applications, org.nr_mentors = countProposals(org)\n\n # Update the organization entity\n org.put()\n\n # Mark the organization as done\n last_key = org_key\n\n # Requeue this task for continuation\n except DeadlineExceededError:\n params[\"lastkey\"] = last_key\n taskqueue.add(url=request.path, params=params)\n\n # Exit this task successfully\n return responses.terminateTask()",
"def schedule_slot(data):\n firebase_uid = data[\"session\"].split(\"/\")[-1]\n db = firebase.database()\n ticket_id = data[\"queryResult\"][\"parameters\"][\"ticket_id\"]\n try:\n complaint = db.child(\"user_data\").child(firebase_uid).child(\"Complaints\").child(ticket_id).get().val()\n if complaint[\"Time Slots\"][\"Slot 1\"][\"Date\"] == \"0\":\n message = \"No time slots have been allotted yet. You can either check back with me in some time or go to the \" \\\n \"\\\"Tickets\\\" section of the app to stay updated. \"\n else:\n message = \"Available Time Slots: \\n\" + \\\n \"\\t\\tSlot 1 - \" + \\\n \"\\n\\t\\t\\t\\tDate: \" + complaint[\"Time Slots\"][\"Slot 1\"][\"Date\"] + \\\n \"\\n\\t\\t\\t\\tTime: \" + complaint[\"Time Slots\"][\"Slot 1\"][\"Time\"] + \\\n \"\\n\\t\\tSlot 2 - \" + \\\n \"\\n\\t\\t\\t\\tDate: \" + complaint[\"Time Slots\"][\"Slot 2\"][\"Date\"] + \\\n \"\\n\\t\\t\\t\\tTime: \" + complaint[\"Time Slots\"][\"Slot 2\"][\"Time\"] + \\\n \"\\n\\t\\tSlot 3 - \" + \\\n \"\\n\\t\\t\\t\\tDate: \" + complaint[\"Time Slots\"][\"Slot 3\"][\"Date\"] + \\\n \"\\n\\t\\t\\t\\tTime: \" + complaint[\"Time Slots\"][\"Slot 3\"][\"Time\"] + \"\\n\"\n message += \"Which time slot do you choose? Please enter \\\"1\\\" for Slot-1 and so on.\"\n except:\n message = \"I think you have entered an incorrect Ticket ID.\"\n response = {\n \"fulfillmentText\": message\n }\n return response",
"def set_available_time_slot():\n if request.content_type != 'application/json':\n error = json.dumps({'error': 'Invalid Content Type'})\n return make_response(error, 400, InterviewCalendarApi.HEADERS)\n\n data = request.json\n # For Temporary purpose, stored in flat file database\n with open(InterviewCalendarApi.DB_FILE, \"a+\") as fd:\n record = \"%s|%s|%s|%s\\n\" %(data[\"Category\"], data[\"Name\"],\n data[\"Email\"], \",\".join(data[\"AvailablityDateTime\"]))\n fd.write(record)\n msg = json.dumps({\"Status\": \"Success\"})\n return make_response(msg, 200, InterviewCalendarApi.HEADERS)",
"def set_resources():\n global available_resources\n global EdgenodeResources\n recv_json = request.get_json()\n for resourcename, value in recv_json.items():\n available_resources[resourcename] = value\n # TODO make this better\n EdgenodeResources = [TaskResources(ram=int(available_resources['RAM']), cpu=int(\n available_resources['CPU']), hdd=int(available_resources['HDD'])), available_resources['DATA']]\n\n print 'Available resources set to', EdgenodeResources\n return 'Available resources set to ' + str(available_resources)",
"def choose_slot(data):\n firebase_uid = data[\"session\"].split(\"/\")[-1]\n db = firebase.database()\n slot = data[\"queryResult\"][\"parameters\"][\"slot\"]\n for i in data[\"queryResult\"][\"outputContexts\"]:\n if \"ticket-id\" in i[\"name\"]:\n ticket_id = i[\"parameters\"][\"ticket_id\"]\n db.child(\"user_data\").child(firebase_uid).child(\"Complaints\").child(ticket_id).child(\"Time Slot Chosen\").set(str(int(slot)))\n break\n response = {\n \"fulfillmentText\": \"I have updated your preference.\"\n }\n return response",
"def get_available_time_slot():\n try:\n time_slot_set_list = list()\n # Read all time slot from database\n with open(InterviewCalendarApi.DB_FILE, \"r\") as fd:\n for line in fd:\n time_slot_list = list()\n (_,_,_, time_slots) = line.strip().split(\"|\")\n for time_slot in time_slots.split(\",\"):\n (from_time_slot, to_time_slot) = list(map(int, time_slot.split(\"-\")))\n time_slot_list.extend(range(from_time_slot, (to_time_slot + 1)))\n # Get all available time slot for every user\n time_slot_set_list.append(set(time_slot_list))\n \n # Find common time slot between multiple parties\n available_slots = list(set.intersection(*time_slot_set_list))\n\n msg = json.dumps({\"Status\": \"Success\", \"available_slots\": available_slots})\n return make_response(msg, 200, InterviewCalendarApi.HEADERS)\n except:\n err_msg = sys.exc_info()\n error = json.dumps({'error': 'Unable to find time slot due to error: %s' %str(err_msg)})\n return make_response(error, 401, InterviewCalendarApi.HEADERS)",
"def schedule_meeting(intent_request):\n \n meeting_person = intent_request['currentIntent']['slots']['Person']\n meeting_type = intent_request['currentIntent']['slots']['MeetingType']\n meeting_date = intent_request['currentIntent']['slots']['Date']\n meeting_time = intent_request['currentIntent']['slots']['Time']\n meeting_duration = intent_request['currentIntent']['slots']['Duration']\n meeting_address = intent_request['currentIntent']['slots']['Address']\n invitation_link = intent_request['currentIntent']['slots']['InvitationLink']\n phone_number = intent_request['currentIntent']['slots']['Phone']\n source = intent_request['invocationSource']\n output_session_attributes = intent_request['sessionAttributes'] if intent_request['sessionAttributes'] is not None else {}\n booking_map = json.loads(try_ex(lambda: output_session_attributes['bookingMap']) or '{}')\n\n if source == 'DialogCodeHook':\n # Perform basic validation on the supplied input slots.\n slots = intent_request['currentIntent']['slots']\n validation_result = validate_schedule_meeting(meeting_duration, date, meeting_time)\n if not validation_result['isValid']:\n slots[validation_result['violatedSlot']] = None\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n slots,\n validation_result['violatedSlot'],\n validation_result['message']\n )\n\n if not meeting_person:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'Person',\n {'contentType': 'PlainText', 'content': 'Who is gonna be that with?'}\n )\n \n if meeting_person and not meeting_type:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'MeetingType',\n {'contentType': 'PlainText', 'content': 'What type of meeting would you like to schedule?'}\n )\n\n if meeting_person and meeting_type and not meeting_date:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'Date',\n {'contentType': 'PlainText', 'content': 'When would you like to schedule your {} ?'.format(meeting_type)}\n )\n\n if meeting_type and meeting_date:\n # Fetch or generate the availabilities for the given date.\n booking_availabilities = try_ex(lambda: booking_map[meeting_date])\n if booking_availabilities is None:\n booking_availabilities = get_availabilities(meeting_date)\n booking_map[meeting_date] = booking_availabilities\n output_session_attributes['bookingMap'] = json.dumps(booking_map)\n\n meeting_type_availabilities = get_availabilities_for_duration(get_duration(meeting_type), booking_availabilities)\n if len(meeting_type_availabilities) == 0:\n # No availability on this day at all; ask for a new date and time.\n slots['Date'] = None\n slots['Time'] = None\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n slots,\n 'Date',\n {'contentType': 'PlainText', 'content': 'There is not any availability on that date, is there another day which works for you?'}\n )\n\n message_content = 'What time on {} works for you? '.format(meeting_date)\n if meeting_time:\n output_session_attributes['formattedTime'] = build_time_output_string(meeting_time)\n # Validate that proposed time for the meeting can be booked by first fetching the availabilities for the given day. 
To\n # give consistent behavior in the sample, this is stored in sessionAttributes after the first lookup.\n if is_available(meeting_time, get_duration(meeting_type), booking_availabilities):\n return delegate(output_session_attributes, slots)\n message_content = 'The time you requested is not available. '\n\n if len(meeting_type_availabilities) == 1:\n # If there is only one availability on the given date, try to confirm it.\n slots['Time'] = meeting_type_availabilities[0]\n return confirm_intent(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n slots,\n {\n 'contentType': 'PlainText',\n 'content': '{}{} is our only availability, does that work for you?'.format\n (message_content, build_time_output_string(meeting_type_availabilities[0]))\n },\n build_response_card(\n 'Confirm Meeting',\n 'Is {} on {} okay?'.format(build_time_output_string(meeting_type_availabilities[0]), date),\n [{'text': 'yes', 'value': 'yes'}, {'text': 'no', 'value': 'no'}]\n )\n )\n\n available_time_string = build_available_time_string(meeting_type_availabilities)\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n slots,\n 'Time',\n {'contentType': 'PlainText', 'content': '{}{}'.format(message_content, available_time_string)},\n build_response_card(\n 'Specify Time',\n 'What time works best for you?',\n build_options('Time', meeting_type, meeting_date, booking_map)\n )\n )\n \n if meeting_type = 'online' and meeting_person and meeting_date and meeting_time and not invitation_link:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'InvitationLink',\n {'contentType': 'PlainText', 'content': 'Can you paste your invitation link in here, please?'}\n )\n \n if (meeting_type = 'personal' or meeting_type = 'inperson') and meeting_person and meeting_date and meeting_time and not meeting_address:\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'Address',\n {'contentType': 'PlainText', 'content': 'Where the {} will take place?', .format(meeting_type)}\n )\n \n if meeting_person and meeting_type and meeting_date and meeting_time and (invitation_link or meeting_address) and not contact_phone\"\n return elicit_slot(\n output_session_attributes,\n intent_request['currentIntent']['name'],\n intent_request['currentIntent']['slots'],\n 'Phone',\n {'contentType': 'PlainText', 'content': 'Can you leave your contact phone number here, please?'}\n\n return delegate(output_session_attributes, slots)\n \n \n \"\"\" --- Check avalibility --- \"\"\"\n\n\n # Book the meeting.\n booking_availabilities = booking_map[meeting_date]\n if booking_availabilities:\n # Remove the availability slot for the given date as it has now been booked.\n booking_availabilities.remove(meeting_time)\n if meeting_duration == 60:\n second_half_hour_time = increment_time_by_thirty_mins(meeting_time)\n booking_availabilities.remove(second_half_hour_time)\n\n booking_map[date] = booking_availabilities\n output_session_attributes['bookingMap'] = json.dumps(booking_map)\n else:\n # This is not treated as an error as this code sample supports functionality either as fulfillment or dialog code hook.\n logger.debug('Availabilities for {} were null at fulfillment time. 
'\n 'This should have been initialized if this function was configured as the dialog code hook'.format(meeting_date))\n\n return close(\n output_session_attributes,\n 'Fulfilled',\n {\n 'contentType': 'PlainText',\n 'content': 'Okay, I have booked your meeting. See you at {} on {}'.format(build_time_output_string(meeting_time), meeting_date)\n }\n )",
"def save_slot(slot_dict, slot_name):\n output_path = f'{SLOTS_PATH}{slot_name}.json'\n with open(output_path, 'w') as fh:\n fh.writelines(json.dumps(slot_dict))",
"def _reserve_bsa_slot(self):\n self._run_cmd(CAPUT + \" IOC:IN20:EV01:EDEFNAME \" + '\"' + self._edef_name + '\"')",
"def _get_appointment_slots(self, timezone, employee=None):\n self.ensure_one()\n appt_tz = pytz.timezone(self.appointment_tz)\n requested_tz = pytz.timezone(timezone)\n first_day = requested_tz.fromutc(datetime.utcnow() + relativedelta(hours=self.min_schedule_hours))\n last_day = requested_tz.fromutc(datetime.utcnow() + relativedelta(days=self.max_schedule_days))\n\n # Compute available slots (ordered)\n slots = self._slots_generate(first_day.astimezone(appt_tz), last_day.astimezone(appt_tz), timezone)\n if not employee or employee in self.employee_ids:\n self._slots_available(slots, first_day.astimezone(pytz.UTC), last_day.astimezone(pytz.UTC), employee)\n\n # Compute calendar rendering and inject available slots\n today = requested_tz.fromutc(datetime.utcnow())\n start = today\n month_dates_calendar = cal.Calendar(0).monthdatescalendar\n months = []\n while (start.year, start.month) <= (last_day.year, last_day.month):\n dates = month_dates_calendar(start.year, start.month)\n for week_index, week in enumerate(dates):\n for day_index, day in enumerate(week):\n mute_cls = weekend_cls = today_cls = None\n today_slots = []\n if day.weekday() in (cal.SUNDAY, cal.SATURDAY):\n weekend_cls = 'o_weekend'\n if day == today.date() and day.month == today.month:\n today_cls = 'o_today'\n if day.month != start.month:\n mute_cls = 'text-muted o_mute_day'\n else:\n # slots are ordered, so check all unprocessed slots from until > day\n while slots and (slots[0][timezone][0].date() <= day):\n if (slots[0][timezone][0].date() == day) and ('employee_id' in slots[0]):\n today_slots.append({\n 'employee_id': slots[0]['employee_id'].id,\n 'datetime': slots[0][timezone][0].strftime('%Y-%m-%d %H:%M:%S'),\n 'hours': slots[0][timezone][0].strftime('%H:%M')\n })\n slots.pop(0)\n dates[week_index][day_index] = {\n 'day': day,\n 'slots': today_slots,\n 'mute_cls': mute_cls,\n 'weekend_cls': weekend_cls,\n 'today_cls': today_cls\n }\n\n months.append({\n 'month': format_datetime(start, 'MMMM Y', locale=get_lang(self.env).code),\n 'weeks': dates\n })\n start = start + relativedelta(months=1)\n return months",
"def slot(self,num):\n if num in ApexAP1000.SLOTS:\n self.__slot=num\n else:\n raise ValueError('Bad slot number !')",
"def assignTaskQuotasGet(self, request, context, org_params,\n page_name, params, entity, **kwargs):\n\n from soc.modules.ghop.views.models.organization import view as org_view\n \n logic = params['logic']\n program_entity = logic.getFromKeyFieldsOr404(kwargs)\n \n org_params['list_template'] = ('modules/ghop/program/'\n 'allocation/allocation.html')\n org_params['list_heading'] = ('modules/ghop/program/'\n 'allocation/heading.html')\n org_params['list_row'] = 'modules/ghop/program/allocation/row.html'\n org_params['list_pagination'] = 'soc/list/no_pagination.html'\n org_params['list_description'] = self.DEF_TASK_QUOTA_ALLOCATION_MSG\n# TODO(LIST)\n\n return self.list(request, 'any_access', page_name=page_name, params=org_params)",
"def choose_time_slot():\n req = request.json\n firebase_uid = req[\"firebase_uid\"]\n ticket_id = req[\"complaint_id\"]\n db = firebase.database()\n db.child(\"user_data\").child(\n firebase_uid).child(\n \"Complaints\").child(\n ticket_id).update({\"Time Slot Chosen\": req[\"time_slot\"]\n })\n return jsonify({\"Status\": \"200\", \"Message\": \"successfully chosen time\"})",
"def _slots_generate(self, first_day, last_day, timezone):\n def append_slot(day, slot):\n local_start = appt_tz.localize(datetime.combine(day, time(hour=int(slot.hour), minute=int(round((slot.hour % 1) * 60)))))\n local_end = appt_tz.localize(\n datetime.combine(day, time(hour=int(slot.hour), minute=int(round((slot.hour % 1) * 60)))) + relativedelta(hours=self.appointment_duration))\n slots.append({\n self.appointment_tz: (\n local_start,\n local_end,\n ),\n timezone: (\n local_start.astimezone(requested_tz),\n local_end.astimezone(requested_tz),\n ),\n 'UTC': (\n local_start.astimezone(pytz.UTC).replace(tzinfo=None),\n local_end.astimezone(pytz.UTC).replace(tzinfo=None),\n ),\n 'slot': slot,\n })\n appt_tz = pytz.timezone(self.appointment_tz)\n requested_tz = pytz.timezone(timezone)\n\n slots = []\n for slot in self.slot_ids.filtered(lambda x: int(x.weekday) == first_day.isoweekday()):\n if slot.hour > first_day.hour + first_day.minute / 60.0:\n append_slot(first_day.date(), slot)\n slot_weekday = [int(weekday) - 1 for weekday in self.slot_ids.mapped('weekday')]\n for day in rrule.rrule(rrule.DAILY,\n dtstart=first_day.date() + timedelta(days=1),\n until=last_day.date(),\n byweekday=slot_weekday):\n for slot in self.slot_ids.filtered(lambda x: int(x.weekday) == day.isoweekday()):\n append_slot(day, slot)\n return slots",
"def schedulesiderooms(self, field):\n sande = list(filter(lambda stu: stu.sande, field))\n cit = list(filter(lambda stu: stu.citizen, field))\n\n # creates pools of players for sports and entertainemnt\n poolsande = [[] for _ in self.sandeschedule]\n for player in sande:\n for event in player.schedule:\n if event[0] == \"Sports & Entertain. Bee Buzzer Round\":\n poolsande[self.sandeschedule.index(event[1])].append(player)\n if event[0] == \"Sports & Entertainemnt Exam\":\n self.csarooms[self.csaexamschedule.index(event[1])][1].addplayer(player)\n event[2] = \"Exam Room\"\n\n # divides pool\n eig1 = list(filter(lambda stu: stu.division == '8', poolsande[0]))\n eig2 = list(filter(lambda stu: stu.division == '8', poolsande[1]))\n sev1 = list(filter(lambda stu: stu.division == '7', poolsande[0]))\n sev2 = list(filter(lambda stu: stu.division == '7', poolsande[1]))\n elm1 = list(filter(lambda stu: stu.division == 'Elementary', poolsande[0]))\n elm2 = list(filter(lambda stu: stu.division == 'Elementary', poolsande[1]))\n\n # puts players into rooms\n rn = list(reversed(self.usablerooms))\n self.sideroomhelp(rn, eig1, self.sanderooms[0])\n self.sideroomhelp(rn, sev1, self.sanderooms[0])\n self.sideroomhelp(rn, elm1, self.sanderooms[0])\n\n rn = list(reversed(self.usablerooms))\n self.sideroomhelp(rn, eig2, self.sanderooms[1])\n self.sideroomhelp(rn, sev2, self.sanderooms[1])\n self.sideroomhelp(rn, elm2, self.sanderooms[1])\n\n # create pools of players for citizenship bee\n poolcit = [[] for _ in self.citizenschedule]\n for player in cit:\n for event in player.schedule:\n if event[0] == \"Citizenship Bee Buzzer Round\":\n poolcit[self.citizenschedule.index(event[1])].append(player)\n if event[0] == \"Citizenship Bee Exam\":\n self.csarooms[self.csaexamschedule.index(event[1])][0].addplayer(player)\n event[2] = \"Exam Room\"\n\n # divides pool\n eig1 = list(filter(lambda stu: stu.division == '8', poolcit[0]))\n eig2 = list(filter(lambda stu: stu.division == '8', poolcit[1]))\n sev1 = list(filter(lambda stu: stu.division == '7', poolcit[0]))\n sev2 = list(filter(lambda stu: stu.division == '7', poolcit[1]))\n elm1 = list(filter(lambda stu: stu.division == 'Elementary', poolcit[0]))\n elm2 = list(filter(lambda stu: stu.division == 'Elementary', poolcit[1]))\n\n # puts players into rooms\n rn = list(reversed(self.usablerooms))\n self.sideroomhelp(rn, eig1, self.citizenrooms[0])\n self.sideroomhelp(rn, sev1, self.citizenrooms[0])\n self.sideroomhelp(rn, elm1, self.citizenrooms[0])\n\n rn = list(reversed(self.usablerooms))\n self.sideroomhelp(rn, eig2, self.citizenrooms[1])\n self.sideroomhelp(rn, sev2, self.citizenrooms[1])\n self.sideroomhelp(rn, elm2, self.citizenrooms[1])",
"def Scheduler():\n courses = \"cs108 cs112 cs214 stat343 cs336 cs300\".split()\n profs = \"norman adams schuurman pruim vanderlinden\".split()\n slots = \"mwf900 mwf1130 tth1030 tth130\".split()\n rooms = \"sb354 nh064\".split()\n \n variables = courses\n assignments = {}\n assignments['cs108'] = \"norman\"\n assignments['cs112'] = \"adams\"\n assignments['cs214'] = \"adams\"\n assignments['stat343'] = \"pruim\"\n assignments['cs336'] = \"vanderlinden\"\n assignments['cs300'] = \"schuurman\"\n neighbors = parse_neighbors(\"\"\"\n cs108: norman; cs112: adams; \n cs214: adams; stat343: pruim; \n cs336: vanderlinden; cs300: schuurman\n \"\"\", variables)\n domains = {}\n for course in courses:\n domains[course] = []\n for course in courses:\n for prof in profs:\n for room in rooms:\n for slot in slots:\n domains[course].append(prof + \" \" + room + \" \" + slot)\n \n for type in [courses]:\n for A in type:\n for B in type:\n if A != B:\n if B not in neighbors[A]:\n neighbors[A].append(B)\n if A not in neighbors[B]:\n neighbors[B].append(A)\n\n def scheduler_constraints(A, a, B, b, recurse=0):\n ADomain = a.split()\n BDomain = b.split()\n A_Prof = ADomain[0]\n B_Prof = BDomain[0]\n A_Room = ADomain[1]\n B_Room = BDomain[1]\n A_Slot = ADomain[2]\n B_Slot = BDomain[2]\n A_Course = A\n B_Course = B\n \n if(A_Prof == B_Prof and A_Slot == B_Slot):\n return False\n if(A_Room == B_Room and A_Slot == B_Slot):\n return False\n\n if('norman' in a and A == 'cs108'):\n return True\n if('adams' in a and A == 'cs112'):\n return True\n if('adams' in a and A == 'cs214'):\n return True\n if('pruim' in a and A == 'stat343'):\n return True\n if('vanderlinden' in a and A == 'cs336'):\n return True\n if('schuurman' in a and A == 'cs300'):\n return True\n if(A in courses and B in courses):\n return False\n if(recurse == 0):\n return scheduler_constraints(B, b, A, a, 1)\n return True\n \n return CSP(variables, domains, neighbors, scheduler_constraints)",
"def post(self):\n request, error_message = flask_request_response.message_request(\n _api_intput_pb2.AssignTask, ASSIGN_TASK_API, POST_REQUEST\n )\n if error_message is not None:\n return flask_request_response.error_response(\n [error_message[\"err_message\"]], ASSIGN_TASK_API, POST_REQUEST\n )\n try:\n app.logger.error(\"In API calling assign_task_query_response\")\n assign_task_response = assign_task_query_response(\n request.assigned_by, request.assigned_to_list,\n request.chapter_key\n )\n app.logger.info(assign_task_response)\n return flask_request_response.json_response(\n assign_task_response,\n ASSIGN_TASK_API, POST_REQUEST, 200\n )\n except Exception as err:\n return flask_request_response.error_response(\n [str(err)], ASSIGN_TASK_API, POST_REQUEST\n )",
"def test_generate_slots_for_interviewer_free_time_with_generated_slots(self):\n client = Client()\n client.login(\n email=self.teacher_admin.email,\n password='123'\n )\n url = reverse('admin:course_interviews_interviewslot_changelist')\n response = client.get(url, follow=True)\n\n result_list_before_slot_generation = response.context_data['cl'].result_list\n\n interview_length = 30\n break_between_interviews = 10\n interview_slots_generator = GenerateInterviewSlots(\n interview_length, break_between_interviews)\n interview_slots_generator.generate_interview_slots()\n\n response = client.get(url, follow=True)\n result_list_after_slot_generation = response.context_data['cl'].result_list\n\n self.assertCountEqual(result_list_before_slot_generation, result_list_after_slot_generation)",
"def addToReservation():\n\n def fits(x, y):\n \"\"\"\n Check if a job shape's resource requirements will fit within a given node allocation\n \"\"\"\n return y.memory <= x.memory and y.cores <= x.cores and y.disk <= x.disk\n\n def subtract(x, y):\n \"\"\"\n Adjust available resources of a node allocation as a job is scheduled within it.\n \"\"\"\n return Shape(x.wallTime, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk)\n\n def split(x, y, t):\n \"\"\"\n Partition a node allocation into two\n \"\"\"\n return (Shape(t, x.memory - y.memory, x.cores - y.cores, x.disk - y.disk),\n NodeReservation(Shape(x.wallTime - t, x.memory, x.cores, x.disk)))\n\n i = 0 # Index of node reservation\n while True:\n # Case a new node reservation is required\n if i == len(nodeReservations):\n x = NodeReservation(subtract(nodeShape, jS))\n nodeReservations.append(x)\n t = nodeShape.wallTime\n while t < jS.wallTime:\n y = NodeReservation(x.shape)\n t += nodeShape.wallTime\n x.nReservation = y\n x = y\n return\n\n # Attempt to add the job to node reservation i\n x = nodeReservations[i]\n y = x\n t = 0\n \n while True:\n if fits(y.shape, jS):\n t += y.shape.wallTime\n \n # If the jS fits in the node allocation from x to y\n if t >= jS.wallTime:\n t = 0\n while x != y:\n x.shape = subtract(x.shape, jS)\n t += x.shape.wallTime\n x = x.nReservation\n assert x == y\n assert jS.wallTime - t <= x.shape.wallTime\n if jS.wallTime - t < x.shape.wallTime:\n x.shape, nS = split(x.shape, jS, jS.wallTime - t)\n nS.nReservation = x.nReservation\n x.nReservation = nS\n else:\n assert jS.wallTime - t == x.shape.wallTime\n x.shape = subtract(x.shape, jS)\n return \n \n # If the job would fit, but is longer than the total node allocation\n # extend the node allocation\n elif y.nReservation == None and x == nodeReservations[i]:\n # Extend the node reservation to accommodate jS\n y.nReservation = NodeReservation(nodeShape)\n \n else: # Does not fit, reset\n x = y.nReservation\n t = 0\n \n y = y.nReservation\n if y is None:\n # Reached the end of the reservation without success so stop trying to\n # add to reservation i\n break\n i += 1",
"def reserve(self):\n assert self.is_available() is True, \"this slot is not available\"",
"def createRoomCallback():\n request_data = {}\n\n # get GET params\n global room_creation_params\n pid = room_creation_params[\"profid\"] # value: id integer\n q = room_creation_params[\"questions\"] # value: list of question obj -> title, choices, dbsrc\n\n # business logic\n # print(pid)\n # print(json.loads(q))\n # translate data to format in model.py tables -> profid, questionid, roomid, question, choices -> qid and rid to be generated in Room.py\n # request_data = {\"profid\": \"\", \"question X\":{\"\":\"\"}, } \n request_data[\"profid\"] = pid\n question_list = []\n for question_obj in json.loads(q):\n translated_qn = {} # create temp question object that stores formatted questions to be added to data to be requested\n translated_qn[\"question\"] = question_obj[\"title\"]\n translated_qn[\"choices\"] = question_obj[\"choices\"]\n question_list.append(translated_qn)\n \n request_data[\"questions\"] = question_list\n print(request_data)\n # send request to Room.py with data to be mutated in graphql\n response = requests.post( room_URL + \"create\", data=json.dumps(request_data) ) \n if response.status_code == 200:\n message = json.dumps(response.json())\n amqp_setup.channel.basic_publish(exchange=amqp_setup.exchangename, routing_key=\"game.activity\", body=message)\n else:\n message = { \"Error\": response.reason, \"Code\": response.status_code }\n amqp_setup.channel.basic_publish(exchange=amqp_setup.exchangename, routing_key=\"game.error\", body=json.dumps(message))\n\n # print response code, get all rooms (to check + to log)\n\n # redirect to manageRoom\n print(\"redirecting to manageRoom now\")\n return redirect(\"https://127.0.0.1:8080/manageRoom\")",
"def generate_slots(request):\n if request.method == 'POST':\n form = BulkCreateSlotsForm(request.POST)\n if not form.is_valid():\n return SlotAdmin.render_bulk_slots_form(request, form)\n\n instr = form.cleaned_data['instrument']\n start_date = form.cleaned_data['start_date']\n start_time = form.cleaned_data['start_time']\n end_time = form.cleaned_data['end_time']\n duration = form.cleaned_data['slot_duration']\n day_count = int(form.cleaned_data['for_the_next'])\n\n total, created = Slot.objects.bulk_create_slots(instr, start_date, start_time, end_time, duration,\n day_count)\n\n if total == created:\n messages.success(request, \"All slots were created successfully.\")\n else:\n messages.warning(request, f\"{created} out of {total} slots created. Some slots may not have been created\"\n f\" due to clashes with existing slots.\")\n return redirect(\"..\")\n else:\n form = BulkCreateSlotsForm()\n return SlotAdmin.render_bulk_slots_form(request, form)",
"def _sc_get_operational_slots(self):\n if self.__verbose_testing:\n print('##### test_sc_get_operational_slots')\n operational_models.OperationalSlot.objects.reset_ids_counter()\n\n # 1) non-existant Spacecraft\n self.assertRaises(\n models.ObjectDoesNotExist,\n jrpc_sc_scheduling.get_operational_slots,\n 0\n )\n\n # 2) basic test, should not generate slots until the GS is added,\n # raising an exception to confirm it\n self.assertEqual(\n jrpc_sc_chs.sc_channel_create(\n spacecraft_id=self.__sc_1_id,\n channel_id=self.__sc_1_ch_1_id,\n configuration=self.__sc_1_ch_1_cfg\n ), True, 'Channel should have been created!'\n )\n self.assertRaises(\n Exception,\n jrpc_sc_scheduling.get_operational_slots,\n self.__sc_1_ch_1_id\n )\n\n # 3) basic test, should generate 2 FREE slots\n self.assertEqual(\n jrpc_gs_chs.gs_channel_create(\n groundstation_id=self.__gs_1_id,\n channel_id=self.__gs_1_ch_1_id,\n configuration=self.__gs_1_ch_1_cfg\n ), True, 'Channel should have been created!'\n )\n\n date_i = misc.get_today_utc() + datetime.timedelta(days=1)\n date_f = misc.get_today_utc() + datetime.timedelta(days=366)\n\n now = misc.get_now_utc()\n s_time = now + datetime.timedelta(minutes=30)\n e_time = now + datetime.timedelta(minutes=45)\n\n jrpc_rules.add_rule(\n self.__gs_1_id, self.__gs_1_ch_1_id,\n db_tools.create_jrpc_daily_rule(\n date_i=date_i,\n date_f=date_f,\n starting_time=s_time,\n ending_time=e_time\n )\n )\n\n actual = jrpc_sc_scheduling.get_operational_slots(self.__sc_1_id)\n expected = {\n self.__sc_1_ch_1_id: {\n self.__gs_1_ch_1_id: {\n segment_serializers.GS_ID_K: self.__sc_1_id,\n jrpc_sch_serial.SLOTS_K: [{\n jrpc_sch_serial.SLOT_IDENTIFIER_K: '1',\n jrpc_sch_serial.STATE_K: operational_models.STATE_FREE,\n jrpc_sch_serial.DATE_START_K: (\n s_time + datetime.timedelta(days=1)\n ).isoformat(),\n jrpc_sch_serial.DATE_END_K: (\n e_time + datetime.timedelta(days=1)\n ).isoformat()\n }, {\n jrpc_sch_serial.SLOT_IDENTIFIER_K: '2',\n jrpc_sch_serial.STATE_K: operational_models.STATE_FREE,\n jrpc_sch_serial.DATE_START_K: (\n s_time + datetime.timedelta(days=2)\n ).isoformat(),\n jrpc_sch_serial.DATE_END_K: (\n e_time + datetime.timedelta(days=2)\n ).isoformat()\n }]\n }\n }\n }\n self.assertEqual(actual, expected, 'Expected different slots!')\n\n # ### clean up\n self.assertTrue(\n jrpc_gs_chs.gs_channel_delete(\n groundstation_id=self.__gs_1_id,\n channel_id=self.__gs_1_ch_1_id\n ),\n 'Could not delete GroundStationChannel = ' + str(\n self.__gs_1_ch_1_id\n )\n )\n self.assertTrue(\n jrpc_sc_chs.sc_channel_delete(\n spacecraft_id=self.__sc_1_id,\n channel_id=self.__sc_1_ch_1_id\n ),\n 'Could not delete SpacecraftChannel = ' + str(\n self.__sc_1_ch_1_id\n )\n )",
"def generate_slot(slot_name, slot_description, slot_raw_filename):\n slot = {\n 'enumerationValues': [],\n \"name\": slot_name,\n \"description\": slot_description\n }\n slot_raw_vals = read_raw_vals(slot_raw_filename)\n for slot_val in slot_raw_vals:\n slot['enumerationValues'].append({'value': slot_val})\n\n return slot",
"def post(self):\n user = self.current_user\n data = self.get_json_body()\n port = int(data.get('port', 0))\n user.spawner.current_port = port\n self.finish(json.dumps({\"message\": \"YarnSpawner port configured\"}))\n self.set_status(201)",
"def _slots_available(self, slots, first_day, last_day, employee=None):\n\n def is_work_available(start_dt, end_dt, intervals):\n \"\"\" check if the slot is contained in the employee's work hours (defined by intervals)\n \"\"\"\n def find_start_index():\n \"\"\" find the highest index of intervals for which the start_date (element [0]) is before (or at) start_dt\n \"\"\"\n def recursive_find_index(lower_bound, upper_bound):\n if upper_bound - lower_bound <= 1:\n if intervals[upper_bound][0] <= start_dt:\n return upper_bound\n return lower_bound\n index = (upper_bound + lower_bound) // 2\n if intervals[index][0] <= start_dt:\n return recursive_find_index(index, upper_bound)\n else:\n return recursive_find_index(lower_bound, index)\n\n if start_dt <= intervals[0][0] - tolerance:\n return -1\n if end_dt >= intervals[-1][1] + tolerance:\n return -1\n return recursive_find_index(0, len(intervals) - 1)\n\n if not intervals:\n return False\n\n tolerance = timedelta(minutes=1)\n start_index = find_start_index()\n if start_index != -1:\n for index in range(start_index, len(intervals)):\n if intervals[index][1] >= end_dt - tolerance:\n return True\n if len(intervals) == index + 1 or intervals[index + 1][0] - intervals[index][1] > tolerance:\n return False\n return False\n\n def is_calendar_available(slot, events, employee):\n \"\"\" Returns True if the given slot doesn't collide with given events for the employee\n \"\"\"\n start_dt = slot['UTC'][0]\n end_dt = slot['UTC'][1]\n\n event_in_scope = lambda ev: (\n fields.Date.to_date(ev.start) <= fields.Date.to_date(end_dt)\n and fields.Date.to_date(ev.stop) >= fields.Date.to_date(start_dt)\n )\n\n for ev in events.filtered(event_in_scope):\n if ev.allday:\n # allday events are considered to take the whole day in the related employee's timezone\n event_tz = pytz.timezone(ev.event_tz or employee.user_id.tz or self.env.user.tz or slot['slot'].appointment_type_id.appointment_tz or 'UTC')\n ev_start_dt = datetime.combine(fields.Date.from_string(ev.start_date), time.min)\n ev_stop_dt = datetime.combine(fields.Date.from_string(ev.stop_date), time.max)\n ev_start_dt = event_tz.localize(ev_start_dt).astimezone(pytz.UTC).replace(tzinfo=None)\n ev_stop_dt = event_tz.localize(ev_stop_dt).astimezone(pytz.UTC).replace(tzinfo=None)\n if ev_start_dt < end_dt and ev_stop_dt > start_dt:\n return False\n elif fields.Datetime.to_datetime(ev.start) < end_dt and fields.Datetime.to_datetime(ev.stop) > start_dt:\n return False\n return True\n\n workhours = {}\n meetings = {}\n\n # With context will be used in resource.calendar to force the referential user\n # for work interval computing to the *user linked to the employee*\n available_employees = [emp.with_context(tz=emp.user_id.tz) for emp in (employee or self.employee_ids)]\n random.shuffle(available_employees)\n for slot in slots:\n for emp_pos, emp in enumerate(available_employees):\n if emp_pos not in workhours:\n workhours[emp_pos] = [\n (interval[0].astimezone(pytz.UTC).replace(tzinfo=None),\n interval[1].astimezone(pytz.UTC).replace(tzinfo=None))\n for interval in emp.resource_calendar_id._work_intervals_batch(\n first_day, last_day, resources=emp.resource_id,\n )[emp.resource_id.id]\n ]\n\n if is_work_available(slot['UTC'][0], slot['UTC'][1], workhours[emp_pos]):\n if emp_pos not in meetings:\n # note: no check is made on the attendee's status (accepted/declined/...)\n meetings[emp_pos] = self.env['calendar.event'].search([\n ('partner_ids.user_ids', '=', emp.user_id.id),\n ('start', '<', 
fields.Datetime.to_string(last_day.replace(hour=23, minute=59, second=59))),\n ('stop', '>', fields.Datetime.to_string(first_day.replace(hour=0, minute=0, second=0)))\n ])\n\n if is_calendar_available(slot, meetings[emp_pos], emp):\n slot['employee_id'] = emp\n break",
"def testMainScheduler(self):\n # ARRANGE\n\n numGuardsToAllocate = 3\n guardsAllocated = []\n \n entries = []\n entries.append(GuardEntry(\"Mike\", 0, 12))\n entries.append(GuardEntry(\"Ray\", 3, 9))\n entries.append(GuardEntry(\"Dave\", 4, 8))\n\n # 12 slots 8pm to 2am\n numTimeSlots = 12\n \n # ACT\n\n # Setup the schedule\n (schedule, guardsAllocated) = createSchedule(entries, numTimeSlots)\n timeSlots = schedule.getSchedule()\n \n # ASSERT\n\n # Print details of the schedule\n timeSlotIdx = 0\n print(\"Time Slot,Guard ID\")\n for slot in timeSlots:\n print(str(timeSlotIdx) + \",\" + str(slot.guardID))\n timeSlotIdx += 1\n self.assertTrue(len(guardsAllocated) == 3)",
"def cluster_addslotsrange(\n self, target_node: \"TargetNodesT\", *slots: EncodableT\n ) -> ResponseT:\n return self.execute_command(\n \"CLUSTER ADDSLOTSRANGE\", *slots, target_nodes=target_node\n )",
"def handle_book_slot(time=None, name='default'):\n # Make request here\n print('in book slot')\n if not time:\n return question('You didn\\'t specify the time. Try again.')\n else:\n slot_date = session.attributes.get('date', None)\n params = {\n 'starttime': time,\n 'bookedbyuser': name,\n 'date': slot_date\n }\n print(params)\n session.attributes['stage'] = 'book_slot'\n session.attributes['slot_params'] = params\n return question('You want to book at ' + time + ' Is that correct?')",
"def cluster_slots() -> Dict[str, Any]:\n # TODO: refactor tests to not use cli singleton auth.\n certs.cli_cert = certs.default_load(conf.make_master_url())\n authentication.cli_auth = authentication.Authentication(conf.make_master_url())\n r = api.get(conf.make_master_url(), \"api/v1/agents\")\n assert r.status_code == requests.codes.ok, r.text\n jvals = r.json() # type: Dict[str, Any]\n return {agent[\"id\"]: agent[\"slots\"].values() for agent in jvals[\"agents\"]}"
] | [
"0.8256426",
"0.5762987",
"0.56316525",
"0.54743224",
"0.53757113",
"0.5314603",
"0.5236988",
"0.52270603",
"0.52103895",
"0.51965916",
"0.5142545",
"0.5133743",
"0.5121921",
"0.5057504",
"0.49842697",
"0.49663857",
"0.49040845",
"0.4877993",
"0.48507854",
"0.4843296",
"0.48183748",
"0.48157948",
"0.47979757",
"0.47947788",
"0.47750336",
"0.47555983",
"0.47493982",
"0.473427",
"0.47313687",
"0.4722882"
] | 0.7803846 | 1 |
Sets the slots attribute for each organization entity | def assignSlots(request, *args, **kwargs):
# Setup an artifical request deadline
timelimit = int(request.REQUEST.get("timelimit", 20000))
timekeeper = Timekeeper(timelimit)
program_key = request.REQUEST.get("programkey")
last_key = request.REQUEST.get("lastkey", "")
program = program_logic.getFromKeyName(program_key)
# Copy for modification below
params = request.POST.copy()
params["timelimit"] = timelimit
# Parse the JSON org:slots dictionary
slots = simplejson.loads(program.slots_allocation)
org_keys = [i for i in sorted(slots.keys()) if i > last_key]
logging.info(org_keys)
# Assign slots for each organization
try:
for clock, org_key in timekeeper.iterate(org_keys):
logging.info("%s %s %s", request.path, clock, org_key)
org_slots = slots[org_key]
# Get the organization entity
org = org_logic.getFromKeyFields({
'link_id': org_key,
'scope_path': program_key,
})
if not org:
logging.error("no such org '%s'/'%s'" % (program_key, org_key))
continue
# Count proposals and mentors
org.slots = int(org_slots['slots'])
org.nr_applications, org.nr_mentors = countProposals(org)
# Update the organization entity
org.put()
# Mark the organization as done
last_key = org_key
# Requeue this task for continuation
except DeadlineExceededError:
params["lastkey"] = last_key
taskqueue.add(url=request.path, params=params)
# Exit this task successfully
return responses.terminateTask() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def occupy_slot(self, slot, vehicle):\n self.__occupied_slots__[slot.slot_number] = vehicle.registration_number, vehicle.color\n self.__vehicle_slot_mapping__[vehicle.registration_number] = slot.slot_number\n self.__available_slots__.remove(slot)",
"def extend_slots(self, prediction, item):\n spec = prediction.phrasal_pattern[0]\n slots = prediction.slots\n if is_role_specifier(spec):\n new_slots = copy(slots)\n new_slot = self.role_specifier(spec)\n if new_slot in new_slots:\n raise DuplicateSlotError('Slot %s already exists in %s.' % (\n new_slot, prediction))\n new_slots[new_slot] = item\n return new_slots\n else:\n return slots",
"def __init__(self, slots):\n self.slots = slots\n self.table = []\n for i in range(slots):\n self.table.append([])",
"def __init__(self, slots):\n self.slots = slots\n self.table = []\n for i in range(slots):\n self.table.append([])",
"def timeslot(self, timeslot: List[TimeslotTimeslot]):\n\n self._timeslot = timeslot",
"def fillSlots(self, name, stan):\n if self._slotData is None:\n self._slotData = {}\n self._slotData[name] = stan",
"def set_org_and_space_dicts(self, org_dict, space_dict):\n self._space = space_dict\n self._org = org_dict\n return self",
"def _get_slot_and_set_to_optimizer(self, layer_name):\n for slot_name in self._allowed_slot_names:\n param_name = get_slot_table_name(layer_name, slot_name)\n indices = self._tls._unique_ids_all_layers[layer_name]\n slot_value = self._lookup_embedding_func(param_name, indices)\n # self._create_slot_variable creates a slot variable in tf\n # optimizer and set slot_value to it.\n self._create_slot_variable(layer_name, slot_name, slot_value)",
"def __init__(self, **kwargs):\n slots = self.GetAllSlots()\n for (key, value) in kwargs.items():\n if key not in slots:\n raise TypeError(\"Object %s doesn't support the parameter '%s'\" %\n (self.__class__.__name__, key))\n setattr(self, key, value)",
"def _configure(self):\n Values._configure(self)\n self.values = [self.inventory.one, self.inventory.two]\n return",
"def _reserve_bsa_slot(self):\n self._run_cmd(CAPUT + \" IOC:IN20:EV01:EDEFNAME \" + '\"' + self._edef_name + '\"')",
"def fill(self, mc, n_slots):\n for i in range(n_slots):\n self.put(mc.clone())",
"def assignProgramSlots(request, *args, **kwargs):\n\n program = None\n params = request.REQUEST\n\n # Query the program entity\n try:\n program = program_logic.getFromKeyName(params[\"programkey\"])\n except KeyError:\n logging.error(\"programkey not in params\")\n return responses.terminateTask()\n\n if not program:\n logging.error(\"no such program '%s'\" % params[\"programkey\"])\n return responses.terminateTask()\n\n if not program.slots_allocation:\n logging.error(\"empty slots_allocation\")\n return responses.terminateTask()\n\n # Enqueue a task to assign the slots\n taskqueue.add(\n url = \"/gsoc/tasks/assignslots/assign\",\n params = {\n 'programkey': params[\"programkey\"],\n })\n\n # Return successful\n return responses.terminateTask()",
"def set_slot(self, slot, natid, level=None):\n enc = {'natid': natid}\n if self.game.is_dpp():\n if level is not None:\n enc['level'] = level\n self.walking.normal[slot].from_dict(enc)\n if self.game.is_gen(4):\n self.walking.morning[slot].from_dict(enc)\n self.walking.day[slot].from_dict(enc)\n self.walking.night[slot].from_dict(enc)\n # TODO: HGSS specials\n else:\n if level is not None:\n enc['minlevel'] = enc['maxlevel'] = level\n # TODO: BW specials\n self.walking.normal[slot].from_dict(enc)\n if self.game.is_hgss() and level is not None:\n self.walking.levels[slot] = level",
"def random_assign(self, person, room_set):\n random_room = self.random_select(room_set)\n while room_set[random_room]['room'].allocate_room_space() == -1:\n random_room = self.random_select(room_set) # pragma: no cover\n if self.all_rooms[random_room]['room'].room_type == \"LivingSpace\":\n person.set_livingspace(\n self.living_spaces[random_room]['room'].name)\n occupant = person.name + \"\\t\" + person.email\n room_set[random_room]['occupants'].append(occupant)\n elif self.all_rooms[random_room]['room'].room_type == \"OfficeSpace\":\n occupant = person.name + \"\\t\" + person.email\n person.set_office(self.offices[random_room]['room'].name)\n room_set[random_room]['occupants'].append(occupant)",
"def initSlotObjectDict(cls):\n restslotattributedict.update(dict({extension_tunnel: \"name\"}))\n restslotattributedict.update(dict({extension_circuit: \"name\"}))\n restslotattributedict.update(dict({extension_ip_interface: \"name\"}))\n restslotattributedict.update(dict({extension_ip_route: \"name\"}))\n restslotattributedict.update(dict({gigabitethernet: \"name\"}))\n restslotattributedict.update(dict({blade: \"slot_number\"}))",
"def organization(self, organization):\n\n self._organization = organization",
"def organization(self, organization):\n\n self._organization = organization",
"def organization(self, organization):\n\n self._organization = organization",
"def num_slots(self, num_slots):\n\n self._num_slots = num_slots",
"def add_slot(self, slot):\n slot.set_location(len(self.slots)+1)\n self.slots.append(slot)",
"def set_atoms(self, atoms):\r\n self.__atoms = atoms",
"def test_putorganizations_item(self):\n pass",
"def slot(self,num):\n if num in ApexAP1000.SLOTS:\n self.__slot=num\n else:\n raise ValueError('Bad slot number !')",
"def test_modify_slot_site(self):\n slot = SLOT_FACTORY.create_slot()\n FlyerPlacement.objects.create(site_id=2, slot=slot,\n send_date=next_flyer_date())\n slot.site = Site.objects.get(id=3)\n with self.assertRaises(ValidationError) as context_manager:\n slot.save()\n self.fail('Slot with flyer placements allowed site update.')\n LOG.debug(context_manager.exception)",
"def test_save_slot_same_start_end(self):\n business = BUSINESS_FACTORY.create_business()\n with self.assertRaises(ValidationError) as context_manager:\n Slot.objects.create(site_id=2, business_id=business.id,\n start_date=datetime.date.today(),\n end_date=datetime.date.today())\n self.fail('Invalid slot saved.')\n LOG.debug(context_manager.exception)",
"def test_save_slot(self):\n business = BUSINESS_FACTORY.create_business()\n slot = Slot.objects.create(site_id=2, business_id=business.id,\n start_date = datetime.date.today(),\n end_date = datetime.date.today() + datetime.timedelta(1))\n LOG.debug(slot)\n self.assertTrue(slot.id)\n self.assertEqual(slot.renewal_rate, 10)\n self.assertEqual(slot.is_autorenew, False)",
"def enclosure_disk_slots(self, enclosure_disk_slots):\n\n self._enclosure_disk_slots = enclosure_disk_slots",
"def slot_mappings(self):\n # type: () -> Dict[Text: Union[Dict, List[Dict]]]\n\n return {\"name\": [self.from_entity(entity=\"name\"),\n self.from_text()],\n \"roomcount\": [self.from_entity(entity=\"roomcount\"),\n self.from_text()],\n \"roomtype\": [self.from_entity(entity=\"roomtype\"),\n self.from_text()]}",
"def slot_owns_changed(self, orderbook, _dummy):\r\n pass"
] | [
"0.5510804",
"0.5312831",
"0.52753824",
"0.52753824",
"0.5273821",
"0.52403814",
"0.5212397",
"0.51739573",
"0.51648086",
"0.51483923",
"0.51372176",
"0.51309526",
"0.5111528",
"0.5084358",
"0.5033539",
"0.49991065",
"0.4996006",
"0.4996006",
"0.4996006",
"0.49839744",
"0.49473614",
"0.49394053",
"0.49128547",
"0.4899443",
"0.4874174",
"0.48685294",
"0.4865335",
"0.4860095",
"0.48599964",
"0.48595196"
] | 0.6378653 | 0 |
receive batch from replay and transfer batch from cpu to gpu | def sample_batch(pid, args, batch_queue, port_dict, device, actor_id_to_ip_dataport, local_size, cache_array):
def recv_data(k, data_stream, actor_set, real_data_tasks_i):
for real_data in data_stream:
tmp = []
tmp.append(real_data.state)
tmp.append(real_data.action)
tmp.append(real_data.reward)
tmp.append(real_data.next_state)
tmp.append(real_data.done)
tmp.append(actor_set[k]['w'][real_data.idx])
tmp.append(actor_set[k]['i'][real_data.idx])
tmp.append(actor_set[k]['t'][real_data.idx])
tmp.append(real_data.timestamp)
local_dict[actor_set[k]['i'][real_data.idx]] = tmp
cache_array[actor_set[k]['i'][real_data.idx]] |= 2**pid
decom_state = torch.FloatTensor(np.frombuffer(zlib.decompress(real_data.state), dtype=np.uint8).reshape((1, 4, 84, 84)))
real_data_tasks_i['states'].append(decom_state) #.to(device))
real_data_tasks_i['actions'].append(torch.LongTensor([real_data.action])) #.to(device))
real_data_tasks_i['rewards'].append(torch.FloatTensor([real_data.reward])) #.to(device))
decom_next_state = torch.FloatTensor(np.frombuffer(zlib.decompress(real_data.next_state), dtype=np.uint8).reshape((1, 4, 84, 84)))
real_data_tasks_i['next_states'].append(decom_next_state) #.to(device))
real_data_tasks_i['dones'].append(torch.FloatTensor([real_data.done])) #.to(device))
real_data_tasks_i['batch_weights'].append(torch.FloatTensor([actor_set[k]['w'][real_data.idx]])) #.to(device))
real_data_tasks_i['batch_idxes'].append(actor_set[k]['i'][real_data.idx])
# is the data overwrited?
real_data_tasks_i['batch_timestamp_store'].append(actor_set[k]['t'][real_data.idx])
real_data_tasks_i['batch_timestamp_real'].append(real_data.timestamp)
conn = grpc.insecure_channel(port_dict['replay_ip'] + ':' + port_dict['sampleDataPort'])
client = apex_data_pb2_grpc.SampleDataStub(channel=conn)
local_dict = {}
while True:
batch_timestamp_real = []
batch_timestamp_store = []
batch_weights = []
batch_idxes = []
states, actions, rewards, next_states, dones = [], [], [], [], []
res_batch = client.Send(apex_data_pb2.SampleDataRequest(batch_size=args.batch_size, beta = args.beta))
actor_ids, data_ids, timestamps, weights, idxes = res_batch.actor_ids, res_batch.data_ids, res_batch.timestamp, res_batch.weights, res_batch.idxes
actor_set = {}
cached_value = {'states':{},'actions':{},'rewards':{},'next_states':{},'dones':{},'batch_weights':{},'batch_idxes':{},'batch_timestamp_store':{},'batch_timestamp_real':{}}
for i in range(len(actor_ids)):
set_a = actor_set.get(actor_ids[i], False)
if set_a == False:
actor_set[actor_ids[i]] = {}
set_a = actor_set[actor_ids[i]]
set_a['d'] = []
set_a['w'] = []
set_a['i'] = []
set_a['t'] = []
cached_value['states'][actor_ids[i]] = []
cached_value['actions'][actor_ids[i]] = []
cached_value['rewards'][actor_ids[i]] = []
cached_value['next_states'][actor_ids[i]] = []
cached_value['dones'][actor_ids[i]] = []
cached_value['batch_weights'][actor_ids[i]] = []
cached_value['batch_idxes'][actor_ids[i]] = []
cached_value['batch_timestamp_store'][actor_ids[i]] = []
cached_value['batch_timestamp_real'][actor_ids[i]] = []
cache_id = actor_ids[i]*local_size+data_ids[i]
cache_trans = cache_array[cache_id]
if cache_trans & 2**pid == 0:
set_a['d'].append(data_ids[i])
set_a['w'].append(weights[i])
set_a['i'].append(idxes[i])
set_a['t'].append(timestamps[i])
if cache_trans == 0 and local_dict.get(cache_id, False) != False:
del local_dict[cache_id]
else:
try:
state_tmp = local_dict[cache_id][0]
action_tmp = local_dict[cache_id][1]
reward_tmp = local_dict[cache_id][2]
next_state_tmp = local_dict[cache_id][3]
done_tmp = local_dict[cache_id][4]
batch_weight_tmp = local_dict[cache_id][5]
batch_idx_tmp = local_dict[cache_id][6]
batch_store_tmp = local_dict[cache_id][7]
batch_real_tmp = local_dict[cache_id][8]
decom_state = torch.FloatTensor(np.frombuffer(zlib.decompress(state_tmp), dtype=np.uint8).reshape((1, 4, 84, 84)))
cached_value['states'][actor_ids[i]].append(decom_state)
cached_value['actions'][actor_ids[i]].append(torch.LongTensor([action_tmp]))
cached_value['rewards'][actor_ids[i]].append(torch.FloatTensor([reward_tmp]))
decom_next_state = torch.FloatTensor(np.frombuffer(zlib.decompress(next_state_tmp), dtype=np.uint8).reshape((1, 4, 84, 84)))
cached_value['next_states'][actor_ids[i]].append(decom_next_state)
cached_value['dones'][actor_ids[i]].append(torch.FloatTensor([done_tmp]))
cached_value['batch_weights'][actor_ids[i]].append(torch.FloatTensor([batch_weight_tmp]))
cached_value['batch_idxes'][actor_ids[i]].append(batch_idx_tmp)
cached_value['batch_timestamp_store'][actor_ids[i]].append(batch_store_tmp)
cached_value['batch_timestamp_real'][actor_ids[i]].append(batch_real_tmp)
except:
set_a['d'].append(data_ids[i])
set_a['w'].append(weights[i])
set_a['i'].append(idxes[i])
set_a['t'].append(timestamps[i])
real_data_links = {}
real_data_tasks = {}
for k, v in actor_set.items():
actor_ip, data_port = actor_id_to_ip_dataport[k]
conn_actor = grpc.insecure_channel(actor_ip + ':' + data_port)
client_actor = apex_data_pb2_grpc.SendRealDataStub(channel=conn_actor)
real_data_links[k] = client_actor.Send(apex_data_pb2.RealBatchRequest(idxes=v['d']))
real_data_tasks[k] = {}
real_data_tasks[k]['states'] = cached_value['states'][k]
real_data_tasks[k]['actions'] = cached_value['actions'][k]
real_data_tasks[k]['rewards'] = cached_value['rewards'][k]
real_data_tasks[k]['next_states'] = cached_value['next_states'][k]
real_data_tasks[k]['dones'] = cached_value['dones'][k]
real_data_tasks[k]['batch_weights'] = cached_value['batch_weights'][k]
real_data_tasks[k]['batch_idxes'] = cached_value['batch_idxes'][k]
real_data_tasks[k]['batch_timestamp_store'] = cached_value['batch_timestamp_store'][k]
real_data_tasks[k]['batch_timestamp_real'] = cached_value['batch_timestamp_real'][k]
threads = []
for k, v in real_data_links.items():
t = threading.Thread(target=recv_data, args=(k, v, actor_set, real_data_tasks[k],))
threads.append(t)
t.start()
for t in threads:
t.join()
for k, v in real_data_tasks.items():
states += v['states']
actions += v['actions']
rewards += v['rewards']
next_states += v['next_states']
dones += v['dones']
batch_weights += v['batch_weights']
batch_idxes += v['batch_idxes']
batch_timestamp_real += v['batch_timestamp_real']
batch_timestamp_store += v['batch_timestamp_store']
states = torch.cat(states,0).to(device)
actions = torch.cat(actions,0).to(device)
rewards = torch.cat(rewards,0).to(device)
next_states = torch.cat(next_states,0).to(device)
dones = torch.cat(dones,0).to(device)
batch_weights = torch.cat(batch_weights,0).to(device)
batch = [states, actions, rewards, next_states, dones, batch_weights, batch_idxes]
batch_queue.put(batch)
data, batch = None, None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def process(self, sess):\n global send_counter\n \n #sess.run(self.sync) # copy weights from shared to local\n rollout = self.pull_batch_from_queue()\n batch = process_rollout(rollout, gamma=0.99, lambda_=1.0)\n\n should_compute_summary = self.task == 0 and self.local_steps % 11 == 0\n\n if should_compute_summary:\n fetches = [self.summary_op, self.train_op]\n else:\n fetches = [self.train_op]\n\n\n feed_dict = {\n self.local_network.x: batch.si,\n self.ac: batch.a,\n self.adv: batch.adv,\n self.r: batch.r,\n self.local_network.state_in[0]: batch.features[0],\n self.local_network.state_in[1]: batch.features[1],\n }\n\n # Get current trainable variables\n # This is trainable variables\n fetched = sess.run(fetches, feed_dict=feed_dict)\n\n\n if self.num_workers > 1:\n sys.stdout.write('\\r' + str(self.local_steps))\n if self.local_steps % 100 == 0:\n global var0\n global var1\n var1 = sess.run(self.local_network.var_list) # After training\n if var0 != None:\n var_diff = [a - b for (a,b) in zip(var1, var0)]\n var_diff_data = pickle.dumps(var_diff, -1)\n print('Sync weights')\n self.msg_sent = socket_util.socket_send_data_chucks(self.sock, var_diff_data, self.mcast_destination, self.msg_sent)\n var0 = sess.run(self.local_network.var_list) # A list of numpy array\n\n # Handle each message in the socket queue\n while not self.inc_msg_q.empty():\n print('Apply remote gradients')\n # Process received grads_and_vars from other peers\n remote_var_diff_data = self.inc_msg_q.get(False)\n remote_var_diff = pickle.loads(remote_var_diff_data)\n\n add_op = [a+b for (a,b) in zip(self.local_network.var_list, remote_var_diff)]\n sess.run(add_op)\n\n if should_compute_summary:\n self.summary_writer.add_summary(tf.Summary.FromString(fetched[0]))\n self.summary_writer.flush()\n self.local_steps += 1",
"def forward_batch(self,batcher, phase=0):\n pass",
"def fetch_batch(learner_replay_queue, learner_local_queue):\n while True:\n data = learner_replay_queue.get()\n learner_local_queue.put(data)",
"def run(self):\n # Single GPU flag\n single_gpu = True if size()==1 else False\n print_rank(f\"Single GPU flag Client: {single_gpu}\", loglevel=logging.DEBUG)\n \n if not single_gpu:\n while True: # keeps listening for incoming server calls\n\n # Initialize tensors -- required by torch.distributed\n command, client_idx, mode = 0, 0, 0 # int\n lr, nround = torch.zeros(1), torch.zeros(1) # float\n\n # Read command\n command = _recv(command)\n print_rank(f\"Command received {command} on worker {rank()}\", loglevel=logging.DEBUG)\n\n # Receive server data -- lr, model_params\n if command == COMMAND_UPDATE:\n print_rank(f\"COMMMAND_UPDATE received {rank()}\", loglevel=logging.DEBUG) \n lr = _recv(lr, 0)\n model_params = _recv_gradients(0)\n nround = _recv(nround, 0)\n server_data = (lr, model_params, int(nround))\n print_rank(f\"Received lr: {lr} and n_params: {len(model_params)} - round {nround}\", loglevel=logging.DEBUG)\n \n elif command == COMMAND_TRAIN:\n print_rank(f\"COMMMAND_TRAIN received {rank()}\", loglevel=logging.DEBUG)\n \n # Init profiler in training worker\n profiler = None\n if self.do_profiling:\n profiler = cProfile.Profile()\n profiler.enable()\n \n # Receive client id from Server\n client_idx = _recv(client_idx)\n print_rank(f\"Cliend idx received from Server: {client_idx}\", loglevel=logging.DEBUG)\n\n # Instantiate client\n client_to_process = Client(\n [client_idx],\n self.config,\n self.config['client_config']['type'] == 'optimization') \n \n # Execute Client.get_data()\n client_data = client_to_process.get_client_data()\n\n # Execute Client.process_round()\n output = client_to_process.process_round(client_data, server_data, self.model, self.data_path)\n\n # Send output back to Server\n if dist.get_backend() == \"nccl\":\n # ASYNC mode -- enabled only for nccl backend\n ack = to_device(torch.tensor(1))\n dist.isend(tensor=ack, dst=0)\n _send_train_output(output)\n else:\n # SYNC mode -- gloo backend does not have a non-blocking way to check if the operation is completed\n gather_objects = [output for i in range(size())]\n output = [None for _ in gather_objects]\n dist.all_gather_object(output, gather_objects[rank()])\n\n # Some cleanup\n torch.cuda.empty_cache()\n torch.cuda.synchronize() if torch.cuda.is_available() else None\n\n if self.do_profiling:\n profiler.disable()\n print_profiler(profiler)\n\n elif command == COMMAND_TESTVAL:\n print_rank(f\"COMMMAND_TESTVAL received {rank()}\", loglevel=logging.DEBUG)\n\n # Init profiler in validation worker\n profiler = None\n if self.do_profiling:\n profiler = cProfile.Profile()\n profiler.enable()\n \n # Receive mode and client id from Server\n mode = _recv(mode)\n mode = \"test\" if mode == -2 else \"val\"\n client_idx = _recv(client_idx)\n print_rank(f\"Client idx received from Server: {client_idx}, {mode}\", loglevel=logging.DEBUG)\n \n # Get client and dataset\n clients = self.val_clients if mode == \"val\" else self.test_clients\n dataset = self.val_dataset if mode == \"val\" else self.test_dataset\n clients_queue = clients.copy()\n assert 0 <= client_idx < len(clients_queue)\n client_to_process = clients_queue.pop(client_idx)\n\n # Execute Client.get_data()\n client_data = client_to_process.get_client_data(dataset)\n \n # Execute Client.run_testvalidate()\n output = client_to_process.run_testvalidate(client_data, server_data, mode, self.model)\n\n # Send output back to Server\n if dist.get_backend() == \"nccl\":\n # ASYNC mode -- enabled only for nccl backend\n _, metrics, num_instances = output\n metrics['num']= 
{'value': float(num_instances), 'higher_is_better': False}\n output = metrics\n print_rank(f\"Worker {rank()} output {output}\", loglevel=logging.DEBUG)\n ack = to_device(torch.tensor(1))\n dist.isend(tensor=ack, dst=0)\n _send_metrics(output)\n else:\n # SYNC mode -- gloo backend does not have a non-blocking way to check if the operation is completed\n gather_objects = [output for i in range(size())]\n output = [None for _ in gather_objects]\n dist.all_gather_object(output, gather_objects[rank()])\n print_rank(f\"Worker {rank()} sent output back\", loglevel=logging.DEBUG)\n\n # Some cleanup\n torch.cuda.empty_cache()\n torch.cuda.synchronize() if torch.cuda.is_available() else None\n\n if self.do_profiling:\n profiler.disable()\n print_profiler(profiler)\n\n elif command == COMMAND_TERMINATE:\n print_rank(f\"COMMMAND_TERMINATE received {rank()}\", loglevel=logging.DEBUG)\n\n # Some cleanup\n torch.cuda.empty_cache()\n torch.cuda.synchronize() if torch.cuda.is_available() else None\n return\n\n elif command == COMMAND_SYNC_NODES: # Only for sync calls\n print_rank(f\"COMMMAND_SYNC_NODES received {rank()}\", loglevel=logging.DEBUG)\n\n gather_objects = [None for i in range(size())]\n output = [None for _ in gather_objects]\n dist.all_gather_object(output, gather_objects[rank()])\n print_rank(f\"Worker IDLE {rank()} sent dummy output back\", loglevel=logging.DEBUG)\n\n # Some cleanup\n torch.cuda.empty_cache()\n torch.cuda.synchronize() if torch.cuda.is_available() else None\n else:\n assert False, \"unknown command\"",
"def forward(self, batch):\n raise NotImplementedError",
"def forward(self, batch):\n raise NotImplementedError",
"def _train(self):\n epoch_training_time = 0\n epoch_metrics_time = 0\n self.epoch_ += 1\n for i_batch, sample_batched in enumerate(self.dataloader):\n self.global_step_ += 1\n batch_start_time = time.time()\n data_sample = sample_batched[0].to(self.device)\n\n # Get model samples, either from replay buffer or noise.\n if self.model_samples_ is None:\n self.model_samples_ = deque(\n [\n self.net_.sample_from_prior(\n data_sample.shape[0], device=self.device\n ).detach()\n ]\n )\n elif len(self.model_samples_) > self.max_replay:\n self.model_samples_.popleft()\n replay_sample = random.choices(\n self.model_samples_,\n # favor more recent samples:\n weights=list(range(1, len(self.model_samples_) + 1)),\n )[0]\n noise_sample = self.net_.sample_from_prior(\n replay_sample.shape[0], device=self.device\n )\n mask = torch.rand(replay_sample.shape[0]) < self.replay_prob\n while len(mask.shape) < len(replay_sample.shape):\n # Add extra feature-dims\n mask.unsqueeze_(dim=-1)\n\n model_sample = torch.where(\n mask.to(self.device), replay_sample, noise_sample\n )\n\n self.net_.eval()\n # Run at least one iteration\n model_sample = self.net_.sample_fantasy(\n model_sample,\n num_mc_steps=self.num_mc_steps,\n mc_dynamics=self.sampler,\n ).detach()\n\n self.model_samples_.append(model_sample)\n\n # Sanity checks:\n assert (\n data_sample.shape[1:] == self.net_.input_shape\n ), \"Data is incompatible with network.\"\n assert (\n model_sample.shape[1:] == data_sample.shape[1:]\n ), \"Model and data samples are incompatible.\"\n\n # Forward gradient:\n self.net_.train()\n self.net_.zero_grad()\n data_energy_mean = self.net_(data_sample).mean()\n model_energy = self.net_(model_sample)\n model_energy_mean = model_energy.mean()\n\n # Estimate the odds of the data's energy based on a normal fitted to\n # model samples:\n data_erf = torch.erf(\n (data_energy_mean - model_energy_mean) / model_energy.std()\n )\n\n objective = data_energy_mean - model_energy_mean\n objective.backward()\n torch.nn.utils.clip_grad.clip_grad_value_(self.net_.parameters(), 1e2)\n self.optimizer_.step()\n\n batch_training_time = time.time() - batch_start_time\n epoch_training_time += batch_training_time\n self.logger_(energy_diff=float(objective))\n self.logger_(data_erf=float(data_erf))\n\n tr_metrics_start_time = time.time()\n for callback in self.step_callbacks:\n callback(\n net=self.net_,\n data_sample=data_sample,\n model_sample=model_sample,\n epoch=self.epoch_,\n global_step=self.global_step_,\n validation=False,\n )\n tr_metrics_time = time.time() - tr_metrics_start_time\n epoch_metrics_time += tr_metrics_time\n if self.verbose:\n print(\n f\"on epoch {self.epoch_}, batch {i_batch}, data erf: {data_erf}, objective: {objective}\"\n )\n print(f\"model energy: {model_energy_mean} +- {model_energy.std()}\")\n print(f\"data energy: {data_energy_mean}\")\n print(\n f\"training time: {batch_training_time:0.3f}s, metrics time: {tr_metrics_time:0.3f}s\"\n )\n means = self.logger_.means()\n if self.verbose:\n print(f\"on epoch {self.epoch_}\")\n for k, v in means.items():\n print(f\"{k}: {v}\")\n self.logger_.flush()\n means[\"loss\"] = energy_model.utils.constraints.add_soft_constraint(\n means[\"loss_ais\"], means[\"data_erf\"], lower_bound=-1\n )\n return means",
"def forward_batch(model, batch, device):\n inputs, labels = (batch, batch)\n inputs, labels = inputs.to(device), labels.to(device)\n\n outputs = model(inputs, labels=labels)\n\n return outputs[:2]",
"def _batch_train(self, batch, training_step, step):\n lstm_size = (self.batch_size, self.Qmain.h_size)\n batch_mem = np.zeros(lstm_size)\n batch_carry = np.zeros(lstm_size)\n input_shape = (self.batch_size,\n self.trace_length,\n self.observation_size)\n m_data = np.vstack(batch[:, 0])\n m_data = m_data.reshape(input_shape)\n t_data = np.vstack(batch[:, 4])\n t_data = t_data.reshape(input_shape)\n q_input = [np.copy(batch_mem), np.copy(batch_carry), np.copy(m_data)]\n q1_input = [np.copy(batch_mem), np.copy(batch_carry), np.copy(t_data)]\n\n # Batch predict\n self.Qmain.trace_length.assign(self.trace_length)\n self.Qmain.dropout_rate.assign(0.0)\n self.Qtarget.trace_length.assign(self.trace_length)\n self.Qtarget.dropout_rate.assign(0.0)\n\n # Save the graph just the first time\n if training_step == 0:\n tf.summary.trace_on()\n\n # T batch predict\n pred = self.Qmain.model.predict(q_input,\n batch_size=self.batch_size)\n Q = pred[0]\n batch_bus = pred[1]\n batch_line = pred[2]\n batch_disp = pred[3]\n\n ## Log graph once and disable graph logging\n if training_step == 0:\n with self.tf_writer.as_default():\n tf.summary.trace_export(self.name + \"-graph\", step)\n\n # T+1 batch predict\n Qn, *_ = self.Qtarget.model.predict(q1_input,\n batch_size=self.batch_size)\n \n # Compute batch Q update to Qtarget\n for i in range(self.batch_size):\n idx = i * (self.trace_length - 1)\n a = batch[idx][1]\n grid = a[0]\n batch_bus[i][:] = a[1][:]\n batch_line[i][:] = a[2][:]\n batch_disp[i][:] = a[3][:]\n r = batch[idx][2]\n d = batch[idx][3]\n Q[i][grid] = r\n if d == False:\n Q[i][grid] += DISCOUNT_FACTOR * Qn[i][grid]\n\n # Batch train\n batch_x = [batch_mem, batch_carry, m_data]\n batch_y = [\n Q,\n batch_bus, batch_line, batch_disp,\n batch_mem, batch_carry\n ]\n loss = self.Qmain.model.train_on_batch(batch_x, batch_y)\n loss = loss[0]\n\n # Log to tensorboard\n self._tf_log_summary(loss, step)",
"def feed_batch(self, generated_batch, generated_labels):\n _, self.act2, _ = self.inference_net(generated_batch.cuda(self.gpu_id))\n self.g_labels = generated_labels",
"def fetch_batch(self, phase):\n pass",
"def run(self):\n\n mconns: Dict[str, cb_bin_client.MemcachedClient] = {} # State kept across scatter_gather() calls.\n backoff_cap: int = self.opts.extra.get(\"backoff_cap\", 10)\n while not self.ctl['stop']:\n batch, future = self.pull_next_batch() # type: Optional[pump.Batch], pump.SinkBatchFuture\n if not batch:\n self.future_done(future, 0)\n self.close_mconns(mconns)\n return\n\n backoff = 0.1 # Reset backoff after a good batch.\n\n while batch: # Loop in case retry is required.\n rv, batch, need_backoff = self.scatter_gather(mconns, batch)\n if rv != 0:\n self.future_done(future, rv)\n self.close_mconns(mconns)\n return\n\n if batch:\n self.cur[\"tot_sink_retry_batch\"] = \\\n self.cur.get(\"tot_sink_retry_batch\", 0) + 1\n\n if need_backoff:\n backoff = min(backoff * 2.0, backoff_cap)\n logging.warning(f'backing off, secs: {backoff}')\n time.sleep(backoff)\n\n self.future_done(future, 0)\n\n self.close_mconns(mconns)",
"def _recv(self) -> List[np.ndarray]:",
"def train(self, batch):\n pass",
"def simulate_batch():\n this_run = op_util.current_run()\n util.ensure_dir(this_run.guild_path(\"proto\"))",
"def test_remote_buffer() -> None:\n # Prepare the input and output data\n shape_1 = (1, 3, 5)\n shape_2 = (7, 11)\n d_type_1 = np.dtype(\"float32\")\n d_type_2 = np.dtype(\"float16\")\n\n data: Dict[str, np.ndarray] = {}\n\n # Store and load data for the first tensor\n data[\"store_in_1\"] = np.random.rand(*shape_1).astype(d_type_1)\n data[\"load_in_1\"] = np.zeros(shape_1).astype(d_type_1)\n data[\"load_in_1_inplace\"] = np.zeros(shape_1).astype(d_type_1)\n # Store and load data for the second tensor\n data[\"store_in_2\"] = np.random.rand(*shape_2).astype(d_type_2)\n data[\"load_in_2\"] = np.zeros(shape_2).astype(d_type_2)\n # Store and load data for the third tensor\n data[\"store_in_3\"] = np.random.rand(*shape_2).astype(d_type_2)\n data[\"load_in_3_inplace\"] = np.zeros(shape_2).astype(d_type_2)\n\n ir, d2h_streams = build_model(data)\n\n # Get the tensor_ids\n labels = (\n \"load_in_1\",\n \"load_in_1_inplace\",\n \"load_out_1\",\n \"load_out_1_inplace\",\n \"load_in_2\",\n \"load_in_3_inplace\",\n \"load_out_2\",\n \"load_out_3_inplace\",\n )\n tensor_d2h = {label: d2h_streams[label] for label in labels}\n\n session = popxl.Session(ir, \"ipu_model\")\n with session:\n outputs = session.run()\n\n # Assert that the tensors are correct\n remote_load_scenarios = (\n \"1\",\n \"1_inplace\",\n \"2\",\n \"3_inplace\",\n )\n for scenario in remote_load_scenarios:\n print(f\"Now asserting remote load scenario {scenario}\")\n # Get data to assert\n store_in_data = data[f\"store_in_{scenario.replace('_inplace', '')}\"]\n load_in_data_before_op_call = data[f\"load_in_{scenario}\"]\n load_in_data_after_op_call = outputs[tensor_d2h[f\"load_in_{scenario}\"]]\n load_out_data = outputs[tensor_d2h[f\"load_out_{scenario}\"]]\n shape = shape_1 if \"1\" in scenario else shape_2\n d_type = d_type_1 if \"1\" in scenario else d_type_2\n inplace = True if \"inplace\" in scenario else False\n # Assert shape and type\n assert load_in_data_after_op_call.shape == shape\n assert load_in_data_after_op_call.dtype == d_type\n assert load_out_data.shape == shape\n assert load_out_data.dtype == d_type\n\n # Assert that the data has been loaded\n assert np.allclose(store_in_data, load_out_data)\n if inplace:\n # Assert that the load in data has been overwritten\n assert np.allclose(load_in_data_after_op_call, store_in_data)\n else:\n # Assert that the load in data has not been overwritten\n assert np.allclose(load_in_data_after_op_call, load_in_data_before_op_call)",
"def benchmark_synth_forward_batch1(self):\n params = self._shared_params()._replace(batch_size=1)\n self._run_benchmark(params)",
"def train(self):\n if len(self.buffer) >= self.batch_size:\n with torch.no_grad():\n states, actions, rewards, next_states, dones = self.buffer.sample(self.batch_size)\n\n # Send data to GPU\n states = torch.stack(states).to(self.device, dtype=torch.float)\n actions = torch.stack(actions).to(self.device, dtype=torch.float)\n rewards = torch.stack(rewards).to(self.device, dtype=torch.float)\n rewards = torch.reshape(rewards, (self.batch_size, 1))\n\n next_states = torch.stack(next_states).to(self.device, dtype=torch.float)\n dones = torch.stack(dones).to(self.device, dtype=torch.float)\n\n #TODO\n\n # Calculate target Q values using the Target Network\n selection = torch.argmax(self.main_dqn(next_states), dim = 1).unsqueeze(1)\n\n evaluation = self.target_dqn(next_states)\n evaluation = evaluation.gather(1, selection.long()) #size [256,1]\n\n #Create Done mask\n nonzero_indices = torch.nonzero(dones).reshape(-1).tolist()\n dones_mask = torch.eye(self.batch_size)\n for index in nonzero_indices:\n dones_mask[index,index] = 0\n dones_mask = dones_mask.to(self.device, dtype=torch.float)\n\n # Calculte target\n target = rewards + torch.matmul(dones_mask, evaluation*self.gamma)\n target = target.detach()\n\n # Calculate Q values using the Main Network\n if self.env.freely_moving:\n n_classes = self.env.number_of_action_channels * self.env.number_of_rows * self.env.number_of_columns\n else:\n n_classes = self.env.number_of_action_channels * 1 * self.env.nA\n\n n_samples = self.batch_size\n labels = torch.flatten(actions.type(torch.LongTensor), start_dim=0)\n labels_tensor = torch.as_tensor(labels)\n action_masks = torch.nn.functional.one_hot(labels_tensor, num_classes=n_classes).to(self.device, dtype=torch.float)\n\n q_value = action_masks * self.main_dqn(states)\n q_value = torch.sum(q_value, dim=-1).reshape((self.batch_size, 1))\n\n # Calculate loss\n loss = self.mse(target, q_value)\n\n # Optimize the model\n self.optimizer.zero_grad()\n loss.backward()\n torch.nn.utils.clip_grad_norm_(self.main_dqn.parameters(), 5)\n self.optimizer.step()\n\n # Soft Copy the Main Network's weights to the Target Network\n self.soft_update_of_target_network(self.main_dqn, self.target_dqn,tau=1e-3)\n\n return loss\n return 0",
"def _recv_reset(self):\n return (\n self._observation_out([conn.recv() for conn in self._conns]),\n tf.zeros((self.batch_size,), dtype=tf.float32),\n tf.zeros((self.batch_size,), dtype=tf.bool)\n )",
"def process_batch(self, batch):\n # shapes are [time, ...original dims...]\n v_global = np.stack(batch[:,0]) # [time, agents, l_state_one_agent]\n # note that *_local objects have shape\n # [time, agents, ...original dim...]\n obs_others = np.stack(batch[:,1]) # [time,agents,h,w,c] or [time, agents, obs_others]\n v_local = np.stack(batch[:,2]) # [time,agents,l]\n actions = np.stack(batch[:,3]) # [time,agents]\n reward = np.stack(batch[:,4]) # [time]\n reward_local = np.stack(batch[:,5]) # [time,agents]\n v_global_next = np.stack(batch[:,6]) # [time, agents, l_state_one_agent]\n obs_others_next = np.stack(batch[:,7]) # [time,agents,h,w,c]\n v_local_next = np.stack(batch[:,8]) # [time,agents,l]\n done = np.stack(batch[:,9]) # [time]\n goals = np.stack(batch[:,10]) # [time, agents, l_goal]\n\n batch = None\n \n n_steps = v_global.shape[0]\n \n # For all global quantities, for each time step,\n # duplicate values <n_agents> times for\n # batch processing of all agents\n reward = np.repeat(reward, self.n_agents, axis=0)\n\n # In-place reshape for *_local quantities,\n # so that one time step for one agent is considered\n # one batch entry\n if self.experiment == 'sumo':\n obs_others.shape = (n_steps*self.n_agents, self.h_obs,\n self.w_obs, self.c_obs)\n obs_others_next.shape = (n_steps*self.n_agents, self.h_obs,\n self.w_obs, self.c_obs)\n elif self.experiment == 'particle':\n obs_others.shape = (n_steps*self.n_agents, self.l_obs_others)\n obs_others_next.shape = (n_steps*self.n_agents, self.l_obs_others)\n v_local.shape = (n_steps*self.n_agents, self.l_obs)\n reward_local.shape = (n_steps*self.n_agents)\n v_local_next.shape = (n_steps*self.n_agents, self.l_obs)\n\n actions_1hot, actions_others_1hot = self.process_actions(n_steps, actions)\n \n return n_steps, v_global, obs_others, v_local, actions_1hot, actions_others_1hot, reward, reward_local, v_global_next, obs_others_next, v_local_next, done, goals",
"def on_batch(self, x, y):",
"def send_and_receive_many(world: CueBeamWorld):\n elements_vectorized1 = []\n for idxElement in range(0, len(world.elements) - 1):\n elements_vectorized1.extend(\n [world.elements[idxElement].x, world.elements[idxElement].y, world.elements[idxElement].z,\n world.elements[idxElement].amplitude, world.elements[idxElement].phase, 0.0])\n time_start = time.clock()\n current_ray_count = world.get_ray_count()\n estimated_worker_performance = 300000.0\n need_workers = math.ceil(current_ray_count / estimated_worker_performance)\n each_worker_does_ylines = math.ceil(world.rxPlane.ny / need_workers )\n # update\n handles = []\n for idx_worker in range(need_workers):\n yline0 = idx_worker*each_worker_does_ylines # starts at zero\n yline_y = world.rxPlane.y0 + world.rxPlane.dy * yline0\n handles.append({\n 'yline_y': yline_y,\n 'async_handle': beamsim_instant.delay(\n k=world.wavenumber,\n x0=world.rxPlane.x0,\n y0=yline_y,\n z0=world.rxPlane.z0,\n nx=world.rxPlane.nx,\n ny=each_worker_does_ylines,\n nz=world.rxPlane.nz,\n dx=world.rxPlane.dx,\n dy=world.rxPlane.dy,\n dz=world.rxPlane.dz,\n elements_vectorized=elements_vectorized1)\n })\n # TODO: FRONTIER HERE ===================\n\n # TODO: Wait for first worker, and load the result,\n #while not (async_handle.ready()):\n # time.sleep(0.02)\n\n world.rxPlane.pressurefield = pickle.loads(async_handle.result)\n time_end = time.clock()\n world.last_performance_rays_per_second = world.get_ray_count() / (time_end - time_start)\n print('performance = {} kRays/sec'.format(world.last_performance_rays_per_second / 1e3))\n return world",
"def make_reply(self,request,nreplies):\n #print(\"DummyPyWorker. Sending client message back\")\n self._log.debug(\"received message with {} parts\".format(len(request)))\n\n if not self.is_model_loaded():\n self._log.debug(\"model not loaded for some reason. loading.\")\n\n try:\n import torch\n except:\n raise RuntimeError(\"could not load pytorch!\")\n\n # message pattern: [image_bson,image_bson,...]\n\n nmsgs = len(request)\n nbatches = nmsgs/self.batch_size\n\n if not self._still_processing_msg:\n self._next_msg_id = 0\n\n # turn message pieces into numpy arrays\n img2d_v = []\n sizes = []\n frames_used = []\n rseid_v = []\n for imsg in xrange(self._next_msg_id,nmsgs):\n try:\n compressed_data = str(request[imsg])\n data = zlib.decompress(compressed_data)\n c_run = c_int()\n c_subrun = c_int()\n c_event = c_int()\n c_id = c_int()\n img2d = larcv.json.image2d_from_pystring(data,\n c_run, c_subrun, c_event, c_id )\n except:\n self._log.error(\"Image Data in message part {}\\\n could not be converted\".format(imsg))\n continue\n self._log.debug(\"Image[{}] converted: {}\"\\\n .format(imsg,img2d.meta().dump()))\n\n # check if correct plane!\n if img2d.meta().plane()!=self.plane:\n self._log.debug(\"Image[{}] is the wrong plane!\".format(imsg))\n continue\n\n # check that same size as previous images\n imgsize = (int(img2d.meta().cols()),int(img2d.meta().rows()))\n if len(sizes)==0:\n sizes.append(imgsize)\n elif len(sizes)>0 and imgsize not in sizes:\n self._log.debug(\"Next image a different size. \\\n we do not continue batch.\")\n self._next_msg_id = imsg\n break\n img2d_v.append(img2d)\n frames_used.append(imsg)\n rseid_v.append((c_run.value,c_subrun.value,c_event.value,c_id.value))\n if len(img2d_v)>=self.batch_size:\n self._next_msg_id = imsg+1\n break\n\n\n # convert the images into numpy arrays\n nimgs = len(img2d_v)\n self._log.debug(\"converted msgs into batch of {} images. frames={}\"\n .format(nimgs,frames_used))\n np_dtype = np.float32\n img_batch_np = np.zeros( (nimgs,1,sizes[0][1],sizes[0][0]),\n dtype=np_dtype )\n\n for iimg,img2d in enumerate(img2d_v):\n meta = img2d.meta()\n img2d_np = larcv.as_ndarray( img2d )\\\n .reshape( (1,1,meta.cols(),meta.rows()))\n\n img2d_np=np.transpose(img2d_np,(0,1,3,2))\n img_batch_np[iimg,:] = img2d_np\n\n # print(\"shape of image: \",img2d_np.shape)\n\n\n # now make into torch tensor\n img2d_batch_t = torch.from_numpy( img_batch_np ).to(self.device)\n # out_batch_np = img2d_batch_t.detach().cpu().numpy()\n # out_batch_np=np.transpose(out_batch_np,(0,1,3,2))\n\n print(\"shape of image: \",img2d_batch_t.shape)\n with torch.set_grad_enabled(False):\n out_batch_np = self.model.forward(img2d_batch_t).detach().cpu().numpy()\n out_batch_np=np.transpose(out_batch_np,(0,1,3,2))\n\n\n\n # compression techniques\n ## 1) threshold values to zero\n ## 2) suppress output for non-adc values\n ## 3) use half\n\n # suppress small values\n out_batch_np[ out_batch_np<1.0e-3 ] = 0.0\n\n # threshold\n # for ich in xrange(out_batch_np.shape[1]):\n # out_batch_np[:,ich,:,:][ img_batch_np[:,0,:,:]<10.0 ] = 0.0\n\n # convert back to full precision, if we used half-precision in the net\n\n self._log.debug(\"passed images through net. 
output batch shape={}\"\n .format(out_batch_np.shape))\n # convert from numpy array batch back to image2d and messages\n reply = []\n for iimg in xrange(out_batch_np.shape[0]):\n img2d = img2d_v[iimg]\n rseid = rseid_v[iimg]\n meta = img2d.meta()\n\n out_np = out_batch_np[iimg,0,:,:]\n # print(\"out_np\",type(out_np))\n # print(\"meta\",type(meta))\n out_img2d = larcv.as_image2d_meta( out_np, meta )\n bson = larcv.json.as_pystring( out_img2d,\n rseid[0], rseid[1], rseid[2], rseid[3] )\n compressed = zlib.compress(bson)\n reply.append(compressed)\n\n if self._next_msg_id>=nmsgs:\n isfinal = True\n self._still_processing_msg = False\n else:\n isfinal = False\n self._still_processing_msg = True\n\n self._log.debug(\"formed reply with {} frames. isfinal={}\"\n .format(len(reply),isfinal))\n return reply,isfinal",
"def run_batch(self, batch_x, batch_y):\n raise NotImplementedError()",
"def forward_one_batch(self, data, inference=False):\n inputs = data['img']\n labels = data.get('label', None)\n inputs = inputs.cuda()\n outputs = self.model(inputs)\n losses_report = None\n if not inference:\n labels = labels.cuda()\n losses_report = self.compute_losses(outputs, labels)\n return losses_report, outputs.detach().cpu().numpy(), labels.detach(\n ).cpu().numpy() if labels is not None else labels",
"def replay(self):\n \n #grab random batch\n if len(self.memory) < self.batchsize:\n minibatch = self.memory\n else:\n minibatch = random.sample(self.memory,self.batchsize)\n \n #instantiate\n states = []\n Q_wants = []\n \n #Find updates\n for event in minibatch:\n state,action,reward,next_state,done = event\n states.append(state)\n \n #Find Q_target\n state_tensor = np.reshape(state,(1,len(state))) # keras takes 2d arrays\n Q_want = self.model.predict(state_tensor)[0] # all elements of this, except the action chosen, stay\n # the same \n \n #If state is terminal, Q_target(action) = reward\n if done == True:\n Q_want[action] = reward\n \n # Q_want(action) = reward + gamma*Q_target(next_state) -- note I sample from the target network\n else:\n next_state_tensor = np.reshape(next_state,(1,len(next_state))) \n\n \n Q_target_next_state_vec = self.target_model.predict(next_state_tensor)[0]\n Q_target_next_state_max = max(Q_target_next_state_vec)\n \n Q_want[action] = reward + self.gamma*Q_target_next_state_max\n Q_want_tensor = np.reshape(Q_want,(1,len(Q_want)))\n #self.model.fit(state_tensor,Q_want_tensor,verbose=False,epochs=1)\n \n Q_wants.append(Q_want)\n \n \n #Here I fit on the whole batch. Others seem to fit line-by-line\n #Dont' think (hope) it makes much difference\n states = np.array(states)\n Q_wants = np.array(Q_wants)\n self.model.fit(states,Q_wants,verbose=False, epochs=1)",
"def _copy_to_gpu(self):\n self.dispatch('on_texture')",
"def process(self, sess):\n\n sess.run(self.sync) # copy weights from shared to local\n rollout = self.pull_batch_from_queue()\n batch = process_rollout(rollout, gamma=0.99, lambda_=1.0)\n\n should_compute_summary = self.task == 0 and self.local_steps % 11 == 0\n\n if should_compute_summary:\n fetches = [self.summary_op, self.train_op, self.global_step]\n else:\n fetches = [self.train_op, self.global_step]\n\n feed_dict = {\n self.local_network.x: batch.si,\n self.ac: batch.a,\n self.adv: batch.adv,\n self.r: batch.r,\n self.local_network.state_in[0]: batch.features[0],\n self.local_network.state_in[1]: batch.features[1],\n }\n\n fetched = sess.run(fetches, feed_dict=feed_dict)\n\n if should_compute_summary:\n self.summary_writer.add_summary(tf.Summary.FromString(fetched[0]), fetched[-1])\n self.summary_writer.flush()\n self.local_steps += 1",
"def __call__(self, epoch, update):\n count = 0\n ii = 1\n\n gradients_list = []\n metrics_list = []\n from_list = []\n step_list = []\n global_update_list = []\n\n while True:\n i,p = next(self.gen)\n if p.poll():\n count += 1\n grads =[]\n for i,fs in enumerate(self.float_sizes):\n w = p.recv_bytes(fs*4)\n grads.append(np.ndarray(self.shapes[i],np.float32, w))\n\n last_update, step, agnt_nr, metrics = p.recv() #only marginal gains her in the e-05s not worth the complexity to doing it with recv_bytes\n\n gradients_list.append(grads)\n metrics_list.append(metrics)\n from_list.append(agnt_nr)\n global_update_list.append(last_update)\n step_list.append(1)\n else:\n ii += 1\n if ii %self.learners == 0:\n time.sleep(0.0001)\n\n if self.warm_start and self.epochs >= epoch:\n if count == self.learners:\n return gradients_list, from_list, global_update_list ,step_list, metrics_list, 0, 2\n else:\n if count == self.num:\n return gradients_list, from_list, global_update_list,step_list, metrics_list, 0, 2",
"def _process(self):\n while True:\n with Timer() as data_timer:\n frame = self._frames_q.get()\n\n with Timer() as agent_timer:\n s, frame_metadata = self._unwrap_frame(frame)\n s = np.expand_dims(s, 0) # batch\n act = self.pred(s)[0][0].argmax()\n put_overwrite(self._actions_q, self._wrap_action(act, frame_metadata))\n\n print('.', end='', flush=True)\n if self.verbose:\n print('Avg data wait time: %.3f' % data_timer.time())\n print('Avg agent neural net eval time: %.3f' % agent_timer.time())"
] | [
"0.6505807",
"0.63949424",
"0.63113874",
"0.61615974",
"0.6134468",
"0.6134468",
"0.61168593",
"0.6034089",
"0.5956476",
"0.5939015",
"0.58440447",
"0.5803942",
"0.5793098",
"0.57923245",
"0.57748014",
"0.5772822",
"0.57639885",
"0.5756384",
"0.5732913",
"0.57300854",
"0.57294035",
"0.571429",
"0.57041234",
"0.5686585",
"0.56759757",
"0.564902",
"0.56483114",
"0.5647065",
"0.5644295",
"0.56429493"
] | 0.7087414 | 0 |
Get raw data from data file, returned in mV. SPAM raw data is single precision float with unit Volts. Calling this applies the ts_lsb calculated when the headers are read. This is because when a recording consists of multiple data files, each channel of each data file might have a different scaling. The only way to make the data consistent is to apply the ts_lsb scaling. Therefore, this method returns the data in mV for all channels. | def getUnscaledSamples(self, **kwargs) -> TimeData:
# initialise chans, startSample and endSample with the whole dataset
options = self.parseGetDataKeywords(kwargs)
# get the files to read and the samples to take from them, in the correct order
dataFilesToRead, samplesToRead, scalings = self.getDataFilesForSamples(
options["startSample"], options["endSample"]
)
numSamples = options["endSample"] - options["startSample"] + 1
# set up the dictionary to hold the data
data = {}
for chan in options["chans"]:
data[chan] = np.zeros(shape=(numSamples), dtype=self.dtype)
# loop through chans and get data
sampleCounter = 0
for dFile, sToRead, scalar in zip(dataFilesToRead, samplesToRead, scalings):
# get samples - this is inclusive
dSamples = sToRead[1] - sToRead[0] + 1
# spam files always record 5 channels
dSamplesRead = dSamples * self.recChannels[dFile]
# read the data
byteOff = (
self.dataByteOffset[dFile]
+ sToRead[0] * self.recChannels[dFile] * self.dataByteSize
)
dFilePath = os.path.join(self.dataPath, dFile)
dataRead = np.memmap(
dFilePath,
dtype=self.dtype,
mode="r",
offset=byteOff,
shape=(dSamplesRead),
)
# now need to unpack this
for chan in options["chans"]:
# check to make sure channel exists
self.checkChan(chan)
# get the channel index - the chanIndex should give the right order in the data file
# as it is the same order as in the header file
chanIndex = self.chanMap[chan]
# use the range sampleCounter -> sampleCounter + dSamples, because this actually means sampleCounter + dSamples - 1 as python ranges are not inclusive of the end value
# scale by the lsb scalar here - note that these can be different for each file in the run
data[chan][sampleCounter : sampleCounter + dSamples] = (
dataRead[chanIndex : dSamplesRead : self.recChannels[dFile]]
* scalar[chan]
)
# increment sample counter
sampleCounter = sampleCounter + dSamples # get ready for the next data read
# return data
startTime, stopTime = self.sample2time(
options["startSample"], options["endSample"]
)
comments = []
comments.append(
"Unscaled data {} to {} read in from measurement {}, samples {} to {}".format(
startTime,
stopTime,
self.dataPath,
options["startSample"],
options["endSample"],
)
)
comments.append("Data read from {} files in total".format(len(dataFilesToRead)))
comments.append(
"Data scaled to mV for all channels using scalings in header files"
)
comments.append("Sampling frequency {}".format(self.getSampleFreq()))
return TimeData(
sampleFreq=self.getSampleFreq(),
startTime=startTime,
stopTime=stopTime,
data=data,
comments=comments,
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_magnetometer(self):\n data = self.mag.read_bytes(Register.OUT_X_L_M, 6)\n return lsm9ds1.to_vector(data)",
"def read_raw_data(self):\n dat_file = os.path.join(DATA_DIR, self.patient_number + '.txt')\n if not os.path.exists(dat_file):\n raise AssertionError(\"{} doesn't exist.\".format(dat_file))\n time = []\n voltage1 = []\n voltage2 = []\n with open(dat_file, 'r') as fd:\n for line in fd:\n line = line.split()\n time.append(line[0])\n voltage1.append(float(line[1]))\n voltage2.append(float(line[2]))\n\n tags_file = os.path.join(DATA_DIR, self.patient_number + '_tag.txt')\n if not os.path.exists(dat_file):\n raise AssertionError(\"{} doesn't exist.\".format(tags_file))\n tags_time = []\n tags = []\n r_peaks_indexes = []\n with open(tags_file, 'r') as fd:\n for line in fd:\n line = line.split()\n tags_time.append(line[0])\n tags.append(line[2])\n r_peaks_indexes.append(int(line[1]))\n return time, voltage1, voltage2, tags_time, tags, r_peaks_indexes",
"def read_sp2(file_name, debug=False, arm_convention=True):\n\n my_data = open(file_name, \"rb\").read()\n # Get file date from name\n if platform.system() == \"Windows\":\n split_file_name = file_name.split(\"\\\\\")\n else:\n split_file_name = file_name.split(\"/\")\n if arm_convention:\n next_split = split_file_name[-1].split(\".\")\n dt = datetime.strptime(next_split[2], \"%Y%m%d\")\n else:\n dt = datetime.strptime(split_file_name[-1][0:8], \"%Y%m%d\")\n\n if len(my_data) > 0:\n bytepos = 0\n numCols = struct.unpack(\">I\", my_data[bytepos:bytepos + 4])[0]\n bytepos += 4\n numChannels = struct.unpack(\">I\", my_data[bytepos:bytepos + 4])[0]\n if debug:\n print((\"Loaded file with numCols = {}, numChannels = {}\"\n .format(numCols, numChannels)))\n\n data_points_per_record = numChannels * numCols\n\n bytes_per_record = 2 * data_points_per_record\n bytes_not_data_array = 12 + 2 + 28 + 16\n bytes_per_record += bytes_not_data_array\n last_pos = int(bytes_per_record - 1)\n num_spare_cols = struct.unpack(\">I\", my_data[last_pos - 4:last_pos])[0]\n if debug:\n print(\"Number of spare columns = %d\" % num_spare_cols)\n\n if num_spare_cols != 0:\n bytes_per_record += num_spare_cols\n\n numRecords = int(len(my_data) / bytes_per_record)\n totalRows = numChannels * numRecords\n DataWave = np.zeros((totalRows, numCols), dtype='int16')\n Flag = np.zeros(int(totalRows / numChannels), dtype='int16')\n TimeWave = np.zeros(numRecords, dtype='float64')\n Res1 = np.zeros(numRecords, dtype='float32')\n EventIndex = np.zeros(numRecords, dtype='float32')\n TimeDiv10000 = np.zeros(numRecords, dtype='float64')\n TimeRemainder = np.zeros(numRecords, dtype='float64')\n Res5 = np.zeros(numRecords, dtype='float32')\n Res6 = np.zeros(numRecords, dtype='float32')\n Res7 = np.zeros(numRecords, dtype='float64')\n Res8 = np.zeros(numRecords, dtype='float64')\n if num_spare_cols != 0:\n SpareDataArray = np.zeros(numRecords, num_spare_cols)\n\n arrayFmt = \">\"\n for i in range(data_points_per_record):\n arrayFmt += \"h\"\n\n for record in range(numRecords):\n dataStartPoint = record * bytes_per_record + 8\n startRow = record * numChannels\n endRow = startRow + numChannels - 1\n the_row = np.array(struct.unpack(\n arrayFmt, my_data[dataStartPoint:dataStartPoint + int(data_points_per_record * 2)]))\n\n DataWave[startRow:endRow + 1, 0:numCols] = the_row.reshape(\n numCols, numChannels).T\n dataStartPoint += data_points_per_record * 2\n Flag[record] = struct.unpack(\">h\", my_data[dataStartPoint:dataStartPoint + 2])[0]\n next_floats = struct.unpack(\">ffffffff\", my_data[dataStartPoint + 2:dataStartPoint + 34])\n TimeWave[record] = next_floats[0]\n Res1[record] = next_floats[1]\n EventIndex[record] = next_floats[2]\n TimeDiv10000[record] = next_floats[3]\n TimeRemainder[record] = next_floats[4]\n Res5[record] = next_floats[5]\n Res6[record] = next_floats[6]\n next_doubles = struct.unpack(\">dd\", my_data[dataStartPoint + 34:dataStartPoint + 50])\n Res7[record] = next_doubles[0]\n Res8[record] = next_doubles[1]\n dataStartPoint += 50\n\n if num_spare_cols != 0:\n startRow = (2 * num_spare_cols) * record\n dataStartPoint += bytes_not_data_array - 4\n spareFmt = \">\"\n for i in range(num_spare_cols):\n spareFmt += \"f\"\n\n SpareDataArray[record] = np.array(\n struct.unpack(spareFmt, my_data[dataStartPoint:dataStartPoint+4*num_spare_cols]))\n\n UTCtime = TimeDiv10000 * 10000 + TimeRemainder\n diff_epoch_1904 = (\n datetime(1970, 1, 1) - datetime(1904, 1, 1)).total_seconds()\n UTCdatetime = np.array([\n 
datetime.utcfromtimestamp(x - diff_epoch_1904) for x in UTCtime])\n\n DateTimeWave = (dt - datetime(1904, 1, 1)).total_seconds() + TimeWave\n\n # Make an xarray dataset for SP2\n Flag = xr.DataArray(Flag, dims={'event_index': EventIndex})\n Res1 = xr.DataArray(Res1, dims={'event_index': EventIndex})\n Res5 = xr.DataArray(Res5, dims={'event_index': EventIndex})\n Res6 = xr.DataArray(Res6, dims={'event_index': EventIndex})\n Res7 = xr.DataArray(Res7, dims={'event_index': EventIndex})\n Res8 = xr.DataArray(Res8, dims={'event_index': EventIndex})\n Time = xr.DataArray(UTCdatetime, dims={'event_index': EventIndex})\n EventInd = xr.DataArray(EventIndex, dims={'event_index': EventIndex})\n DateTimeWaveUTC = xr.DataArray(UTCtime, dims={'event_index': EventIndex})\n DateTimeWave = xr.DataArray(DateTimeWave, dims={'event_index': EventIndex})\n TimeWave = xr.DataArray(TimeWave, dims={'event_index': EventIndex})\n my_ds = xr.Dataset({'time': Time, 'Flag': Flag, 'Res1': Res1, 'Res5': Res5,\n 'Res6': Res6, 'Res7': Res7, 'Res8': Res8, 'EventIndex': EventInd,\n 'DateTimeWaveUTC': DateTimeWaveUTC, 'TimeWave': TimeWave,\n 'DateTimeWave': DateTimeWave})\n\n for i in range(numChannels):\n temp_array = np.zeros((numRecords, numCols), dtype='int')\n for j in range(numRecords):\n k = i + j*numChannels\n temp_array[j] = DataWave[k]\n my_ds['Data_ch' + str(i)] = xr.DataArray(\n temp_array, dims={'event_index': EventIndex, 'columns': np.arange(0, 100, 1)})\n del my_data\n del DataWave\n return my_ds\n else:\n return None",
"def read(self, filename=None, **kwargs):\n \n # --- Standard tests and exceptions (generic code)\n if filename:\n self.filename = filename\n if not self.filename:\n raise Exception('No filename provided')\n if not os.path.isfile(self.filename):\n raise OSError(2,'File not found:',self.filename)\n if os.stat(self.filename).st_size == 0:\n raise EmptyFileError('File is empty:',self.filename)\n try:\n from nptdms import TdmsFile\n except:\n raise Exception('Install the library nptdms to read this file')\n\n fh = TdmsFile(self.filename, read_metadata_only=False)\n # --- OLD, using some kind of old version of tdms and probably specific to one file\n # channels_address = list(fh.objects.keys())\n # channels_address = [ s.replace(\"'\",'') for s in channels_address]\n # channel_keys= [ s.split('/')[1:] for s in channels_address if len(s.split('/'))==3]\n # # --- Setting up list of signals and times\n # signals=[]\n # times=[]\n # for i,ck in enumerate(channel_keys):\n # channel = fh.object(ck[0],ck[1])\n # signals.append(channel.data)\n # times.append (channel.time_track())\n\n # lenTimes = [len(time) for time in times]\n # minTimes = [np.min(time) for time in times]\n # maxTimes = [np.max(time) for time in times]\n # if len(np.unique(lenTimes))>1:\n # print(lenTimes)\n # raise NotImplementedError('Different time length') \n # # NOTE: could use fh.as_dataframe\n # if len(np.unique(minTimes))>1:\n # print(minTimes)\n # raise NotImplementedError('Different time span') \n # if len(np.unique(maxTimes))>1:\n # print(maxTimes)\n # raise NotImplementedError('Different time span') \n # # --- Gathering into a data frame with time\n # time =times[0]\n # signals = [time]+signals\n # M = np.column_stack(signals)\n # colnames = ['Time_[s]'] + [ck[1] for ck in channel_keys]\n # self['data'] = pd.DataFrame(data = M, columns=colnames)\n # --- NEW\n self['data'] = fh\n\n #for group in fh.groups():\n # for channel in group.channels():\n # #channel = group['channel name']\n # print('Group:',group.name , 'Chan:',channel.name)\n # channel_data = channel[:]\n # if len(channel_data)>0:\n # print(' ', type(channel_data))\n # #print(' ', len(channel_data))\n # print(' ', channel_data)\n # print(' ', channel_data[0])\n # try:\n # print(channel.time_track())\n # except KeyError:\n # print('>>> No time track')",
"def get_raw_data(self):\n if self._img and self.is_4d():\n temp = self._img.get_data(caching='unchanged')\n temp = np.rot90(temp)\n for tp in self._loaded_time_list:\n temp[..., tp] = self._data[..., tp]\n else:\n temp = self._data.copy()\n\n return np.rot90(temp, 3)",
"def read_PSSM_data(self):\n\n names = os.listdir(self.pssm_path)\n fname = [n for n in names if n.find(self.molname)==0]\n\n if len(fname)>1:\n raise ValueError('Multiple PSSM files found for %s in %s',self.mol_name,self.pssm_path)\n if len(fname)==0:\n raise FileNotFoundError('No PSSM file found for %s in %s',self.mol_name,self.pssm_path)\n else:\n fname = fname[0]\n\n f = open(self.pssm_path + '/' + fname,'rb')\n data = f.readlines()\n f.close()\n raw_data = list( map(lambda x: x.decode('utf-8').split(),data))\n\n self.res_data = np.array(raw_data)[:,:3]\n self.res_data = [ (r[0],int(r[1]),r[2]) for r in self.res_data ]\n self.pssm_data = np.array(raw_data)[:,3:].astype(np.float)",
"def read_raw(self):\n return self._FITS.read_raw()",
"def readData(self):\n if (self.model == 'GDS'):\n self.write(':ACQ'+str(ch)+':MEM?\\n')\n elif (self.model == 'TDS'):\n self.write('CURVe?\\n')\n\n # Check for the initial '#'; if not present, raise error.\n if (self.read(1) != '#'):\n raise Exception, \"Expected header not present\"\n\n # Read the data length indicator\n dataSize = int(self.read(int(self.read(1))))\n\n # extra steps for GDS\n if (self.model == 'GDS'):\n # subtract the 8 bytes we will read.\n dataSize -= 8\n # Read the sampling period\n hstep = struct.unpack('>f', self.read(4))[0]\n # also, fix hoff so it corresponds with that for TDS\n # FIXME: check with the scope at some point.\n hoff = hoff - float(dataSize/4) * hstep\n # Read 4 bytes to advance to the actual data: first byte\n # contains the channel and the three are not used,\n # according to the GDS800 manual.\n self.read(4)\n \n # Read data; TDS expects a 1-byte data, GDS expects 2-byte one.\n if (self.model == 'TDS'):\n data = list(struct.unpack('>'+str(dataSize)+'b',\n self.read(dataSize)))\n # TDS has a trailing '\\n' that should be drained.\n self.read(1)\n elif (self.model == 'GDS'):\n data = list(struct.unpack('>'+str(dataSize/2)+'h',\n self.read(dataSize)))\n\n return data",
"def getData(self):\n return struct.unpack(\"!f\",self.data)[0]",
"def read_all(self,reverse=True):\n raise NotImplementedError('To be implemented')\n\n # go to start of the data\n self.filfile.seek(int(self.datastart))\n # read data into 2-D numpy array\n# data=np.fromfile(self.filfile,dtype=self.dtype).reshape(self.channels,self.blocksize,order='F')\n data=np.fromfile(self.filfile,dtype=self.dtype).reshape(self.blocksize, self.channels)\n if reverse:\n data = data[:,::-1]\n return data",
"def load_data(self):\n return numpy.fromfile(self.data_fname, dtype=numpy.float32)",
"def read_raw(self, filename, ignore_data=False, open_dataset=None):\n twix, version_flag = self.get_twix(filename)\n d = self._get_parameters(twix.current)\n \n data = d['data']\n\n if d[\"remove_os\"]:\n data = self._remove_oversampling_basic(data)\n d[\"sw\"] = d[\"sw\"] / 2.0\n\n data = np.conjugate(data)\n\n # there may be multiple repetitions of svs_se in this data\n nrep = data.shape[0]\n raws = []\n for i in range(nrep):\n d[\"data\"] = data[i,:,:,:]\n d[\"data_source\"] = filename+'_rep'+str(i).zfill(2) if nrep > 1 else filename\n raws.append(DataRawFidsum(d))\n\n return raws",
"def _read_data(self):\n with self._open(self.filename, 'rb') as f:\n try:\n f.seek(self._offset_data, self._offset_whence)\n except IOError:\n print('Error: hedp.io.HamamatsuFile seeking outside of file limits.')\n print(' Failed to parse file.')\n print(\" Either the 'offset' or 'dtype' input arguments must be wrong!\")\n raise\n except:\n raise\n\n data_len = np.prod(self.shape)*np.dtype(self._dtype).itemsize\n data_str = f.read(data_len)\n if data_len != len(data_str):\n print(data_len, len(data_str))\n raise ValueError('File ended before all data was read. Probably wrong offset or dtype!')\n\n\n self.data = np.fromstring(data_str, dtype=self._dtype).reshape(self.shape[::-1])\n self.data = np.ndarray.astype(self.data, 'float32')\n\n #self.data = np.fromfile(f, dtype=self._dtype,\n # count=np.prod(self.shape)).reshape(self.shape[::-1])",
"def read(normalize=False):\n fname = join(dirname(abspath(__file__)), 'datasets', 'Medulloblastoma', 'Medulloblastoma_data.txt')\n V = np.loadtxt(fname)\n if normalize:\n V = (V - V.min()) / (V.max() - V.min())\n return V",
"def read(self, filename, normalize=True):\n if self.gcp == False:\n\n\t\t filepath = self.mixed_dir + filename\n\t\t sf, time_signal = wavfile.read(filepath, mmap=True)\n\n else:\n\n blob = list(self.bucket.list_blobs(prefix=filename))[0]\n # download blob as string\n file_as_string = blob.download_as_string()\n sf, time_signal = wavfile.read(io.BytesIO(file_as_string), mmap=True)\n\n\t\tif normalize == True:\n\t\t\t\n # normalization, assuming 2^15 is the highest possible quantization\n\t\t\ttime_signal = time_signal/np.power(2,15)\n\n\t\treturn time_signal",
"def readRawSamples(fname):\n\n d = numpy.fromfile(fname, dtype=numpy.float32)\n #d = d.astype(numpy.float64)\n #d = (d - 128) / 128.0\n\n return d[::2] + 1j * d[1::2]",
"def _read_sp(sp_file):\n content = sp_file.read()\n\n start_byte = 0\n n_bytes = 4\n signature = content[start_byte:start_byte + n_bytes]\n\n start_byte += n_bytes\n # the description is fixed to 40 bytes\n n_bytes = 40\n description = content[\n start_byte:start_byte + n_bytes].decode('utf8')\n\n meta = {'signature': signature,\n 'description': description}\n spectrum = []\n\n NBP = []\n start_byte += n_bytes\n n_bytes = 6\n block_id, block_size = _block_info(\n content[start_byte:start_byte + n_bytes])\n start_byte += n_bytes\n NBP.append(start_byte + block_size)\n while block_id != 122 and start_byte < len(content) - 2:\n next_block_id = content[start_byte:start_byte + 2]\n if indexbytes(next_block_id, 1) == 117:\n start_byte = NBP[-1]\n NBP = NBP[:-1]\n while start_byte >= NBP[-1]:\n NBP = NBP[-1]\n else:\n block_id, block_size = _block_info(\n content[start_byte:start_byte + n_bytes])\n start_byte += n_bytes\n NBP.append(start_byte + block_size)\n\n meta.update(_decode_5104(\n content[start_byte:start_byte + block_size]))\n\n start_byte = NBP[1]\n while start_byte < len(content):\n n_bytes = 6\n block_id, block_size = _block_info(\n content[start_byte:start_byte + n_bytes])\n start_byte += n_bytes\n if block_id in FUNC_DECODE.keys():\n decoded_data = FUNC_DECODE[block_id](\n content[start_byte:start_byte + block_size])\n if isinstance(decoded_data, dict):\n meta.update(decoded_data)\n else:\n spectrum = decoded_data\n start_byte += block_size\n\n wavelength = np.linspace(meta['min_wavelength'],\n meta['max_wavelength'],\n meta['n_points'])\n\n if isinstance(sp_file, string_types):\n meta['filename'] = basename(sp_file)\n else:\n meta['filename'] = basename(sp_file.name)\n\n return Spectrum(spectrum, wavelength, meta)",
"def _readBTS(self,fname):\n with BinaryFile(fname) as f:\n #\n # read header info\n #\n if self.verbose: print('Reading header information from',fname)\n\n ID = f.read_int2()\n assert( ID==7 or ID==8 )\n if ID==7: filetype = 'non-periodic'\n elif ID==8: filetype = 'periodic'\n else: filetype = 'UNKNOWN'\n if self.verbose:\n print(' id= {:d} ({:s})'.format(ID,filetype))\n\n # - read resolution settings\n self.NZ = f.read_int4()\n self.NY = f.read_int4()\n self.Ntower = f.read_int4()\n if self.verbose:\n print(' NumGrid_Z,_Y=',self.NZ,self.NY)\n print(' ntower=',self.Ntower)\n self.N = f.read_int4()\n self.dz = f.read_float(dtype=self.realtype)\n self.dy = f.read_float(dtype=self.realtype)\n self.dt = f.read_float(dtype=self.realtype)\n self.period = self.realtype(self.N * self.dt)\n self.Nsize = 3*self.NY*self.NZ*self.N\n if self.verbose:\n print(' nt=',self.N)\n print(' (problem size: {:d} points)'.format(self.Nsize))\n print(' dz,dy=',self.dz,self.dy)\n print(' TimeStep=',self.dt)\n print(' Period=',self.period)\n\n # - read reference values\n self.uhub = f.read_float(dtype=self.realtype)\n self.zhub = f.read_float(dtype=self.realtype) # NOT USED\n self.zbot = f.read_float(dtype=self.realtype)\n if self.Umean is None:\n self.Umean = self.uhub\n if self.verbose:\n print(' Umean = uhub =',self.Umean,\n '(for calculating fluctuations)')\n else: # user-specified Umean\n if self.verbose:\n print(' Umean =',self.Umean,\n '(for calculating fluctuations)')\n print(' uhub=',self.uhub,' (NOT USED)')\n if self.verbose:\n print(' HubHt=',self.zhub,' (NOT USED)')\n print(' Zbottom=',self.zbot)\n\n # - read scaling factors\n self.Vslope = np.zeros(3,dtype=self.realtype)\n self.Vintercept = np.zeros(3,dtype=self.realtype)\n for i in range(3):\n self.Vslope[i] = f.read_float(dtype=self.realtype)\n self.Vintercept[i] = f.read_float(dtype=self.realtype)\n if self.verbose:\n # output is float64 precision by default...\n print(' Vslope=',self.Vslope)\n print(' Vintercept=',self.Vintercept)\n\n # - read turbsim info string\n nchar = f.read_int4()\n version = f.read(N=nchar)\n if self.verbose: print(version)\n\n #\n # read normalized data\n #\n # note: need to specify Fortran-order to properly read data using np.nditer\n t0 = time.process_time()\n if self.verbose: print('Reading normalized grid data')\n\n self.U = np.zeros((3,self.NY,self.NZ,self.N),order='F',dtype=self.realtype)\n self.T = np.zeros((self.N,self.NY,self.NZ))\n if self.verbose:\n print(' U size :',self.U.nbytes/1024.**2,'MB')\n\n for val in np.nditer(self.U, op_flags=['writeonly']):\n val[...] = f.read_int2()\n self.U = self.U.swapaxes(3,2).swapaxes(2,1) # new shape: (3,self.N,self.NY,self.NZ)\n\n if self.Ntower > 0:\n if self.verbose:\n print('Reading normalized tower data')\n self.Utow = np.zeros((3,self.Ntower,self.N),\n order='F',dtype=self.realtype)\n if self.verbose:\n print(' Utow size :',self.Utow.nbytes/1024.**2,'MB')\n for val in np.nditer(self.Utow, op_flags=['writeonly']):\n val[...] 
= f.read_int2()\n\n if self.verbose:\n print(' Read velocitiy fields in',time.process_time()-t0,'s')\n \n #\n # calculate dimensional velocity\n #\n if self.verbose:\n print('Calculating velocities from normalized data')\n for i in range(3):\n self.U[i,:,:,:] -= self.Vintercept[i]\n self.U[i,:,:,:] /= self.Vslope[i]\n if self.Ntower > 0:\n self.Utow[i,:,:] -= self.Vintercept[i]\n self.Utow[i,:,:] /= self.Vslope[i]\n self.U[0,:,:,:] -= self.Umean # uniform inflow w/ no shear assumed\n\n print(' u min/max [',np.min(self.U[0,:,:,:]),\n np.max(self.U[0,:,:,:]),']')\n print(' v min/max [',np.min(self.U[1,:,:,:]),\n np.max(self.U[1,:,:,:]),']')\n print(' w min/max [',np.min(self.U[2,:,:,:]),\n np.max(self.U[2,:,:,:]),']')\n\n self.scaling = np.ones((3,self.NZ))\n\n #\n # calculate coordinates\n #\n if self.verbose:\n print('Calculating coordinates')\n #self.y = -0.5*(self.NY-1)*self.dy + np.arange(self.NY,dtype=self.realtype)*self.dy\n self.y = np.arange(self.NY,dtype=self.realtype)*self.dy\n self.z = self.zbot + np.arange(self.NZ,dtype=self.realtype)*self.dz\n #self.ztow = self.zbot - np.arange(self.NZ,dtype=self.realtype)*self.dz #--NOT USED\n\n self.t = np.arange(self.N,dtype=self.realtype)*self.dt\n if self.verbose:\n print('Read times [',self.t[0],self.t[1],'...',self.t[-1],']')",
"def read(f, normalized=False):\r\n a = pydub.AudioSegment.from_mp3(f)\r\n y = np.array(a.get_array_of_samples())\r\n if a.channels == 2:\r\n y = y.reshape((-1, 2))\r\n if normalized:\r\n return a.frame_rate, np.float32(y) / 2**15\r\n else:\r\n return a.frame_rate, y",
"def read_raw(self, offset, size, return_raw = False):\n raw_data = self.reader(offset, size)\n if raw_data is None:\n return None\n if return_raw:\n return raw_data\n else:\n if size == 1:\n data = struct.unpack(\"%dB\" %size, raw_data)[0]\n else:\n data = struct.unpack(\"%dB\" %size, raw_data)\n return data",
"def load_vo_txt_raw(*, fname, sampling='1M'):\n # Set the day of month for time series depending on the MF sampling rate\n if sampling == '1M':\n day = 15\n elif sampling == '4M':\n day = 1\n\n # Positions given in degrees - co-latitude (0 to 180), longitude (\n df = pd.read_csv(fname, sep=\"\\s+\", header=14,\n names=[\"theta\", \"phi\", \"Year\", \"Month\", \"Time\", \"r\",\n \"Br\", \"Btheta\", \"Y\", \"sigma_r\", \"sigma_theta\",\n \"sigma_phi\", \"N_data\"], usecols=range(13))\n\n df[\"mjd2000\"] = mjd2000(df[\"Year\"], df[\"Month\"], day)\n df[\"dyear\"] = mjd_to_dyear(df[\"mjd2000\"], leap_year=True)\n df[\"X\"] = -df[\"Btheta\"] # -theta component\n df[\"Z\"] = -df[\"Br\"] # -radial component\n df.drop(columns=[\"Btheta\", \"Br\"], inplace=True)\n # To 00:00 on 1st or 15th each month\n # Multiplication by 10000 and 100 are needed to convert to datetime\n # (see documentation for pandas.datetime)\n df[\"date\"] = pd.to_datetime(df[\"Year\"]*10000+df[\"Month\"]*100+day,\n format=\"%Y%m%d\")\n\n return df",
"def read(self):\n # open the .SPE file\n with open(self._input_file_path, 'rb') as f:\n lines = f.readlines()\n # Create an empty dictionary for the metadata\n metadata_dictionary = {}\n\n # Search through the file for the needed metadata\n metadata_dictionary['date_acquired'] = re.search(b'date=\"(.*?)\"', lines[1])[1].decode('ANSI') \n metadata_dictionary['width'] = int(re.search(b'width=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['height'] = int(re.search(b'height=\"(.*?)\"', lines[1])[1])\n metadata_dictionary['size'] = metadata_dictionary['width']*metadata_dictionary['height']\n metadata_dictionary['exposure_time'] = int(re.search(b'<ExposureTime type=\"Double\">(.*?)</ExposureTime>', lines[1])[1])\n metadata_dictionary['excitation_wavelength'] = float(re.search(b'laserLine=\"(.*?)\"',lines[1])[1])\n metadata_dictionary['center_wavelength'] = float(re.search(b'<CenterWavelength type=\"Double\">(.*?)</CenterWavelength>',lines[1])[1])\n metadata_dictionary['orientation'] = re.search(b'orientation=\"(.*?)\"',lines[1])[1].decode('ANSI')\n\n # Get the wavelength and intensity\n wavelength_string = re.search(b'<Wavelength xml:space=\"preserve\">(.*?)</Wavelength>',lines[1])[1].decode('utf-8')\n wavelength = np.array(wavelength_string.split(','), dtype=np.float64)\n\n f.seek(4100)\n intensity = np.fromfile(f,dtype=np.float32,count=metadata_dictionary['size'])\n\n raman_shift_wavenumbers = 1e7*(1/metadata_dictionary['excitation_wavelength'] - 1/wavelength)\n\n f.close()\n \n # create the sidpy dataset\n data_set = Dataset.from_array(intensity, name='Raman Spectra')\n\n data_set.data_type = 'spectrum'\n data_set.units = 'counts'\n data_set.quantity = 'Intensity'\n\n # set dimensions\n data_set.set_dimension(0, Dimension(raman_shift_wavenumbers, name='Raman Shift',\n units = 'cm-1',\n quantity='Raman shift',\n dimension_type='spectral'))\n data_set.set_dimension(1, Dimension(intensity, name='Intensity',\n units = 'counts',\n quantity='intensity',\n dimension_type='spectral')) \n\n data_set.metadata = metadata_dictionary\n\n return data_set",
"def process(self, data):\n spectr = stft(data, n_fft=512, hop_length=160)\n return np.concatenate((spectr.real[:, :, np.newaxis], spectr.imag[:, :, np.newaxis]), axis=2)",
"def openMCSH5File(filename, verbose=False):\n rf = h5py.File(filename, 'r')\n \n stream = rf.require_group('/Data/Recording_0/AnalogStream/Stream_0')\n data = np.array(stream.get('ChannelData'),dtype=np.int)\n timestamps = np.array(stream.get('ChannelDataTimeStamps'))\n info = np.array(stream.get('InfoChannel'))\n \n Unit = info['Unit'][0]\n Tick = info['Tick'][0]/1e6\n exponent = info['Exponent'][0]\n convFact = info['ConversionFactor'][0]\n \n nRecCh, nFrames = data.shape\n channel_ids = info['ChannelID']\n assert len(np.unique(channel_ids)) == len(channel_ids), 'Duplicate MCS channel IDs found'\n electrodeLabels = info['Label']\n \n TimeVals = np.arange(timestamps[0][0],timestamps[0][2]+1,1)*Tick\n \n assert Unit==b'V', 'Unexpected units found, expected volts, found {}'.format(Unit.decode('UTF-8'))\n data_V = data*convFact.astype(float)*(10.0**(exponent))\n \n timestep_avg = np.mean(TimeVals[1:]-TimeVals[0:-1])\n timestep_std = np.std(TimeVals[1:]-TimeVals[0:-1])\n timestep_min = np.min(TimeVals[1:]-TimeVals[0:-1])\n timestep_max = np.min(TimeVals[1:]-TimeVals[0:-1])\n assert all(np.abs(np.array((timestep_min, timestep_max))-timestep_avg)/timestep_avg < 1e-6), 'Time steps vary by more than 1 ppm'\n samplingRate = 1./timestep_avg\n\n if verbose:\n print('# MCS H5 data format')\n print('#')\n print('# File: {}'.format(rf.filename))\n print('# File size: {:.2f} MB'.format(rf.id.get_filesize()/1024**2))\n print('#')\n for key in rf.attrs.keys():\n print('# {}: {}'.format(key,rf.attrs[key]))\n print('#')\n print('# Signal range: {:.2f} to {:.2f} µV'.format(np.amin(data_V)*1e6,np.amax(data_V)*1e6))\n print('# Number of channels: {}'.format(nRecCh))\n print('# Number of frames: {}'.format(nFrames))\n print('# Time step: {:.2f} µs ± {:.5f} % (range {} to {})'.format(timestep_avg*1e6, timestep_std/timestep_avg*100, timestep_min*1e6, timestep_max*1e6))\n print('# Sampling rate: {:.2f} Hz'.format(samplingRate))\n print('#')\n print('# MCSH5RecordingExtractor currently only reads /Data/Recording_0/AnalogStream/Stream_0')\n\n return (rf, nFrames, samplingRate, nRecCh, channel_ids, electrodeLabels, exponent, convFact)",
"def read_data(self):\n self.data = reduce_spectrum(self.filename)",
"def _read_pha(file):\n with fits.open(file) as hdul:\n data = hdul[1].data\n header_for_livetime = hdul[0].header\n\n return data['channel'], data['counts'], header_for_livetime['LIVETIME']",
"def readWaveform(self):\n # prepare data holder\n y = [ 0 for j in range(4) ]\n # in case of previous errors\n self.flushInput()\n for ch in self.chs:\n # mostly for TDS\n self.setCh(ch)\n # calibration factor we will need soon\n (vmult, voff) = self.calibV()\n # read and calibrate data\n data = (numpy.array(self.readData()) - voff) * vmult\n # This is from the formula in TDS manual, without the\n # \"vzero\" in it---I couldn't figure out when that wouldn't\n # be exactly zero.\n y[ch-1]=data[:]\n\n (hstep, hoff) = self.calibH()\n # initialize time array\n t = numpy.array(range(len(y[0])))\n t = (t * hstep) + hoff\n\n # update the sequence number (... for isUpdated())\n self.seq = self.readSeq()\n\n return (t, y)",
"def readmod96(mod96file):\n with open(mod96file, 'r') as fid:\n while True:\n l = fid.readline()\n if l == \"\": break\n l = l.split('\\n')[0]\n if \"H\" in l and \"VP\" in l and \"VS\" in l:\n H, VP, VS, RHO, QP, QS, ETAP, ETAS, FREFP, FREFS = [[] for _ in xrange(10)]\n while True:\n l = fid.readline()\n l = l.split('\\n')[0]\n l = np.asarray(l.split(), float)\n for W, w in zip([H, VP, VS, RHO, QP, QS, ETAP, ETAS, FREFP, FREFS], l):\n W.append(w)\n if l[0] == 0.: #thickness is 0 = ending signal (half space)\n break\n if l[0] == 0.: break \n H, VP, VS, RHO, QP, QS, ETAP, ETAS, FREFP, FREFS = [np.asarray(_, float) for _ in H, VP, VS, RHO, QP, QS, ETAP, ETAS, FREFP, FREFS]\n Z = np.concatenate(([0.], H[:-1].cumsum()))\n return Z, H, VP, VS, RHO, QP, QS, ETAP, ETAS, FREFP, FREFS",
"def read_sas_file(filename):\n \n data = np.array([],dtype=np.float)\n \n with open(filename,'r') as f:\n \n for line in f:\n\n # Only the first 2 or 3 columns are parsed \n cols = parse_sas_data_line(line)\n \n if cols.any():\n\n if data.any():\n data = np.vstack((data,parse_sas_data_line(line)))\n else:\n data = parse_sas_data_line(line)\n \n return data",
"def loadTTLPulse(file, n_channels = 2, fs = 20000, track = 0, mscope = 1):\n f = open(file, 'rb')\n startoffile = f.seek(0, 0)\n endoffile = f.seek(0, 2)\n bytes_size = 2 \n n_samples = int((endoffile-startoffile)/n_channels/bytes_size)\n f.close()\n with open(file, 'rb') as f:\n data = np.fromfile(f, np.uint16).reshape((n_samples, n_channels))\n \n ch_track = data[:,track].astype(np.int32)\n peaks,_ = scipy.signal.find_peaks(np.diff(ch_track), height=30000)\n timestep = np.arange(0, len(data))/fs\n peaks+=1\n ttl_track = pd.Series(index = timestep[peaks], data = data[peaks,track]) \n\n ch_mscope = data[:,mscope].astype(np.int32)\n peaks,_ = scipy.signal.find_peaks(np.abs(np.diff(ch_mscope)), height=30000)\n peaks+=1\n ttl_mscope = pd.Series(index = timestep[peaks], data = data[peaks,mscope])\n\n return ttl_track, ttl_mscope"
] | [
"0.5571445",
"0.5542686",
"0.55078024",
"0.5504112",
"0.54853296",
"0.5461196",
"0.5426089",
"0.539896",
"0.5366386",
"0.53356355",
"0.53265965",
"0.53247327",
"0.529701",
"0.52936316",
"0.5283426",
"0.527169",
"0.52697086",
"0.5268657",
"0.5241611",
"0.5232119",
"0.5224715",
"0.5208464",
"0.51925397",
"0.51670283",
"0.5163784",
"0.5136063",
"0.5128488",
"0.51264393",
"0.51218826",
"0.5111792"
] | 0.55713576 | 1 |
Get the data files that have to be read to cover the sample range | def getDataFilesForSamples(
self, startSample: int, endSample: int
) -> Tuple[List[str], List[List[int]], List[float]]:
# the datafiles are saved in sample order, beginning with the earliest
# go through each datafile and find the range to be read
dataFilesToRead = []
samplesToRead = []
scalings = []
for idx, dFile in enumerate(self.dataFileList):
fileStartSamp = self.dataRanges[idx][0]
fileEndSamp = self.dataRanges[idx][1]
if fileStartSamp > endSample or fileEndSamp < startSample:
continue # nothing to read from this file
# in this case, there is some overlap with the samples to read
dataFilesToRead.append(dFile)
readFrom = 0 # i.e. the first sample in the datafile
readTo = fileEndSamp - fileStartSamp # this is the last sample in the file
if fileStartSamp < startSample:
readFrom = startSample - fileStartSamp
if fileEndSamp > endSample:
readTo = endSample - fileStartSamp
# readFrom and readTo are inclusive indices; the sample at readTo is read as well
samplesToRead.append([readFrom, readTo])
scalings.append(self.scalings[idx])
return dataFilesToRead, samplesToRead, scalings | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_samples_file(foldername, filter=None):\n samples = []\n for file in os.listdir(foldername):\n if filter and file.find(filter) == -1:\n continue\n for sample in sfile(foldername + '/' + file, None).get_samples():\n samples.append(sample)\n return samples",
"def _get_files(\n self,\n data_root,\n data_subset=\"full/*0\",\n signal_subset=\"*\",\n noise_subset=\"*\",\n data_type=\"raw\",\n noise_type=\"stationary\",\n noise_type_sim=None,\n mask_type=\"hitsmask_tailored\",\n signal_type=\"r0p03\",\n signal_type_sim=None,\n signal_transfer_type=None,\n suffix=\"\",\n foreground_type_sim=None,\n template_type=None,\n sub_planck=False,\n ):\n\n if signal_transfer_type is None:\n signal_transfer_type = signal_type\n\n # regularize data root\n if not os.path.exists(data_root):\n raise OSError(\"Missing data root {}\".format(data_root))\n\n # find all map files\n map_root = os.path.join(data_root, \"data_{}\".format(data_type))\n map_files = []\n data_subset = data_subset.split(\",\")\n for f in np.atleast_1d(data_subset):\n files = glob.glob(os.path.join(map_root, \"{}.fits\".format(f)))\n if not len(files):\n raise OSError(\"Missing files in data subset {}\".format(f))\n map_files.extend(files)\n data_subset = \",\".join(data_subset)\n map_files = sorted(map_files)\n map_files = [f for f in map_files if os.path.basename(f).startswith(\"map_\")]\n map_tags = [\n os.path.splitext(os.path.basename(f))[0].split(\"_\", 1)[1] for f in map_files\n ]\n map_freqs = []\n for t in map_tags:\n # if map tag is not a plain frequency, extract plain frequency\n map_freqs.append(self.dict_freqs[t])\n self.log(\"Found {} map files in {}\".format(len(map_files), map_root), \"info\")\n self.log(\"Map files: {}\".format(map_files), \"debug\")\n self.log(\"Map freqs: {}\".format(map_freqs), \"debug\")\n\n raw_root = None\n raw_files = None\n # find all corresponding signal sims\n signal_root = os.path.join(data_root, \"signal_{}\".format(signal_type))\n num_signal = None\n signal_files = []\n for f in map_files:\n sfiles = sorted(\n glob.glob(\n f.replace(map_root, signal_root).replace(\n \".fits\", \"_{}.fits\".format(signal_subset)\n )\n )\n )\n nsims1 = len(sfiles)\n if not nsims1:\n raise OSError(\"Missing signal sims for {}\".format(f))\n if num_signal is None:\n num_signal = nsims1\n else:\n if nsims1 != num_signal:\n raise OSError(\n \"Found {} signal sims for map {}, expected {}\".format(\n nsims1, f, num_signal\n )\n )\n num_signal = min(num_signal, nsims1)\n signal_files.append(sfiles)\n signal_files = np.asarray([x[:num_signal] for x in signal_files])\n self.log(\"Found {} signal sims in {}\".format(num_signal, signal_root), \"info\")\n self.log(\n \"First signal sim files: {}\".format(signal_files[:, 0].tolist()), \"debug\"\n )\n\n # find all corresponding signal transfer function sims\n signal_transfer_root = os.path.join(\n data_root, \"signal_{}\".format(signal_transfer_type)\n )\n num_signal_transfer = None\n signal_transfer_files = []\n for f in map_files:\n sfiles = sorted(\n glob.glob(\n f.replace(map_root, signal_transfer_root).replace(\n \".fits\", \"_{}.fits\".format(signal_subset)\n )\n )\n )\n nsims1 = len(sfiles)\n if not nsims1:\n raise OSError(\"Missing signal sims for {}\".format(f))\n if num_signal_transfer is None:\n num_signal_transfer = nsims1\n else:\n if nsims1 != num_signal_transfer:\n raise OSError(\n \"Found {} signal_transfer sims for map {}, expected {}\".format(\n nsims1, f, num_signal_transfer\n )\n )\n num_signal_transfer = min(num_signal_transfer, nsims1)\n signal_transfer_files.append(sfiles)\n signal_transfer_files = np.asarray(\n [x[:num_signal_transfer] for x in signal_transfer_files]\n )\n self.log(\n \"Found {} signal transfer sims in {}\".format(\n num_signal_transfer, signal_transfer_root\n ),\n \"info\",\n )\n 
self.log(\n \"First signal transfer sim files: {}\".format(\n signal_transfer_files[:, 0].tolist()\n ),\n \"debug\",\n )\n\n # find all corresponding noise sims\n if noise_type is not None:\n noise_root = os.path.join(data_root, \"noise_{}\".format(noise_type))\n num_noise = None\n noise_files = []\n for f in map_files:\n nfiles = sorted(\n glob.glob(\n f.replace(map_root, noise_root).replace(\n \".fits\", \"_{}.fits\".format(noise_subset)\n )\n )\n )\n nsims1 = len(nfiles)\n if not nsims1:\n raise OSError(\"Missing noise sims for {}\".format(f))\n if num_noise is None:\n num_noise = nsims1\n else:\n if nsims1 != num_noise:\n raise OSError(\n \"Found {} noise sims for map {}, expected {}\".format(\n nsims1, f, num_noise\n )\n )\n num_noise = min(num_noise, nsims1)\n noise_files.append(nfiles)\n noise_files = np.asarray([x[:num_noise] for x in noise_files])\n self.log(\"Found {} noise sims in {}\".format(num_noise, noise_root), \"info\")\n self.log(\n \"First noise sim files: {}\".format(noise_files[:, 0].tolist()), \"debug\"\n )\n else:\n noise_root = None\n noise_files = None\n\n # find all corresponding noise sims for sim_index run\n if noise_type_sim is not None:\n noise_root_sim = os.path.join(data_root, \"noise_{}\".format(noise_type_sim))\n num_noise_sim = None\n noise_files_sim = []\n for f in map_files:\n nfiles = sorted(\n glob.glob(\n f.replace(map_root, noise_root_sim).replace(\n \".fits\", \"_{}.fits\".format(noise_subset)\n )\n )\n )\n nsims1 = len(nfiles)\n if not nsims1:\n raise OSError(\"Missing noise sims for {}\".format(f))\n if num_noise_sim is None:\n num_noise_sim = nsims1\n else:\n if nsims1 != num_noise_sim:\n raise OSError(\n \"Found {} noise sims for map {}, expected {}\".format(\n nsims1, f, num_noise_sim\n )\n )\n num_noise_sim = min(num_noise_sim, nsims1)\n noise_files_sim.append(nfiles)\n noise_files_sim = np.asarray(noise_files_sim)\n self.log(\n \"Found {} noise sims in {}\".format(num_noise_sim, noise_root_sim),\n \"info\",\n )\n self.log(\n \"First noise sim files: {}\".format(noise_files_sim[:, 0].tolist()),\n \"debug\",\n )\n else:\n noise_root_sim = noise_root\n noise_files_sim = noise_files\n\n # find all corresponding signal sims for sim_index run\n if signal_type_sim is not None:\n signal_root_sim = os.path.join(\n data_root, \"signal_{}\".format(signal_type_sim)\n )\n num_signal_sim = None\n signal_files_sim = []\n for f in map_files:\n nfiles = sorted(\n glob.glob(\n f.replace(map_root, signal_root_sim).replace(\n \".fits\", \"_{}.fits\".format(signal_subset)\n )\n )\n )\n nsims1 = len(nfiles)\n if not nsims1:\n raise OSError(\"Missing signal sims for {}\".format(f))\n if num_signal_sim is None:\n num_signal_sim = nsims1\n else:\n if nsims1 != num_signal_sim:\n raise OSError(\n \"Found {} signal sims for map {}, expected {}\".format(\n nsims1, f, num_signal_sim\n )\n )\n num_signal_sim = min(num_signal_sim, nsims1)\n signal_files_sim.append(nfiles)\n signal_files_sim = np.asarray(signal_files_sim)\n self.log(\n \"Found {} signal sims in {}\".format(num_signal_sim, signal_root_sim),\n \"info\",\n )\n self.log(\n \"First signal sim files: {}\".format(signal_files_sim[:, 0].tolist()),\n \"debug\",\n )\n else:\n signal_root_sim = signal_root\n signal_files_sim = signal_files\n\n # find all corresponding foreground sims for sim_index run\n if foreground_type_sim is not None:\n foreground_root = os.path.join(\n data_root, \"foreground_{}\".format(foreground_type_sim)\n )\n num_foreground_sim = None\n foreground_files = []\n for f in map_files:\n nfiles = 
sorted(\n glob.glob(\n f.replace(map_root, foreground_root).replace(\".fits\", \"_*.fits\")\n )\n )\n nsims1 = len(nfiles)\n if not nsims1:\n raise OSError(\"Missing foreground sims for {}\".format(f))\n if num_foreground_sim is None:\n num_foreground_sim = nsims1\n else:\n if nsims1 != num_foreground_sim:\n raise OSError(\n \"Found {} foreground sims for map {}, expected {}\".format(\n nsims1, f, num_foreground_sim\n )\n )\n num_foreground_sim = min(num_foreground_sim, nsims1)\n foreground_files.append(nfiles)\n foreground_files = np.asarray(\n [x[:num_foreground_sim] for x in foreground_files]\n )\n self.log(\n \"Found {} foreground sims in {}\".format(\n num_foreground_sim, foreground_root\n ),\n \"info\",\n )\n self.log(\n \"First foreground sim files: {}\".format(\n foreground_files[:, 0].tolist()\n ),\n \"debug\",\n )\n else:\n foreground_root = None\n foreground_files = None\n\n # find all corresponding masks\n if mask_type is None:\n raise ValueError(\"Argument mask_type required\")\n # If mask is a fits file, use the same mask for all maps\n if os.path.splitext(mask_type)[1] == \".fits\":\n if os.path.exists(mask_type):\n # it's an absolute path\n mask_files = np.tile(mask_type, len(map_tags))\n mask_root = os.path.dirname(mask_type)\n else:\n # it's relative to base directory structure\n mask_files = np.tile(os.path.join(data_root, mask_type), len(map_tags))\n mask_root = os.path.dirname(os.path.join(data_root, mask_type))\n else:\n mask_root = os.path.join(data_root, \"masks_{}\".format(mask_type))\n # XXX Do this smarter\n mask_files = [\n os.path.join(mask_root, \"mask_map_{}.fits\".format(tag))\n for tag in map_tags\n ]\n for f in mask_files:\n if not os.path.exists(f):\n raise OSError(\"Missing mask file {}\".format(f))\n self.log(\"Found {} masks in {}\".format(len(mask_files), mask_root), \"info\")\n self.log(\"Mask files: {}\".format(mask_files), \"debug\")\n\n # Also need a list of unique map tags for populating dictionaries\n # in data structures\n map_tags_orig = list(map_tags) # copy\n map_tags = pt.unique_tags(map_tags)\n\n # make a list of names corresponding to the order of the cross spectra\n map_pairs = pt.tag_pairs(map_tags)\n map_pairs_orig = pt.tag_pairs(map_tags, index=map_tags_orig)\n\n # make a dictionary of map freqs for each unique map tag\n map_freqs_dict = {}\n for im0, m0 in enumerate(map_tags):\n map_freqs_dict[m0] = map_freqs[im0]\n map_freqs = map_freqs_dict\n\n fields = [\n \"data_root\",\n \"data_subset\",\n \"map_root\",\n \"map_files\",\n \"map_tags\",\n \"map_pairs\",\n \"map_tags_orig\",\n \"map_pairs_orig\",\n \"map_freqs\",\n \"raw_root\",\n \"raw_files\",\n \"signal_root\",\n \"signal_files\",\n \"signal_root_sim\",\n \"signal_files_sim\",\n \"signal_transfer_root\",\n \"signal_transfer_files\",\n \"noise_root\",\n \"noise_files\",\n \"noise_root_sim\",\n \"noise_files_sim\",\n \"mask_root\",\n \"mask_files\",\n \"foreground_root\",\n \"foreground_files\",\n ]\n out = dict()\n local = locals()\n for f in fields:\n out[f + suffix] = local[f]\n return out",
"def get_file_list(mixer_file, select_random, use_list_of_files):\n logger = logging.getLogger(get_file_list.__name__)\n files = list()\n\n if use_list_of_files:\n with open(mixer_file, 'r') as list_file:\n for line in list_file:\n files.append(os.path.join('data/raw',line.strip()))\n\n if select_random:\n random.shuffle(files)\n\n else:\n\n mixer = parse_mixer_file(mixer_file)\n\n for m in mixer:\n path = os.path.join(project_dir, m[0])\n all_mixer_files = [os.path.join(path,f) for f in os.listdir(path) \n if os.path.isfile(os.path.join(path, f)) and f.split('.')[-1] == 'csv']\n\n current_files = list()\n # Check if the number of samples is limited\n if m[2] >= 0:\n sample_count = 0\n for f in all_mixer_files:\n # Get number of lines without the header line\n num_lines = sum(1 for line in open(f)) - 1\n\n if (sample_count + num_lines) > m[2]:\n current_files.append((f, m[2] - sample_count))\n sample_count += (m[2] - sample_count)\n break\n else:\n current_files.append((f, -1))\n sample_count += num_lines\n\n if sample_count < m[2]:\n logger.warn('Not enough samples ({} < {}): {}'.format(sample_count, m[2], m[0]))\n else:\n # No limit, take all samples in the files\n current_files = zip(all_mixer_files, [-1]*len(all_mixer_files))\n\n if m[1] < 0:\n # -1 means all .csv files\n files += current_files\n elif m[1] > 0:\n if m[1] > len(current_files):\n logger.warn('Not enough files ({} < {}): {}'.format(len(current_files),\n m[1], m[0]))\n files += current_files[:m[1]]\n\n if select_random:\n random.shuffle(files)\n else:\n files = sorted(files, key=lambda x: int(os.path.basename(x[0]).split('_')[-1].split('.')[0]))\n\n return files",
"def getFiles(mintime=(17, 20, 17), maxtime=(17, 33, 17), folder='data/30Jul/'):\n start = datetime.time(*mintime)\n stop = datetime.time(*maxtime)\n all = g.glob(folder + '*.fits')\n ret = []\n for f in all:\n path, file = os.path.split(f)\n numbs = [int(x) for x in file.replace('sEuclid.fits', '').split('_')]\n data = datetime.time(*numbs)\n if start <= data <= stop:\n ret.append(file)\n return [folder + f for f in ret]",
"def smartmeter_data():\n path = '/datc/opschaler/smartmeter_data'\n file_paths = np.array(glob.glob(path + \"/*.csv\"))\n\n print('Detected %s smartmeter_data files.' % len(file_paths))\n dwelling_ids = np.array(list((map(lambda x: x[-15:-4], file_paths))))\n\n return file_paths, dwelling_ids",
"def test_read(self):\n for root, dirs, files in os.walk(os.path.join(self.test_dir, 'files')):\n for filename in files:\n if filename.endswith('.bin'):\n d = Dataset(os.path.join(root, filename))\n data = d.as_dict()\n for freq_dict in data['frequencies']:\n x = freq_dict['easting']\n y = freq_dict['northing']\n image = freq_dict['intensity']\n self.assertIsInstance(x, np.ndarray)\n self.assertIsInstance(y, np.ndarray)\n self.assertIsInstance(image, np.ndarray)",
"def _get_read_range(self):\n\n self.total_size = get_data_size(self.storage, self.read_bucket, self.read_path)\n\n partition_size = floor(self.total_size / self.task_info.num_tasks)\n\n self.lower_bound = self.task_info.task_id * partition_size\n self.upper_bound = self.lower_bound + partition_size\n\n # self.lower_bound, self.upper_bound = adjust_bounds(self.storage, self.read_bucket, self.read_path,\n # self.lower_bound, self.upper_bound, self.total_size)\n\n print(\"Scanning bytes=%d-%d (%d)\"%(self.lower_bound, self.upper_bound,\n self.upper_bound - self.lower_bound))",
"def readFullRelaxFiles(folder_path):\n\n run_arr = []\n Nrun_arr = []\n dod_arr = []\n crate_arr = []\n count=0\n\n # find number of files that starts with run\n # (this is the data file we want to read)\n for file in os.listdir(folder_path):\n if file.startswith(\"relaxrun\"):\n count+=1\n\n # order the data files by run number, so we get descending crates\n Nrun=1\n for i in range(count+5):\n for file in os.listdir(folder_path):\n if file.startswith(\"relaxrun_\"+str(Nrun)+\"-\"):\n run_arr.append(file)\n dod = re.search('dod=(.*).txt', file).group(1)\n crate = re.search('Crate=(.*)_',file).group(1)\n Nrun_arr.append(np.round(int(Nrun),decimals=0))\n dod_arr.append(float(dod))\n crate_arr.append(float(crate))\n Nrun+=1\n print(len(run_arr))\n\n return run_arr, Nrun_arr, dod_arr, crate_arr",
"def available_samples(vcf_path):\n return _header_from_vcf(vcf_path)[9:]",
"def read_data_samples(fp):\n if(path.isdir(fp)):\n fps = glob.glob(fp + '\\\\*.txt')\n return list(map(lambda x: read_file(x), fps))",
"def samples(self):\n return glob.glob(os.path.join(self.production.rundir, \"extrinsic_posterior_samples.dat\"))",
"def read_data(args):\n\n print(\"Start read_data\")\n t_tot = 0 # sum of times for the all dataset\n date_dirs = os.listdir(args.path_data_base)\n for n_iter, date_dir in enumerate(date_dirs):\n # get access to each sequence\n path1 = os.path.join(args.path_data_base, date_dir)\n if not os.path.isdir(path1):\n continue\n date_dirs2 = os.listdir(path1)\n\n for date_dir2 in date_dirs2:\n path2 = os.path.join(path1, date_dir2)\n if not os.path.isdir(path2):\n continue\n # read data\n oxts_files = sorted(glob.glob(os.path.join(path2, 'oxts', 'data', '*.txt')))\n oxts = KITTIDataset.load_oxts_packets_and_poses(oxts_files)\n\n \"\"\" Note on difference between ground truth and oxts solution:\n - orientation is the same\n - north and east axis are inverted\n - position are closed to but different\n => oxts solution is not loaded\n \"\"\"\n\n print(\"\\n Sequence name : \" + date_dir2)\n if len(oxts) < KITTIDataset.min_seq_dim: # sequence shorter than 30 s are rejected\n cprint(\"Dataset is too short ({:.2f} s)\".format(len(oxts) / 100), 'yellow')\n continue\n lat_oxts = np.zeros(len(oxts))\n lon_oxts = np.zeros(len(oxts))\n alt_oxts = np.zeros(len(oxts))\n roll_oxts = np.zeros(len(oxts))\n pitch_oxts = np.zeros(len(oxts))\n yaw_oxts = np.zeros(len(oxts))\n roll_gt = np.zeros(len(oxts))\n pitch_gt = np.zeros(len(oxts))\n yaw_gt = np.zeros(len(oxts))\n t = KITTIDataset.load_timestamps(path2)\n acc = np.zeros((len(oxts), 3))\n acc_bis = np.zeros((len(oxts), 3))\n gyro = np.zeros((len(oxts), 3))\n gyro_bis = np.zeros((len(oxts), 3))\n p_gt = np.zeros((len(oxts), 3))\n v_gt = np.zeros((len(oxts), 3))\n v_rob_gt = np.zeros((len(oxts), 3))\n\n k_max = len(oxts)\n for k in range(k_max):\n oxts_k = oxts[k]\n t[k] = 3600 * t[k].hour + 60 * t[k].minute + t[k].second + t[\n k].microsecond / 1e6\n lat_oxts[k] = oxts_k[0].lat\n lon_oxts[k] = oxts_k[0].lon\n alt_oxts[k] = oxts_k[0].alt\n acc[k, 0] = oxts_k[0].af\n acc[k, 1] = oxts_k[0].al\n acc[k, 2] = oxts_k[0].au\n acc_bis[k, 0] = oxts_k[0].ax\n acc_bis[k, 1] = oxts_k[0].ay\n acc_bis[k, 2] = oxts_k[0].az\n gyro[k, 0] = oxts_k[0].wf\n gyro[k, 1] = oxts_k[0].wl\n gyro[k, 2] = oxts_k[0].wu\n gyro_bis[k, 0] = oxts_k[0].wx\n gyro_bis[k, 1] = oxts_k[0].wy\n gyro_bis[k, 2] = oxts_k[0].wz\n roll_oxts[k] = oxts_k[0].roll\n pitch_oxts[k] = oxts_k[0].pitch\n yaw_oxts[k] = oxts_k[0].yaw\n v_gt[k, 0] = oxts_k[0].ve\n v_gt[k, 1] = oxts_k[0].vn\n v_gt[k, 2] = oxts_k[0].vu\n v_rob_gt[k, 0] = oxts_k[0].vf\n v_rob_gt[k, 1] = oxts_k[0].vl\n v_rob_gt[k, 2] = oxts_k[0].vu\n p_gt[k] = oxts_k[1][:3, 3]\n Rot_gt_k = oxts_k[1][:3, :3]\n roll_gt[k], pitch_gt[k], yaw_gt[k] = IEKF.to_rpy(Rot_gt_k)\n\n t0 = t[0]\n t = np.array(t) - t[0]\n # some data can have gps out\n if np.max(t[:-1] - t[1:]) > 0.1:\n cprint(date_dir2 + \" has time problem\", 'yellow')\n ang_gt = np.zeros((roll_gt.shape[0], 3))\n ang_gt[:, 0] = roll_gt\n ang_gt[:, 1] = pitch_gt\n ang_gt[:, 2] = yaw_gt\n\n p_oxts = lla2ned(lat_oxts, lon_oxts, alt_oxts, lat_oxts[0], lon_oxts[0],\n alt_oxts[0], latlon_unit='deg', alt_unit='m', model='wgs84')\n p_oxts[:, [0, 1]] = p_oxts[:, [1, 0]] # see note\n\n # take correct imu measurements\n u = np.concatenate((gyro_bis, acc_bis), -1)\n # convert from numpy\n t = torch.from_numpy(t)\n p_gt = torch.from_numpy(p_gt)\n v_gt = torch.from_numpy(v_gt)\n ang_gt = torch.from_numpy(ang_gt)\n u = torch.from_numpy(u)\n\n # convert to float\n t = t.float()\n u = u.float()\n p_gt = p_gt.float()\n ang_gt = ang_gt.float()\n v_gt = v_gt.float()\n\n mondict = {\n 't': t, 'p_gt': p_gt, 'ang_gt': 
ang_gt, 'v_gt': v_gt,\n 'u': u, 'name': date_dir2, 't0': t0\n }\n\n t_tot += t[-1] - t[0]\n KITTIDataset.dump(mondict, args.path_data_save, date_dir2)\n print(\"\\n Total dataset duration : {:.2f} s\".format(t_tot))",
"def read_data(self, datafile, min_period):\n data = [[], [],]\n\n # Read input file\n in_file = open(datafile, \"r\")\n for line in in_file:\n if line.startswith(\"#\") or line.startswith(\"%\"):\n continue\n tmp = line.split()\n period = float(tmp[0])\n # Extract subset of period values\n if ((period >= min_period) and\n (period <= MAX_PERIOD)):\n data[0].append(float(tmp[0]))\n data[1].append(float(tmp[1]))\n # Close file\n in_file.close()\n # Return data\n return data",
"def get_samples(self):\n result = []\n segmentsize=30\n # Reduce this to very little to get very large trainingsets\n stride=5\n noOfBuckets=40\n for start in range(0, len(self.data) - segmentsize, stride):\n if start + segmentsize <= len(self.data):\n segments_buckets = self.get_buckets(start, start + segmentsize, noOfBuckets)\n result.append(segments_buckets)\n return result",
"def getPadDataFiles(padPath, dateStart, dateStop, sensor):\n padFiles,sampleRate,dataColumns = getPadFiles(padPath,dateStart,dateStop,sensor,'')\n return padFiles,sampleRate,dataColumns",
"def get_filenames():\r\n datadir = \"./phase3_data/\"\r\n samples = os.listdir(datadir)\r\n all_files = []\r\n for i in range(len(samples)):\r\n sampfiles = []\r\n datadir = \"./phase3_data/\" + samples[i]\r\n files = os.listdir(datadir)\r\n for file in files:\r\n if file.endswith(\".bin\"):\r\n sampfiles += [file]\r\n all_files += [sampfiles]\r\n return samples, all_files",
"def read_data(source_path, target_path, opt):\n data_set = [[] for _ in _buckets]\n with tf.gfile.GFile(source_path, mode=\"r\") as source_file:\n with tf.gfile.GFile(target_path, mode=\"r\") as target_file:\n source, target = source_file.readline(), target_file.readline() \n counter = 0\n while source and target and (not opt.max_train_data_size or counter < opt.max_train_data_size):\n counter += 1\n if counter % 100000 == 0:\n print(\" reading data line %d\" % counter)\n sys.stdout.flush()\n source_ids = [int(x) for x in source.split()]\n target_ids = [int(x) for x in target.split()]\n target_ids.append(data_utils.EOS_ID)\n for bucket_id, (source_size, target_size) in enumerate(_buckets):\n if opt.minlen <len(source_ids) < min(source_size, opt.maxlen) and opt.minlen <len(target_ids) < min(target_size, opt.maxlen):\n data_set[bucket_id].append([source_ids, target_ids])\n break\n source, target = source_file.readline(), target_file.readline()\n \n \n \n return data_set",
"def samples(self):\n if self._samples:\n return self._samples\n if SAMPLE_DF_KEY not in self or self[SAMPLE_DF_KEY] is None:\n _LOGGER.debug(\"No samples are defined\")\n return []",
"def get_files_to_be_indexed(self):\n\t\tfiles = self.get_all_files()\n\t\tfiles_list = []\n\t\tfor name in files:\n\t\t\tif(name.split('.')[-1] in self.accepted_formats and os.stat(os.path.join(self.root, name)).st_size < 5000000):\n\t\t\t\tfiles_list.append(os.path.join(self.root, name))\n\t\treturn files_list[0:-1]",
"def read_data(self):\n self.days = [0, 2, 3, 5, 6, 8, 9, 11, 13, 14]\n path = '../data/'\n data = []\n for day in self.days:\n filename = path + 'spectrum_day{}.txt'.format(day)\n data.append(read_file(filename))\n return data",
"def get_samples(self, min_samples):\n raise NotImplementedError",
"def get_mean_files(self):\n return [self.mean_file_0, self.mean_file_1]",
"def sample_features_from_data(path:str, num_samples:int, total_count:int):\r\n sampled_idxs = sorted(np.random.choice(np.arange(total_count), num_samples, replace=False))\r\n retrieved_samples = []\r\n already_seen_samples = 0\r\n print(\"Sampling\")\r\n done = False\r\n for file in Path(path).rglob(\"*.npz\"):\r\n samples_from_file = 0\r\n loaded_data = np.load(str(file), allow_pickle=True)['data']\r\n datafile_samples = len(loaded_data)\r\n i_sample = sampled_idxs[len(retrieved_samples)] - already_seen_samples\r\n while i_sample < datafile_samples:\r\n retrieved_samples.append(loaded_data[i_sample].copy())\r\n samples_from_file += 1\r\n\r\n if len(retrieved_samples) == num_samples:\r\n done = True\r\n break\r\n\r\n i_sample = sampled_idxs[len(retrieved_samples)] - already_seen_samples\r\n\r\n already_seen_samples += datafile_samples\r\n print(\"From %s obtained %d samples out of %d samples\" % (str(file), samples_from_file, datafile_samples))\r\n\r\n if done:\r\n break\r\n\r\n assert len(retrieved_samples) == num_samples\r\n return retrieved_samples",
"def data_available(dataset_name=None):\r\n for file_list in data_resources[dataset_name]['files']:\r\n for file in file_list:\r\n if not os.path.exists(os.path.join(data_path, dataset_name, file)):\r\n return False\r\n return True",
"def test_collect_files():\n filelist = [\"test/a.ext\", \"test/b.asd\"]\n\n result = loader.collect_files(filelist, lambda x: x, lambda x: np.arange(0, 50))\n\n for k in filelist:\n assert np.array_equal(np.arange(0, 50), result[k])",
"def get_files_time_period(prefix, yr_s, yr_e):\n\n # Get path and folder\n path = directories.CLIMATE_DATA + '/'\n folder = os.listdir(path)\n\n # Files should be automatically ordered by year assuming that the format of files is what we expect\n files = []\n\n # List of years to extract\n years = list(range(yr_s, yr_e + 1))\n\n # Save lowest and highest year in data for later - only used if multiple years are in the same file\n min_yr = yr_s\n max_yr = yr_e\n\n # Go through the files in the folder and get the relevant files within the time frame\n for file in folder:\n if os.path.isfile(os.path.join(path, file)) and file.startswith(prefix):\n # If file with just one year in it\n if not get_file_two_years(file):\n for year in years:\n if str(year) in file:\n files.append(file)\n else: # file has multiple years in it\n fst_yr, snd_yr = get_file_two_years(file)\n # Get files that have data within the years\n if overlaps(fst_yr, snd_yr, yr_s, yr_e):\n files.append(file)\n if fst_yr < min_yr:\n min_yr = fst_yr\n if snd_yr > max_yr:\n max_yr = snd_yr\n\n return files, min_yr, max_yr",
"def samples(self):\n pass",
"def generate_read_list(num_files, world_size):\n return np.array_split(np.arange(num_files), world_size)",
"def get_filenames(start_year = 1980, end_year = 2009, path = 'ucr_offenses_known_monthly_1960_2016_dta/'):\n filenames = []\n for filename in os.listdir(path):\n #ignore pdf files in folder, filter out dta file names that contain 1980-2009\n if filename.endswith('.dta'):\n for years in range(start_year, end_year + 1):\n if str(years) in filename:\n filenames.append(filename)\n return(filenames)",
"def getFileList(ms1_folder, metadataFileName):\n\text = \"_ms1Peak\"\n\tdelim = \"\\t\"\n\n\t# Find list of MS1 feature files\n\tms1_file_dic = {}\n\tfor f in Path(ms1_folder).glob(\"**/*_ms1Peak.txt\"):\n\t\tf_remove_ext = re.sub(ext, \"\", str(f.stem))\n\t\tms1_file_dic[f_remove_ext] = 1\n\n\tmetadata_file_dic = {}\n\tmetadata_file_list = []\n\tmetadata_file = pd.read_csv(metadataFileName,sep='\\t')\n\tfor f in metadata_file['fileName']:\n\t\tcur_file = str(Path(f).stem)\n\t\tmetadata_file_dic[cur_file] = 1\n\t\tmetadata_file_list.append(cur_file)\n\n\tif ms1_file_dic.keys() != metadata_file_dic.keys():\n\t\traise Exception(\"The number of MS1 files does not match number of \\\nlines in \" + metadataFileName)\n\t\t\n\treturn(metadata_file_list, list(metadata_file['metadataLabel']))"
] | [
"0.61742574",
"0.60562795",
"0.5931317",
"0.5895991",
"0.5861483",
"0.58041275",
"0.5802405",
"0.57899123",
"0.5788426",
"0.57653975",
"0.5759746",
"0.5747541",
"0.57445914",
"0.57307",
"0.5728617",
"0.5701655",
"0.5679718",
"0.56450206",
"0.5641442",
"0.5636241",
"0.56325316",
"0.5627751",
"0.56249464",
"0.5607959",
"0.560415",
"0.55700254",
"0.5549988",
"0.5546132",
"0.55393565",
"0.5536804"
] | 0.73086745 | 0 |
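A minimal usage sketch for the row above (illustrative only, not part of the dataset): it re-implements the per-file overlap test that getDataFilesForSamples applies, as a standalone function that can run without the surrounding reader class. The file names and sample ranges are invented for the example.

from typing import List, Tuple

def files_covering_range(
    data_files: List[str],
    data_ranges: List[Tuple[int, int]],
    start_sample: int,
    end_sample: int,
) -> List[Tuple[str, int, int]]:
    """Return (file, readFrom, readTo) for every file overlapping the window.

    readFrom/readTo are inclusive indices relative to the start of each file,
    matching the convention used by getDataFilesForSamples above.
    """
    selected = []
    for dfile, (file_start, file_end) in zip(data_files, data_ranges):
        if file_start > end_sample or file_end < start_sample:
            continue  # no overlap with this file
        read_from = max(0, start_sample - file_start)
        read_to = min(file_end, end_sample) - file_start
        selected.append((dfile, read_from, read_to))
    return selected

if __name__ == "__main__":
    files = ["rec_0.dat", "rec_1.dat", "rec_2.dat"]
    ranges = [(0, 999), (1000, 1999), (2000, 2999)]
    print(files_covering_range(files, ranges, 500, 1500))
    # [('rec_0.dat', 500, 999), ('rec_1.dat', 0, 500)]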
Get defaults for channel headers. Returns Dict[str, Any]: Dictionary of headers for channels and default values | def chanDefaults(self) -> Dict[str, Any]:
chanH = {}
chanH["gain_stage1"] = 1
chanH["gain_stage2"] = 1
chanH["hchopper"] = 0 # this depends on sample frequency
chanH["echopper"] = 0
# channel output information (sensor_type, channel_type, ts_lsb, pos_x1, pos_x2, pos_y1, pos_y2, pos_z1, pos_z2, sensor_sernum)
chanH["ats_data_file"] = ""
chanH["num_samples"] = 0
chanH["sensor_type"] = ""
chanH["channel_type"] = ""
chanH["ts_lsb"] = 1
# the lsb/scaling is not applied. data is raw voltage which needs to be scaled
# an lsb is constructed from the scaling in the XTR/XTRX file to take the data to mV
chanH["scaling_applied"] = False # check this
chanH["pos_x1"] = 0
chanH["pos_x2"] = 0
chanH["pos_y1"] = 0
chanH["pos_y2"] = 0
chanH["pos_z1"] = 0
chanH["pos_z2"] = 0
chanH["sensor_sernum"] = 0
return chanH | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def defaults():\n return {}",
"def default_channel_response_data(channel):\n channel_record = Channel.objects.get(name=channel.name)\n return {\n \"title\": channel.title,\n \"name\": channel.name,\n \"description\": channel.description,\n \"public_description\": channel.public_description,\n \"channel_type\": channel.channel_type,\n \"user_is_contributor\": True,\n \"user_is_subscriber\": True,\n \"user_is_moderator\": False,\n \"link_type\": channel.link_type,\n \"membership_is_managed\": False,\n \"avatar\": None,\n \"avatar_small\": None,\n \"avatar_medium\": None,\n \"banner\": None,\n \"ga_tracking_id\": None,\n \"allowed_post_types\": [\n post_type\n for post_type, enabled in channel_record.allowed_post_types\n if enabled\n ],\n \"widget_list_id\": channel_record.widget_list_id,\n \"about\": None,\n \"moderator_notifications\": False,\n }",
"def defaults() -> dict:\n pass",
"def defaults(self) -> Dict[str, Any]:\n if self._defaults is _missing:\n return {}\n return self._defaults",
"def _available_channels(devices, header):\n\n # ------------------------ Definition of constants and variables ------------------------------\n chn_dict = {}\n\n # %%%%%%%%%%%%%%%%%%%%%% Access to the relevant data in the header %%%%%%%%%%%%%%%%%%%%%%%%%%%%\n for dev in devices:\n chn_dict[dev] = header[dev][\"column labels\"].keys()\n\n return chn_dict",
"def get_common_header():\n cf = ConfigParser.ConfigParser()\n cf.read('config.ini')\n headers = cf.items('headers')\n\n return dict(headers)",
"def default_hparams():\n hparams = DatasetBase.default_hparams()\n hparams.update({\n \"transforms\": None,\n \"processed_csv\": None,\n \"mode\": None,\n \"batch_size\": 1,\n \"shuffle\": False,\n \"shuffle_buffer_size\": 32,\n \"input_channel\": \"RGB\"\n })\n return hparams",
"def getDefaultSettings():\n return {}",
"def _set_defaults(self):\n defaults = {key: val[0][1].replace(\"*\", \"\")\n for key, val in self._filetypes.items()}\n defaults[\"default\"] = None\n defaults[\"video\"] = \".mp4\"\n defaults[\"image\"] = \".png\"\n logger.debug(defaults)\n return defaults",
"def get_default_header() -> dict:\n return {\"Accept\": \"application/json\"}",
"def get_default_request_headers(self) -> dict:\n return {\n RequestHeaders.CONTENT_TYPE_HEADER_NAME:\n RequestHeaders.CONTENT_TYPE_HEADER_VALUE_APPLICATION_JSON,\n RequestHeaders.X_CLIENT_HEADER_NAME: GlobalConstants.X_CLIENT,\n RequestHeaders.X_API_USER_HEADER_NAME: self.hopla_auth_parser.user_id,\n RequestHeaders.X_API_KEY_HEADER_NAME: self.hopla_auth_parser.api_token\n }",
"def _get_gateway_headers(self):\n return {MSG_HEADER_ACTOR: self.name,\n MSG_HEADER_VALID: DEFAULT_EXPIRY}",
"def get_defaults(self):\n default_dict = {}\n args, varargs, keyword, defaults = inspect.getargspec(self.exec_obj)\n if defaults:\n default_dict = dict(zip(args[-len(defaults):], defaults))\n return default_dict",
"def default_config():\n return {\n MESSAGE: 'reply -> send*',\n REPLY: 'transitiveReply -> send*',\n FORWARD: 'none*'\n }",
"def default_channel(self) -> int:\r\n ...",
"def default_hparams():\n return {\n \"name\": \"forward_connector\"\n }",
"def get_defaults():\n\n return {\n \"numberofrules\": 0,\n \"datapath\": path_join_robust(BASEDIR_PATH, \"data\"),\n \"freshen\": True,\n \"replace\": False,\n \"backup\": False,\n \"skipstatichosts\": False,\n \"keepdomaincomments\": True,\n \"extensionspath\": path_join_robust(BASEDIR_PATH, \"extensions\"),\n \"extensions\": [],\n \"compress\": False,\n \"minimise\": False,\n \"outputsubfolder\": \"\",\n \"hostfilename\": \"hosts\",\n \"targetip\": \"0.0.0.0\",\n \"sourcedatafilename\": \"update.json\",\n \"sourcesdata\": [],\n \"readmefilename\": \"readme.md\",\n \"readmetemplate\": path_join_robust(BASEDIR_PATH, \"readme_template.md\"),\n \"readmedata\": {},\n \"readmedatafilename\": path_join_robust(BASEDIR_PATH, \"readmeData.json\"),\n \"exclusionpattern\": r\"([a-zA-Z\\d-]+\\.){0,}\",\n \"exclusionregexes\": [],\n \"exclusions\": [],\n \"commonexclusions\": [\"hulu.com\"],\n \"blacklistfile\": path_join_robust(BASEDIR_PATH, \"blacklist\"),\n \"whitelistfile\": path_join_robust(BASEDIR_PATH, \"whitelist\"),\n }",
"def build_headers(self):\n\n # User-agent is always sent\n headers = {'user-agent': self.useragent}\n for hdr in self.config.client_standard_headers:\n val = getattr(self.config, 'client_' + hdr.lower().replace('-','_'))\n headers[hdr] = val\n\n return headers",
"def __get_headers(self):\n\n return {}",
"def currentSBHeaders(self):\n headers = {}\n for header_name in [options['Headers','classification_header_name'],\n options['Headers','mailid_header_name'],\n options['Headers','classification_header_name'] + \"-ID\",\n options['Headers','thermostat_header_name'],\n options['Headers','evidence_header_name'],\n options['Headers','score_header_name'],\n options['Headers','trained_header_name'],\n ]:\n value = self[header_name]\n if value is not None:\n headers[header_name] = value\n return headers",
"def _headers(self) -> Mapping[str, str]:\n return {}",
"def default_hparams():\n return {\n 'initializer': None,\n 'num_heads': 8,\n 'output_dim': 512,\n 'num_units': 512,\n 'dropout_rate': 0.1,\n 'use_bias': False,\n 'name': 'multihead_attention_rpr',\n 'is_decoder': False,\n 'relative_attention_num_buckets': 32\n }",
"def meta_defaults():\n try:\n with open(\".hosts.yml\") as f:\n meta = load(f, Loader=CLoader)\n except FileNotFoundError:\n return {}\n finally:\n meta = {\n host: {option[1:]: value for option, value in items.items()}\n for host, items in meta.items()\n }\n return meta",
"def default_dict_keys():\n return defaults_dict.keys()",
"def define_headers(self):\n return {}",
"def default_headers(self):\n return RequestHeaders().get_default_request_headers()",
"def default_hparams():\n return {\n \"value\": 0.,\n \"name\": \"constant_connector\"\n }",
"def _getDefaultGroupDict(self, container):\n ddict = dict(container._dict_)\n ddict.update({\n \"_def_for_repos\": container.for_repos,\n \"_def_for_paths\": container.for_paths,\n })\n\n return ddict",
"def get_config_defaults(self): # pylint: disable=R0201\n return {}",
"def get_default_hparams():\n hparams_map = base_model.get_default_hparams().values()\n hparams_map.update({\n 'conditional': True,\n 'dec_rnn_size': [512], # Decoder RNN: number of units per layer.\n 'dec_rnn_attn_len': 0, # Decoder RNN: length of attention vector.\n 'enc_rnn_size': [256], # Encoder RNN: number of units per layer per dir.\n 'dropout_keep_prob': 1.0, # Probability all dropout keep.\n 'sampling_schedule': 'constant', # constant, exponential, inverse_sigmoid\n 'sampling_rate': 0.0, # Interpretation is based on `sampling_schedule`.\n })\n return tf.contrib.training.HParams(**hparams_map)"
] | [
"0.62941504",
"0.6244179",
"0.620184",
"0.59563303",
"0.5954738",
"0.58055735",
"0.5720258",
"0.57148266",
"0.56928277",
"0.5690497",
"0.5689257",
"0.56565005",
"0.56229764",
"0.55866164",
"0.55399114",
"0.55186236",
"0.5510632",
"0.54901797",
"0.5486276",
"0.5461099",
"0.54414463",
"0.5429952",
"0.5426885",
"0.54262495",
"0.5425819",
"0.5381729",
"0.53362525",
"0.53302115",
"0.5318116",
"0.53002644"
] | 0.70106703 | 0 |
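A minimal sketch of how a defaults dictionary like the one returned by chanDefaults above is typically consumed: copy it once per channel and override the channel-specific fields. Illustrative only; CHAN_DEFAULTS and make_chan_header are invented names and only a subset of the default keys is shown.

from typing import Any, Dict

CHAN_DEFAULTS: Dict[str, Any] = {
    "gain_stage1": 1,
    "gain_stage2": 1,
    "sensor_type": "",
    "channel_type": "",
    "ts_lsb": 1,
    "scaling_applied": False,
}

def make_chan_header(channel_type: str, **overrides: Any) -> Dict[str, Any]:
    """Build one channel header from the shared defaults plus overrides."""
    header = dict(CHAN_DEFAULTS)  # shallow copy so the defaults stay untouched
    header["channel_type"] = channel_type
    header.update(overrides)
    return header

if __name__ == "__main__":
    ex = make_chan_header("Ex", ts_lsb=-1000.0)
    hx = make_chan_header("Hx", sensor_type="MFS06")
    print(ex["ts_lsb"], hx["sensor_type"])  # -1000.0 MFS06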
Read header files. For SPAM data, there may be more than one header file as data can be split up into smaller files as it is recorded. In that case, the header information should be somehow merged. All sampling frequencies should be the same | def readHeader(self) -> None:
# read header files
self.headersList = []
self.chanHeadersList = []
for headerFile in self.headerF:
if "xtrx" in headerFile.lower():
headers, chanHeaders = self.readHeaderXTRX(headerFile)
else:
headers, chanHeaders = self.readHeaderXTR(headerFile)
self.headersList.append(headers)
self.chanHeadersList.append(chanHeaders)
# check to make sure no gaps, calculate out the sample ranges and list the data files for each sample
self.mergeHeaders(self.headersList, self.chanHeadersList) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def read_header(self):\n # Read entire header into memory in one read to minimize Disk I/O.\n self.fh.seek(0)\n hdr = self.fh.read(self.header['header size'])\n\n # Find several markers in the byte-string\n # Each of these may occur more than once, find last.\n polylist_pos = hdr.rfind(b'Poly_list\\x00')\n champslist_pos = hdr.rfind(b'Champs_list\\x00')\n offsetlist_pos = hdr.rfind(b'Offset_list\\x00')\n\n # Find first occurance for these.\n # analparam_pos = hdr.find(b'Anal_param\\x00')\n analparamnano_pos = hdr.find(b'Anal_param_nano\\x00')\n analparamnanobis_pos = hdr.find(b'Anal_param_nano_bis\\x00')\n\n # Turn byte-string into BytesIO file-like object; reading and\n # keeping track of where we are is easier that way than trying to\n # slice byte-string as an array and keeping track of indices.\n hdr = io.BytesIO(hdr)\n\n # Main header\n hdr.seek(12)\n self.header.update(self._main_header(hdr))\n\n # NanoSIMS header, starts with PolyList/ChampsList/OffsetList\n # The following configurations have been found in the wild, so far:\n # 1. NS header\n # 2. PL, NS header\n # 3. PL, CL, OL, NS header\n # 4. PL, CL, OL, partial NS header, PL, NS header, PL, CL, OL,\n # partial NS header, PL, NS header\n # Note: I have not seen any *lists with contents (only length 0).\n # From OpenMIMS documentation I know that PolyList is as list of\n # Species dicts, but don't know how to read ChampsList or OffsetList.\n if polylist_pos < 0:\n # Case 1: No PL marker, so far only found for Real Time Images,\n # beam stability, or secondary ion beam centering files.\n if (self.header['analysis type'].endswith('rti') or\n self.header['file type'] == 35):\n hdr.seek(216, 1)\n elif self.header['file type'] == 31:\n if (self.header['analysis type'].endswith('hmr') or\n self.header['analysis type'].endswith('trolley step scan')):\n hdr.seek(120, 1)\n else:\n # secondary ion beam\n hdr.seek(600, 1)\n else:\n raise NotImplementedError('No PolyList marker found in header '\n 'and not and RTI image. Don\\'t know '\n 'how to continue.')\n elif (champslist_pos < 0 and offsetlist_pos < 0):\n # Case 2: PL, NS header\n self.header['PolyList'] = self._pco_list(hdr, 'poly', polylist_pos)\n elif (polylist_pos < champslist_pos < offsetlist_pos):\n # Case 3: PL, CL, OL, NS header\n self.header['PolyList'] = self._pco_list(hdr, 'poly', polylist_pos)\n self.header['ChampsList'] = self._pco_list(hdr, 'champs', champslist_pos)\n self.header['OffsetList'] = self._pco_list(hdr, 'offset', offsetlist_pos)\n elif (champslist_pos < offsetlist_pos < polylist_pos):\n # Case 4: PL, CL, OL, partial NS header, PL, NS header\n # with possible repeat\n self.header['ChampsList'] = self._pco_list(hdr, 'champs', champslist_pos)\n self.header['OffsetList'] = self._pco_list(hdr, 'offset', offsetlist_pos)\n self.header['PolyList'] = self._pco_list(hdr, 'poly', polylist_pos)\n else:\n raise NotImplementedError(\n 'An unknown order of the Poly/Champs/Offset Lists occured.\\n'\n 'Positions: PL = {}, CL = {}, OL = {}'\n ''.format(polylist_pos, champslist_pos, offsetlist_pos))\n\n self.header['NanoSIMSHeader'] = self._nanosims_header(hdr)\n\n # How much to skip? 
Chomping does not work; what if first value is 0?\n # This is correct so far, for nsheader v8 and 9\n hdr.seek(948, 1)\n self.header['BFields'] = []\n for b in range(self.header['NanoSIMSHeader']['b fields']):\n bf = self._bfield(hdr)\n bf['counting frame time'] = bf['time per pixel'] * \\\n self.header['NanoSIMSHeader']['counting frame height'] * \\\n self.header['NanoSIMSHeader']['counting frame width']\n bf['scanning frame time'] = bf['time per pixel'] * \\\n self.header['NanoSIMSHeader']['scanning frame height'] * \\\n self.header['NanoSIMSHeader']['scanning frame width']\n bf['working frame time'] = bf['time per pixel'] * \\\n self.header['NanoSIMSHeader']['working frame height'] * \\\n self.header['NanoSIMSHeader']['working frame width']\n self.header['BFields'].append(bf)\n # End nanosims_header/bfield based on Poly_list position\n\n # Analytical parameters\n\n # anal_param is not in OpenMIMS at all, represents file\n # Cameca NanoSIMS Data/raw_spec/cur_anal_par\n # However, only few useful things in this section, all of\n # which are also in other sections. Skip.\n # if analparam_pos < 0:\n # msg = 'Anal_param not found in header, skipping.'\n # warnings.warn(msg)\n # else:\n # hdr.seek(analparam_pos + 24)\n # print(analparam_pos)\n # d = {}\n # d['primary ion'], d['primary current begin'], \\\n # d['primary current end'], d['raster'], \\\n # d['X 00 always 1.0'], \\\n # d['X 01 always 1'], d['X 02 always 0'], \\\n # d['X 03 always 1'], d['X 04 always 0'], \\\n # d['X 05 always 0'], d['X 06 (not0 always 0'], \\\n # d['X 07 (not) always 0'], d['X 08 always 0'], \\\n # d['pressure 1'], d['e0w'], d['X 09 always 35 or #'], \\\n # d['X 10 junk'], \\\n # d['X 11 always 1'], d['X 12 always 0'], \\\n # d['X 13 always 1'], d['X 14 always 0'], \\\n # d['X 15 always 0'], d['X 16 always 0'], \\\n # d['X 17 always 0'], d['X 18 always 0'], \\\n # d['X 19 always 0'], d['X 20 always 300'], \\\n # d['X 21'], d['X 22'], d['X 23'], d['X 24'], \\\n # d['pressure 2'], d['X 25 junk'] = \\\n # unpack(self._bo + '24s 4d 8i 48s d i 28s 14i 8s 176s', hdr.read(416))\n #\n # d['pressure 1'] = self._cleanup_string(d['pressure 1'])\n # d['pressure 2'] = self._cleanup_string(d['pressure 2'])\n # d['primary ion'] = self._cleanup_string(d['primary ion'])\n #\n # self.header['AnalParam'] = d\n\n # Called AnalyticalParamNano AND AnalysisParamNano in OpenMIMS.\n # Here, split out Primary and Secondary beam.\n # Represents the file Cameca NanoSIMS Data/raw_spec/cur_anal_par_nano\n if analparamnano_pos < 0:\n msg = 'Anal_param_nano not found in header, '\n msg += 'don\\'t know where PrimaryBeam section starts.'\n warnings.warn(msg)\n else:\n hdr.seek(analparamnano_pos + 16)\n self.header['analysis version'], self.header['n50large'], \\\n self.header['comment'] = \\\n unpack(self._bo + '2i 8x 256s', hdr.read(272))\n\n self.header['n50large'] = bool(self.header['n50large'])\n self.header['comment'] = self._cleanup_string(self.header['comment'])\n\n self.header['PrimaryBeam'] = self._primary_beam(hdr)\n self.header['SecondaryBeam'] = self._secondary_beam(hdr)\n self.header['Detectors'] = self._detectors1(hdr)\n\n self.header['SecondaryBeam']['E0S'] = self.header['Detectors'].pop('E0S')\n self.header['SecondaryBeam']['pressure multicollection chamber'] = \\\n self.header['Detectors'].pop('pressure multicollection chamber')\n\n # Add overall mode of machine, based on E0W\n if self.header['SecondaryBeam']['E0W'] < 0:\n self.header['polarity'] = '+'\n else:\n self.header['polarity'] = '-'\n\n # Combine pixel size from 
NanoSIMSHeader and raster from PrimaryBeam\n # Prevent ZeroDivisionError if undefined\n wfw = self.header['NanoSIMSHeader']['working frame width']\n if not wfw:\n wfw = 1\n self.header['NanoSIMSHeader']['working frame raster'] = \\\n self.header['PrimaryBeam']['raster']\n self.header['NanoSIMSHeader']['scanning frame raster'] = \\\n self.header['NanoSIMSHeader']['working frame raster'] * \\\n self.header['NanoSIMSHeader']['scanning frame width'] / wfw\n self.header['NanoSIMSHeader']['counting frame raster'] = \\\n self.header['NanoSIMSHeader']['working frame raster'] * \\\n self.header['NanoSIMSHeader']['counting frame width'] / wfw\n\n # Header for non-nano SIMS\n magic = unpack(self._bo + 'i', hdr.read(4))[0]\n if magic != 2306:\n msg = 'SIMSHeader magic number not found here at byte {}.'\n msg = msg.format(hdr.tell()-4)\n raise ValueError(msg)\n self.header['SIMSHeader'] = self._sims_header(hdr)\n\n if self.header['analysis version'] >= 5:\n if analparamnanobis_pos < 0:\n msg = 'Anal_param_nano_bis not found in header, '\n msg += 'don\\'t know where second Detectors section starts.'\n warnings.warn(msg)\n else:\n hdr.seek(analparamnanobis_pos + 24)\n self.header['Detectors'].update(self._detectors2(hdr))\n xl = self.header['Detectors'].pop('exit slit xl')\n for n in range(7):\n det = self.header['Detectors']['Detector {}'.format(n+1)]\n w = list(det['exit slit widths'])\n w[2] = xl[5*n:5*(n+1)]\n det['exit slit widths'] = tuple(w)\n h = list(det['exit slit heights'])\n h[2] = xl[5*(n+1):5*(n+2)]\n det['exit slit heights'] = tuple(h)\n\n # Presets\n self.header['Presets'] = self._presets(hdr)\n\n # End Detectors pt 2 based on anal_param_nano_bis position\n\n # Last part of detectors\n if self.header['analysis version'] >= 6:\n d3 = self._detectors3(hdr)\n self.header['Detectors']['TIC'] = d3.pop('TIC')\n for k, v in d3.items():\n self.header['Detectors'][k].update(v)\n # End PrimaryBeam/SecondaryBeam/Presets/Detectors based on anal_param_nano position\n\n # Image header, at end of overall header\n if self.header['file type'] == 26:\n hdr.seek(-176, 2)\n self.header['Isotopes'] = self._isotopes_hdr(hdr)\n elif self.header['file type'] in (21, 22, 31, 35):\n # no image header for line scan or beam stability\n pass\n else:\n hdr.seek(-84, 2)\n self.header['Image'] = self._image_hdr(hdr)\n\n # Done reading header. Check for and read external files for extra info.\n if os.path.exists(os.path.splitext(self.filename)[0] + '.chk_is'):\n self._read_chk_is()",
"def read_header(infile):\n h = dict()\n fid = open(infile, 'r+b')\n h['filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['parent_filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['comments1'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['comments2'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['energy_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['config_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['file_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['trans_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['date_modified'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 16))\n h['frequency'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['mat_velocity'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_polarization_channels'] =np.fromfile(fid, dtype = np.int16,count = 1)\n h['spare00'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['adc_min_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_max_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['band_width'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare01'] = np.fromfile(fid, dtype = np.int16, count = 5)\n h['polarization_type'] = np.fromfile(fid, dtype = np.int16, count = 4)\n h['record_header_size'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_precision'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['min_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['max_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['avg_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_scale_factor'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_units'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['surf_removal'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['edge_weighting'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['x_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['y_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['z_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['t_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['spare02'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['scan_orientation'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_direction'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_storage_order'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_x_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_y_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_z_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_t_pts'] = np.fromfile(fid, dtype = np.int32, count 
= 1)\n h['x_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['date_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['time_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['depth_recon'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['elevation_offset_angle'] = np.fromfile(fid,dtype = np.float32, count = 1)\n h['roll_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['azimuth_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['spare06'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_radius'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_delay'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_start'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_end'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['ahis_software_version'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare_end'] = np.fromfile(fid, dtype = np.float32, count = 10)\n return h",
"def readHeaderXTR(self, headerFile: str) -> None:\n with open(headerFile, \"r\") as f:\n lines = f.readlines()\n sectionLines = {}\n # let's get data\n for line in lines:\n line = line.strip()\n line = line.replace(\"'\", \" \")\n # continue if line is empty\n if line == \"\":\n continue\n if \"[\" in line:\n sec = line[1:-1]\n sectionLines[sec] = []\n else:\n sectionLines[sec].append(line)\n # the base class is built around a set of headers based on ATS headers\n # though this is a bit more work here, it saves lots of code repetition\n headers = {}\n # recording information (start_time, start_date, stop_time, stop_date, ats_data_file)\n fileLine = sectionLines[\"FILE\"][0]\n fileSplit = fileLine.split()\n headers[\"sample_freq\"] = np.absolute(float(fileSplit[-1]))\n timeLine = sectionLines[\"FILE\"][2]\n timeSplit = timeLine.split()\n # these are the unix time stamps\n startDate = float(timeSplit[1] + \".\" + timeSplit[2])\n datetimeStart = datetime.utcfromtimestamp(startDate)\n stopDate = float(timeSplit[3] + \".\" + timeSplit[4])\n datetimeStop = datetime.utcfromtimestamp(stopDate)\n headers[\"start_date\"] = datetimeStart.strftime(\"%Y-%m-%d\")\n headers[\"start_time\"] = datetimeStart.strftime(\"%H:%M:%S.%f\")\n headers[\"stop_date\"] = datetimeStop.strftime(\"%Y-%m-%d\")\n headers[\"stop_time\"] = datetimeStop.strftime(\"%H:%M:%S.%f\")\n # here calculate number of samples\n deltaSeconds = (datetimeStop - datetimeStart).total_seconds()\n # calculate number of samples - have to add one because the time given in SPAM recording is the actual time of the last sample\n numSamples = int(deltaSeconds * headers[\"sample_freq\"]) + 1\n # put these in headers for ease of future calculations in merge headers\n headers[\"num_samples\"] = numSamples\n # spam datasets only have the one data file for all channels\n headers[\"ats_data_file\"] = fileSplit[1]\n # data information (meas_channels, sample_freq)\n chanLine = sectionLines[\"CHANNAME\"][0]\n # this gets reformatted to an int later\n headers[\"meas_channels\"] = chanLine.split()[1]\n numChansInt = int(headers[\"meas_channels\"])\n # deal with the channel headers\n chanHeaders = []\n for iChan in range(0, numChansInt):\n chanH = self.chanDefaults()\n # set the sample frequency from the main headers\n chanH[\"sample_freq\"] = headers[\"sample_freq\"]\n # line data - read through the data in the correct channel order\n chanLine = sectionLines[\"CHANNAME\"][iChan + 1]\n chanSplit = chanLine.split()\n dataLine = sectionLines[\"DATA\"][iChan + 1]\n dataSplit = dataLine.split()\n # channel input information (gain_stage1, gain_stage2, hchopper, echopper)\n chanH[\"gain_stage1\"] = 1\n chanH[\"gain_stage2\"] = 1\n # channel output information (sensor_type, channel_type, ts_lsb, pos_x1, pos_x2, pos_y1, pos_y2, pos_z1, pos_z2, sensor_sernum)\n chanH[\"ats_data_file\"] = fileSplit[1]\n chanH[\"num_samples\"] = numSamples\n\n # channel information\n # spams often use Bx, By - use H within the software as a whole\n chanH[\"channel_type\"] = consistentChans(chanSplit[2])\n # the sensor number is a bit of a hack - want MFSXXe or something - add MFS in front of the sensor number - this is liable to break\n # at the same time, set the chopper\n calLine = sectionLines[\"200{}003\".format(iChan + 1)][0]\n calSplit = calLine.split()\n if isMagnetic(chanH[\"channel_type\"]):\n chanH[\"sensor_sernum\"] = calSplit[\n 2\n ] # the last three digits is the serial number\n sensorType = calSplit[1].split(\"_\")[1][-2:]\n chanH[\"sensor_type\"] = 
\"MFS{:02d}\".format(int(sensorType))\n if \"LF\" in calSplit[1]:\n chanH[\"hchopper\"] = 1\n else:\n chanH[\"sensor_type\"] = \"ELC00\"\n if \"LF\" in calLine:\n chanH[\"echopper\"] = 1\n\n # data is raw voltage of sensors\n # both E and H fields need polarity reversal (from email with Reinhard)\n # get scaling from headers\n scaling = float(dataSplit[-2])\n if isElectric(chanH[\"channel_type\"]):\n # the factor of 1000 is not entirely clear\n lsb = 1000.0 * scaling\n # volts to millivolts and a minus to switch polarity giving data in mV\n lsb = -1000.0 * lsb\n else:\n # volts to millivolts and a minus to switch polarity giving data in mV\n # scaling in header file is ignored because it duplicates static gain correction in calibration\n lsb = -1000.0\n chanH[\"ts_lsb\"] = lsb\n\n # the distances\n if chanSplit[2] == \"Ex\":\n chanH[\"pos_x1\"] = float(dataSplit[4]) / 2\n chanH[\"pos_x2\"] = chanH[\"pos_x1\"]\n if chanSplit[2] == \"Ey\":\n chanH[\"pos_y1\"] = float(dataSplit[4]) / 2\n chanH[\"pos_y2\"] = chanH[\"pos_y1\"]\n if chanSplit[2] == \"Ez\":\n chanH[\"pos_z1\"] = float(dataSplit[4]) / 2\n chanH[\"pos_z2\"] = chanH[\"pos_z1\"]\n\n # append chanHeaders to the list\n chanHeaders.append(chanH)\n\n # check information from raw file headers\n self.headersFromRawFile(headers[\"ats_data_file\"], headers)\n # return the headers and chanHeaders from this file\n return headers, chanHeaders",
"def read_scamp_head(fname, header=None):\n\n with open(fname) as fobj:\n lines = fobj.readlines()\n\n lines = [l.strip() for l in lines if l[0:3] != 'END']\n\n # if header is None an empty FITSHDR is created\n hdr = FITSHDR(header)\n\n for l in lines:\n hdr.add_record(l)\n\n return hdr",
"def readHead(self):\n filesize = self.rhd.tell()\n \n #the order in which all of this is called is critcal\n self.header_identifier = hex(np.uint32(struct.unpack('<I', self.rhd.read(4))))\n v = np.int8(struct.unpack('BBBB', self.rhd.read(4)))\n\n #read each property of the header\n self.version = str(v[0]) + '.' + str(v[2])\n self.sample_rate = np.float32(struct.unpack('f', self.rhd.read(4)))[0] \n self.dsp_enabled = np.int8(struct.unpack('BB', self.rhd.read(2)))[0]\n self.actual_dsp_cutoff_frequency = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.actual_lower_bandwidth = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.actual_upper_bandwidth = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.desired_dsp_cutoff_frequency = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.desired_lower_bandwidth = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.desired_upper_bandwidth = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.notch_cutoff_mode = np.int8(struct.unpack('BB', self.rhd.read(2)))[0]\n self.desired_impedance_test_frequency = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.actual_impedance_test_frequency = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n #list of 3 notes\n self.note = [_qstring(self.rhd),_qstring(self.rhd),_qstring(self.rhd)]\n self.number_of_temperature_sensors = np.int16(struct.unpack('h', self.rhd.read(2)))[0]\n self._TEMP_SENSORS = self.number_of_temperature_sensors\n self.board_mode = np.int16(struct.unpack('h', self.rhd.read(2)))[0]\n self.number_of_signal_groups = np.int16(struct.unpack('h', self.rhd.read(2)))[0]\n\n #dict of signal groups\n self.signal_groups = {} \n for i in range(self.number_of_signal_groups):\n sg = Signal_Group(self)\n self.signal_groups[sg.signal_group_name] = sg\n \n #dict of channels\n self.channels = {}\n for key, group in self.signal_groups.iteritems():\n self.channels.update(group.channels)",
"def getPadHeaderFiles(padPath, dateStart, dateStop, sensor):\n padFiles,sampleRate,dataColumns = getPadFiles(padPath,dateStart,dateStop,sensor,'.header')\n return padFiles,sampleRate,dataColumns",
"def read_header(options, infile):\n\n contigs = dict()\n line = ''\n if options.is_bam:\n #chrm = infile.getrname(line.tid).replace('chr', '')\n for i in range(len(infile.references)):\n if infile.references[i] == 'chrM_rCRS':\n chr_key = 'chrM'\n else:\n chr_key = infile.references[i]\n\n if contigs.has_key(chr_key):\n if not contigs[chr_key] == infile.lengths[i]:\n print >> sys.stderr, \"Headers in BAM files have inconsistent contig lengths. Stopping ...\"\n sys.exit(1)\n else:\n contigs[chr_key] = infile.lengths[i]\n else:\n for line in infile:\n if not line[0] == '@':\n if len(contigs) == 0:\n print >> sys.stderr, \"No header found in %s. Stopping.\" % file\n sys.exit(1)\n else:\n break\n\n sl = line.strip().split('\\t')\n\n if not sl[0] == '@SQ':\n continue\n\n if sl[1][3:] == 'chrM_rCRS':\n chr_key = 'chrM'\n else:\n chr_key = sl[1][3:]\n if contigs.has_key(chr_key):\n if not contigs[chr_key] == int(sl[2][3:]):\n print >> sys.stderr, \"Headers in BAM files have inconsistent contig lengths. Stopping ...\"\n sys.exit(1)\n else:\n contigs[chr_key] = int(sl[2][3:])\n \n return (contigs, line)",
"def readHeader(self, filename):\n f = Data.Usrxxx.readHeader(self, filename)\n# self.sayHeader()\n \n while True:\n data = fortran.read(f)\n if data is None: break\n size = len(data)\n# print(\"size: \", size)\n\n if size == 14 and data[:10] == \"STATISTICS\":\n self.statpos = f.tell()\n for det in self.detector:\n data = Data.unpackArray(fortran.read(f))\n det.total = data[0]\n det.totalerror = data[1]\n# for j in range(6):\n# fortran.skip(f)\n break\n\n if size != 50: raise IOError(\"Invalid USRTRACK/USRCOLL file\")\n\n header = struct.unpack(\"=i10siiififfif\", data)\n\n det = Data.Detector()\n det.nb = header[0]\n det.name = header[1].strip() # titutc - track/coll name\n det.type = header[2] # itustc - type of binning: 1 - linear energy etc\n det.dist = header[3] # idustc = distribution to be scored\n det.reg = header[4] # nrustc = region\n det.volume = header[5] # vusrtc = volume (cm**3) of the detector\n det.lowneu = header[6] # llnutc = low energy neutron flag\n det.elow = header[7] # etclow = minimum energy [GeV]\n det.ehigh = header[8] # etchgh = maximum energy [GeV]\n det.ne = header[9] # netcbn = number of energy intervals\n det.de = header[10] # detcbn = energy bin width\n\n self.detector.append(det)\n\n if det.lowneu:\n data = fortran.read(f)\n det.ngroup = struct.unpack(\"=i\",data[:4])[0]\n det.egroup = struct.unpack(\"=%df\"%(det.ngroup+1), data[4:])\n print(\"Low energy neutrons scored with %d groups\" % det.ngroup)\n else:\n\t\tdet.ngroup = 0\n\t\tdet.egroup = []\n\n\t size = (det.ngroup+det.ne) * 4\n\t if size != fortran.skip(f):\n\t\traise IOError(\"Invalid USRTRACK file\")\n f.close()",
"def pareHeader(headerFile,Ldontcares=['GData','BiasCoeff','headerFile','y_m_d','TimeZero']):\n reload(chd) # KEN SCOPE ISSUE?\n dHeader = chd.main(['headerFile=' + headerFile])\n #Ldontcares = ['GData','BiasCoeff','headerFile','y_m_d','TimeZero']\n #Ldontcares = ['GData','BiasCoeff','headerFile','y_m_d']\n for k in Ldontcares:\n del dHeader[k]\n dataFile = split(headerFile,'.header')[0] # toss extension\n return dHeader,dataFile",
"def _read_header(self):\n f = self._open(self.filename, 'rb')\n idx = 0\n header = b''\n # reading the header \n while idx < 13: \n header += f.readline().rstrip() # removes the \"\\n\\r\" at the end\n idx += 1\n # \"magically\" compute the data offset\n try:\n self._offset_auto = ord(header[2]) + 1856\n except:\n self._offset_auto = header[2] + 1856\n\n\n\n header = header[:self._offset_auto+300] # add an extra random header for offset\n header = re.sub(r'(?P<section>\\[[^\\]]+\\])', '\\n\\g<section>', header.decode('latin1'))\n header = header.splitlines()[1:]\n self.header = dict([self._header_sect2dict(line) for line in header])\n self.shape = np.array(self.header['Acquisition']['areGRBScan'].split(',')[-2:]).astype(np.int)\n f.close()\n\n offset_list = {'auto': self._offset_auto,\n 'from_end': -np.prod(self.shape)*self._nbytes,\n 'from_end_4k': - np.prod(self.shape)*self._nbytes - 4092}\n\n if self._offset_input in offset_list:\n\n self._offset_data = offset_list[self._offset_input]\n if self._offset_input.startswith('from_end'):\n # set the flag to seek from the end of the file.\n self._offset_whence = 2\n elif type(self._offset_input) is int:\n self._offset_data = self._offset_input\n else:\n raise ValueError\n\n \n\n return self.header",
"def _main_header(self, hdr):\n d = {}\n # Called readDefAnalysis in OpenMIMS\n d['sample type'], d['data included'], d['sample x'], d['sample y'], \\\n d['analysis type'], d['user name'], d['sample z'], date, time = \\\n unpack(self._bo + '4i 32s 16s i 12x 16s 16s', hdr.read(112))\n\n d['data included'] = bool(d['data included'])\n d['user name'] = self._cleanup_string(d['user name'])\n d['analysis type'] = self._cleanup_string(d['analysis type']).lower()\n date = self._cleanup_string(date)\n time = self._cleanup_string(time)\n d['date'] = self._cleanup_date(date + ' ' + time)\n\n if self.header['file type'] in (27, 29, 39):\n # Called MaskImage/readMaskIm in OpenMIMS\n d['original filename'], d['analysis duration'], d['frames'], \\\n d['scan type'], d['magnification'], d['size type'], \\\n d['size detector'], d['beam blanking'], d['presputtering'], \\\n d['presputtering duration'] = \\\n unpack(self._bo + '16s 3i 3h 2x 3i', hdr.read(48))\n\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = {}\n d['HVControl']['hvcontrol enabled'] = False\n\n elif self.header['file type'] in (22, 41):\n # Called MaskSampleStageImage/readMaskIss in OpenMIMS\n d['original filename'], d['analysis duration'], d['scan type'], \\\n d['steps'], d['step size x'], d['step size y'], d['step size?'], \\\n d['step waittime'], d['frames'], d['beam blanking'], \\\n d['presputtering'], d['presputtering duration'] = \\\n unpack(self._bo + '16s 6i d 4i', hdr.read(64))\n\n d['scan type'] = _stage_scan_types.get(d['scan type'], str(d['scan type']))\n\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = self._hvcontrol(hdr)\n # Don't know if this unused byte needs to go after HVControl or after SigRef.\n hdr.seek(4, 1)\n\n elif self.header['file type'] in (21, 26):\n # Not in OpenMIMS\n # this bit same as image, 1 extra unused/unknown\n d['original filename'], d['analysis duration'], d['frames'], \\\n d['scan type'], d['magnification'], d['size type'], \\\n d['size detector'], d['beam blanking'], d['presputtering'], \\\n d['presputtering duration'] = \\\n unpack(self._bo + '16s 4x 3i 3h 2x 3i', hdr.read(52))\n\n # this bit same as stage scan\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = self._hvcontrol(hdr)\n\n # 24 bytes unknown, not sure if they go here or before AutoCal\n hdr.seek(24, 1)\n\n elif self.header['file type'] == 31:\n # Don't know if this is correct, all 0s anyway\n d['original filename'], d['scan type'], \\\n d['beam blanking'], d['presputtering'] = \\\n unpack(self._bo + '16s 3i 4x', hdr.read(32))\n\n elif self.header['file type'] == 35:\n d['original filename'], d['scan type'], d['analysis duration'], \\\n d['frames'], d['beam blanking'], d['presputtering'] = \\\n unpack(self._bo + '16s 5i 40x', hdr.read(76))\n\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = self._hvcontrol(hdr)\n\n else:\n raise TypeError('What type of image are you? 
{}'.format(self.header['file type']))\n\n # Continue main header for all types\n d['SigRef'] = self._sigref(hdr)\n d['masses'] = unpack(self._bo + 'i', hdr.read(4))[0]\n\n # scan type is set for stage scan analysis, set others\n if isinstance(d['scan type'], int):\n if d['scan type'] == 0:\n d['scan type'] = ''\n else:\n d['scan type'] = str(d['scan type'])\n\n d['beam blanking'] = bool(d['beam blanking'])\n d['presputtering'] = bool(d['presputtering'])\n d['original filename'] = self._cleanup_string(d['original filename'])\n\n if self.header['file type'] in (21, 26, 27, 29, 35, 39):\n if self.header['file version'] >= 4108:\n n = 60\n else:\n n = 10\n elif self.header['file type'] in (22, 31, 40, 41):\n n = 20\n else:\n n = 0\n\n # Not sure what this is, memory pointers? Not needed.\n # d['mass table ptr'] = unpack(self._bo + 2*n*'h', hdr.read(n*4))\n hdr.seek(n*4, 1)\n\n if self.header['file type'] in (21, 22, 26, 40, 41, 35):\n hdr.seek(4, 1) # 4 bytes unused\n\n # Mass table, dict by species label.\n d['MassTable'] = collections.OrderedDict()\n for m in range(d['masses']):\n mi = {}\n mi['trolley index'], unknown, mi['mass'], mi['matrix or trace'], \\\n mi['detector'], mi['wait time'], mi['frame count time'] = \\\n unpack(self._bo + '2i d 2i 2d', hdr.read(40))\n\n if self.header['file type'] == 31:\n if d['analysis type'].endswith('trolley step scan'):\n # start and end are in mm, step is in μm; convert to mm\n mi['radius start'], mi['radius end'], \\\n mi['radius step'], mi['b field bits'] = \\\n unpack(self._bo + '3d i', hdr.read(28))\n mi['radius step'] /= 1000\n else:\n mi['voltage start'], mi['voltage end'], \\\n mi['voltage step'], mi['b field bits'] = \\\n unpack(self._bo + '3d i', hdr.read(28))\n else:\n mi['offset'], mi['b field bits'] = unpack(self._bo + '2i', hdr.read(8))\n\n mi.update(self._species(hdr))\n\n if self.header['file type'] == 31:\n hdr.seek(4, 1)\n\n # Add correction controls, my own addition.\n mi['background corrected'] = False\n mi['deadtime corrected'] = False\n mi['yield corrected'] = False\n\n label = mi.pop('label')\n # This is true for NS50L and file version 4108.\n # Anywhere else different?\n # Maybe confirm this with the Trolleys dict,\n # there is an Esi trolley.\n if mi['trolley index'] == 8:\n label = 'SE'\n\n d['MassTable'][label] = mi\n\n # Create a few convenient lists\n d['label list'] = tuple(d['MassTable'].keys())\n d['label list fmt'] = tuple(format_species(m) for m in d['label list'])\n d['mass list'] = tuple(d['MassTable'][m]['mass'] for m in d['label list'])\n\n return d",
"def mergeHeaders(self, headersList: List, chanHeadersList: List) -> None:\n # take the first header as an example\n self.headers = headersList[0]\n self.chanHeaders = chanHeadersList[0]\n if len(headersList) == 1:\n # just fill in the data file list and data ranges\n self.dataFileList = [self.headers[\"ats_data_file\"]]\n self.dataRanges = [[0, self.headers[\"num_samples\"] - 1]]\n self.scalings = []\n tmp = {}\n for cHeader in self.chanHeaders:\n tmp[cHeader[\"channel_type\"]] = cHeader[\"ts_lsb\"]\n self.scalings.append(tmp)\n return # then there was only one file - no need to do all the below\n\n # make sure that all headers have the same sample rate\n # and save the start and stop times and dates\n startTimes = []\n stopTimes = []\n numSamples = []\n for idx, header in enumerate(headersList):\n if header[\"sample_freq\"] != self.headers[\"sample_freq\"]:\n self.printError(\n \"Not all datasets in {} have the same sample frequency.\\nExiting...\".format(\n self.dataPath\n ),\n quitrun=True,\n )\n if header[\"meas_channels\"] != self.headers[\"meas_channels\"]:\n self.printError(\n \"Not all datasets in {} have the same number of channels.\\nExiting...\".format(\n self.dataPath\n ),\n quitrun=True,\n )\n # now store startTimes, stopTimes and numSamples\n # do this as datetimes, will be easier\n startString = \"{} {}\".format(header[\"start_date\"], header[\"start_time\"])\n stopString = \"{} {}\".format(header[\"stop_date\"], header[\"stop_time\"])\n datetimeStart = datetime.strptime(startString, \"%Y-%m-%d %H:%M:%S.%f\")\n datetimeStop = datetime.strptime(stopString, \"%Y-%m-%d %H:%M:%S.%f\")\n startTimes.append(datetimeStart)\n stopTimes.append(datetimeStop)\n numSamples.append(header[\"num_samples\"])\n # check the start and end times\n sampleTime = timedelta(seconds=1.0 / self.headers[\"sample_freq\"])\n # sort by start times\n sortIndices = sorted(list(range(len(startTimes))), key=lambda k: startTimes[k])\n # now sort stop times by the same indices\n check = True\n for i in range(1, self.numHeaderFiles):\n # get the stop time of the previous dataset\n stopTimePrev = stopTimes[sortIndices[i - 1]]\n startTimeNow = startTimes[sortIndices[i]]\n if startTimeNow != stopTimePrev + sampleTime:\n self.printWarning(\n \"There is a gap between the datafiles in {}\".format(self.dataPath)\n )\n self.printWarning(\n \"Please separate out datasets with gaps into separate folders\"\n )\n # print out where the gap was found\n self.printWarning(\"Gap found between datafiles:\")\n self.printWarning(\n \"1. {}\".format(headersList[sortIndices[i - 1]][\"ats_data_file\"])\n )\n self.printWarning(\n \"2. {}\".format(headersList[sortIndices[i]][\"ats_data_file\"])\n )\n # set check as false\n check = False\n # if did not pass check, then exit\n if not check:\n self.printError(\n \"Gaps in data. All data for a single recording must be continuous. 
Exiting...\",\n quitrun=True,\n )\n\n # make sure there are no gaps\n totalSamples = sum(numSamples)\n\n # get a list of all the datafiles, scalings and the sample ranges\n self.dataFileList = []\n self.dataRanges = []\n self.scalings = []\n sample = -1\n # now need some sort of lookup table to say where the sample ranges are\n for i in range(0, self.numHeaderFiles):\n iSort = sortIndices[i] # get the sorted index\n self.dataFileList.append(headersList[iSort][\"ats_data_file\"])\n startSample = sample + 1\n endSample = (\n startSample + numSamples[iSort] - 1\n ) # -1 because this is inclusive of the start sample\n self.dataRanges.append([startSample, endSample])\n # increment sample\n sample = endSample\n # save the scalings for each chan\n tmp = {}\n for cHeader in self.chanHeadersList[iSort]:\n tmp[cHeader[\"channel_type\"]] = cHeader[\"ts_lsb\"]\n self.scalings.append(tmp)\n\n # now set the LSB information for the chanHeaders\n # i.e. if they change, this should reflect that\n for i in range(0, len(self.chanHeaders)):\n chan = self.chanHeaders[i][\"channel_type\"]\n lsbSet = set()\n for scalar in self.scalings:\n lsbSet.add(scalar[chan])\n if len(lsbSet) == 1:\n self.chanHeaders[i][\"ts_lsb\"] = list(lsbSet)[0]\n else:\n self.printWarning(\n \"Multiple different LSB values found for chan {}: {}\".format(\n chan, list(lsbSet)\n )\n )\n self.printWarning(\n \"This is handled, but the header information given will show only a single LSB value\"\n )\n self.chanHeaders[i][\"ts_lsb\"] = list(lsbSet)[0]\n\n # set start and end time for headers and chan headers\n # do the same with number of samples\n datetimeStart = min(startTimes)\n datetimeStop = max(stopTimes)\n self.headers[\"start_date\"] = datetimeStart.strftime(\"%Y-%m-%d\")\n self.headers[\"start_time\"] = datetimeStart.strftime(\"%H:%M:%S.%f\")\n self.headers[\"stop_date\"] = datetimeStop.strftime(\"%Y-%m-%d\")\n self.headers[\"stop_time\"] = datetimeStop.strftime(\"%H:%M:%S.%f\")\n self.headers[\"num_samples\"] = totalSamples\n # set datafiles = the whole list of datafiles\n self.headers[\"ats_data_file\"] = self.dataFileList\n for iChan in range(0, len(self.chanHeaders)):\n self.chanHeaders[iChan][\"start_date\"] = datetimeStart.strftime(\"%Y-%m-%d\")\n self.chanHeaders[iChan][\"start_time\"] = datetimeStart.strftime(\n \"%H:%M:%S.%f\"\n )\n self.chanHeaders[iChan][\"stop_date\"] = datetimeStop.strftime(\"%Y-%m-%d\")\n self.chanHeaders[iChan][\"stop_time\"] = datetimeStop.strftime(\"%H:%M:%S.%f\")\n self.chanHeaders[iChan][\"num_samples\"] = totalSamples\n self.chanHeaders[iChan][\"ats_data_file\"] = self.dataFileList",
"def _header(self, path, files):\n headers = [fits.getheader(os.path.join(path, f))\n for f in sorted(files)]\n N = len(headers)\n\n def mean_key(headers, key, comment, type):\n return (np.mean([type(h[key]) for h in headers]), comment)\n\n h = fits.Header()\n h['BUNIT'] = 'e-/s'\n h['ORIGIN'] = 'Zwicky Transient Facility', 'Data origin'\n h['OBSERVER'] = 'ZTF Robotic Software', 'Observer'\n h['INSTRUME'] = 'ZTF/MOSAIC', 'Instrument name'\n h['OBSERVAT'] = 'Palomar Observatory', 'Observatory'\n h['TELESCOP'] = 'Palomar 48-inch', 'Observatory telescope'\n h['OBSLON'] = -116.8597, 'Observatory longitude (deg)'\n h['OBSLAT'] = 33.3483, 'Observatory latitude (deg E)'\n h['OBSALT'] = 1706., 'Observatory altitude (m)'\n h['IMGTYPE'] = 'object', 'Image type'\n h['NIMAGES'] = N, 'Number of images in stack'\n h['EXPOSURE'] = (sum([_['EXPOSURE'] for _ in headers]),\n 'Total stack exposure time (s)')\n if len(headers) == 0:\n return h\n\n h['MAGZP'] = 25.0, 'Magnitude zero point, solar color'\n h['MAGZPRMS'] = (\n np.sqrt(np.sum([h.get('MAGZPRMS', 0)**2 for h in headers])) / N,\n 'Mean MAGZP RMS')\n h['PCOLOR'] = headers[0]['PCOLOR']\n h['CLRCOEFF'] = mean_key(headers, 'CLRCOEFF',\n 'Mean color coefficient', float)\n\n h['OBSJD1'] = float(headers[0]['OBSJD']), 'First shutter start time'\n h['OBSJDN'] = float(headers[-1]['OBSJD']), 'Last shutter start time'\n h['OBSJDM'] = mean_key(\n headers, 'OBSJD', 'Mean shutter start time', float)\n\n wcsfn = sorted(files)[0]\n wcs = WCS(fits.getheader(os.path.join(path, wcsfn),\n extname='SANGLE'))\n h.update(wcs.to_header())\n h['WCSORIGN'] = wcsfn\n\n h['DBPID'] = (','.join([str(_['DBPID']) for _ in headers]),\n 'Database processed-image IDs')\n h['DESG'] = headers[0]['DESG'], 'Target designation'\n for k, comment in {\n 'RH': 'Mean heliocentric distance (au)',\n 'DELTA': 'Mean observer-target distance (au)',\n 'PHASE': 'Mean Sun-target-observer angle (deg)',\n 'RDOT': 'Mean heliocentric radial velocity, km/s',\n 'SELONG': 'Mean solar elongation, deg',\n 'SANGLE': 'Mean projected target->Sun position angle, deg',\n 'VANGLE': 'Mean projected velocity position angle, deg',\n 'TRUEANOM': 'Mean true anomaly (osculating), deg',\n 'TMTP': 'Mean T-Tp (osculating), days',\n 'TGTRA': 'Mean target RA, deg',\n 'TGTDEC': 'Mean target Dec, deg',\n 'TGTDRA': 'Mean target RA*cos(dec) rate of change,arcsec/s',\n 'TGTDDEC': 'Mean target Dec rate of change, arcsec/s',\n 'TGTRASIG': 'Mean target RA 3-sigma uncertainty, arcsec',\n 'TGTDESIG': 'Mean target Dec 3-sigma uncertainty, arcsec',\n }.items():\n try:\n h[k] = mean_key(headers, k, comment, float)\n except ValueError:\n # target rates might be empty strings\n h[k] = ''\n\n return h",
"def getHeaders(self):\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\tif len(self.line) == 7:\n\t\t\tself.header.kod = self.line[0]\n\t\t\tself.header.ver = self.line[1]\n\t\t\tpID_date = self.line[2]\n\t\t\tself.header.probid = np.append(self.header.probid, pID_date)\n\t\t\tpID_time = self.line[3]\n\t\t\tself.header.probid = np.append(self.header.probid, pID_time)\n\t\t\tself.header.knod = int(self.line[4])\n\t\t\tself.header.nps = int(self.line[5])\n\t\t\tself.header.rnr = int(self.line[6])\n\t\telif len(self.line) == 3:\n\t\t\tself.header.knod = int(self.line[0])\n\t\t\tself.header.nps = int(self.line[1])\n\t\t\tself.header.rnr = int(self.line[2])\n\t\t\t\n\n\t\tself.header.title = self.mctalFile.readline().strip()\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\tself.header.ntal = int(self.line[1])\n\n\t\tif self.header.ntal == 0:\n\t\t\tprint >> sys.stderr, \"\\n \\033[1;31mNo tallies in this MCTAL file. Exiting.\\033[0m\\n\"\n\t\t\tsys.exit(1)\n\n\t\tif len(self.line) == 4:\n\t\t\tself.header.npert = int(self.line[3])\n\t\t\tprint >> sys.stderr, \"\\n \\033[1;31mMCTAL file with perturbation card. Not supported. Exiting.\\033[0m\\n\"\n\t\t\tsys.exit(1)\n\n\t\tself.line = self.mctalFile.readline().split()\n\n\t\twhile self.line[0].lower() != \"tally\":\n\t\t\tfor l in self.line: self.header.ntals = np.append(self.header.ntals,int(l))\n\t\t\tself.line = self.mctalFile.readline().split()",
"def _parse_header(self):\n # read the first bytes from the file\n header = self._stream_handle.read(HEADER_BYTES)\n match = HEADER_MATCHER.match(header)\n if not match:\n raise SampleException(\"File header does not match the header regex\")\n\n # update the state to show we have read the header\n self._increment_state(HEADER_BYTES)",
"def headers(self, min_rt=None, max_rt=None, ms_level=None, polarity=None, **kwargs):\n \n # iterate through file\n for evt, elm in etree.iterparse(self.path, ('end',)):\n \n # retrieve instrument configs\n if elm.tag == self._prefix+'instrumentConfigurationList':\n self._retrieve_instrument_configurations(elm)\n \n # process spectrum data\n if elm.tag == self._prefix+'spectrum':\n \n # init scan data container\n scan_data = self._make_template()\n \n # retrieve raw header data\n self._retrieve_header_data(elm, scan_data)\n \n # check raw header data\n if not self._check_header_data(scan_data, min_rt, max_rt, ms_level, polarity):\n elm.clear()\n continue\n \n # free memory\n elm.clear()\n \n # create scan header\n yield self._make_header(scan_data)",
"def _readheader(lines):\n hdrdict = {}\n #input list of 26 lines of header\n #station and channel\n line = lines[5]\n parts = line.strip().split()\n fname = parts[1]\n fparts = fname.split('_')\n hdrdict['station'] = fparts[-2]+'_'+fparts[-1]\n\n #the \"Component\" lines look like either: Component S00W, Component S90E, Component Up\n compstr = lines[12].strip().split()[1]\n hdrdict['channel'] = get_comp_name(compstr)\n\n #instrument\n hdrdict['instrument'] = lines[3].split()[1].strip()\n \n #location string\n line = lines[6]\n hdrdict['location'] = line.strip()\n #event origin, buffer start year/month\n line = lines[16]\n parts = line.strip().split()\n bufyear = int(parts[8])\n bufmonth = int(parts[9])\n #epicentral location, buffer start day/hour\n line = lines[17]\n parts = line.strip().split()\n bufday = int(parts[8])\n bufhour = int(parts[9])\n #numpoints, buffer start min/sec\n line = lines[19]\n parts = line.strip().split()\n hdrdict['npts'] = int(parts[0])\n bufmin = int(parts[8])\n millisec = int(parts[9])\n bufsec = int(millisec/1000)\n bufmicrosec = int(np.round(millisec/1000.0 - bufsec))\n hdrdict['starttime'] = UTCDateTime(datetime(bufyear,bufmonth,bufday,bufhour,bufmin,bufsec,bufmicrosec))\n #part C\n #frequency, calibration value and some other stuff we don't care about\n line = lines[20]\n parts = line.strip().split()\n hdrdict['sampling_rate'] = float(parts[0])\n hdrdict['delta'] = 1.0/hdrdict['sampling_rate']\n hdrdict['calib'] = float(parts[7])\n #site location info, this time in dd\n line = lines[21]\n parts = line.strip().split()\n hdrdict['lat'] = float(parts[0]) * -1\n hdrdict['lon'] = float(parts[1])\n hdrdict['height'] = 0.0\n #duration\n line = lines[22]\n parts = line.strip().split()\n hdrdict['duration'] = float(parts[0])\n hdrdict['endtime'] = hdrdict['starttime'] + hdrdict['duration']\n #max acceleration - good for sanity check\n line = lines[23]\n parts = line.strip().split()\n hdrdict['maxacc'] = float(parts[0])\n hdrdict['network'] = 'NZ'\n hdrdict['units'] = 'acc'\n return hdrdict",
"def _read_hdr_file(ktlx_file):\r\n with open(ktlx_file, 'rb') as f:\r\n\r\n hdr = {}\r\n assert f.tell() == 0\r\n\r\n hdr['file_guid'] = hexlify(f.read(16))\r\n hdr['file_schema'], = unpack('<H', f.read(2))\r\n if not hdr['file_schema'] in (1, 3, 7, 8, 9):\r\n raise NotImplementedError('Reading header not implemented for ' +\r\n 'file_schema ' + str(hdr['file_schema']))\r\n\r\n hdr['base_schema'], = unpack('<H', f.read(2))\r\n if not hdr['base_schema'] == 1: # p.3: base_schema 0 is rare, I think\r\n raise NotImplementedError('Reading header not implemented for ' +\r\n 'base_schema ' + str(hdr['base_schema']))\r\n\r\n hdr['creation_time'] = datetime.fromtimestamp(unpack('<i',\r\n f.read(4))[0])\r\n hdr['patient_id'], = unpack('<i', f.read(4))\r\n hdr['study_id'], = unpack('<i', f.read(4))\r\n hdr['pat_last_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['pat_first_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['pat_middle_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['patient_id'] = _make_str(unpack('c' * 80, f.read(80)))\r\n assert f.tell() == 352\r\n\r\n if hdr['file_schema'] >= 7:\r\n hdr['sample_freq'], = unpack('<d', f.read(8))\r\n n_chan, = unpack('<i', f.read(4))\r\n hdr['num_channels'] = n_chan\r\n hdr['deltabits'], = unpack('<i', f.read(4))\r\n hdr['phys_chan'] = unpack('<' + 'i' * hdr['num_channels'],\r\n f.read(hdr['num_channels'] * 4))\r\n\r\n f.seek(4464)\r\n hdr['headbox_type'] = unpack('<' + 'i' * 4, f.read(16))\r\n hdr['headbox_sn'] = unpack('<' + 'i' * 4, f.read(16))\r\n hdr['headbox_sw_version'] = _make_str(unpack('c' * 40, f.read(40)))\r\n hdr['dsp_hw_version'] = _make_str(unpack('c' * 10, f.read(10)))\r\n hdr['dsp_sw_version'] = _make_str(unpack('c' * 10, f.read(10)))\r\n hdr['discardbits'], = unpack('<i', f.read(4))\r\n\r\n if hdr['file_schema'] >= 8:\r\n hdr['shorted'] = unpack('<' + 'h' * 1024, f.read(2048))[:n_chan]\r\n hdr['frequency_factor'] = unpack('<' + 'h' * 1024,\r\n f.read(2048))[:n_chan]\r\n return hdr",
"def read_header(datafile):\n\thead = []\n\tf = open(datafile,'r')\n\tfor i,line in enumerate(f):\n\t\tif i is 10: break\n\t\thead += [line]\n\tf.close()\n\treturn head",
"def _read_header(self):\n\n stream = self.stream\n\n self._seek_to_table(tables.header)\n\n # Read header[0 ... 1]\n checksum = stream.read_unsigned_byte4()\n design_font_size = stream.read_fix_word()\n\n # Read header[2 ... 11] if there\n character_info_table_position = self.table_pointers[\n tables.character_info]\n position = stream.tell()\n if position < character_info_table_position:\n character_coding_scheme = stream.read_bcpl()\n else:\n character_coding_scheme = None\n\n # Read header[12 ... 16] if there\n character_coding_scheme_length = 40 # bytes (11 - 2 + 1) * 4 = 10 * 4\n position += character_coding_scheme_length\n if position < character_info_table_position:\n family = stream.read_bcpl(position)\n else:\n family = None\n\n # Read header[12 ... 16] if there\n family_length = 20 # bytes (16 - 12 +1) * 4 = 5 * 4\n position += family_length\n if position < character_info_table_position:\n seven_bit_safe_flag = stream.read_unsigned_byte1(position)\n stream.read_unsigned_byte2()\n face = stream.read_unsigned_byte1()\n # Fixme: complete\n\n # don't read header [18 ... whatever]\n\n self.tfm = Tfm(self.font_name,\n self.filename,\n self.smallest_character_code,\n self.largest_character_code,\n checksum,\n design_font_size,\n character_coding_scheme,\n family)",
"def read_header(fid):\r\n\r\n # Check 'magic number' at beginning of file to make sure this is an Intan\r\n # Technologies RHD2000 data file.\r\n magic_number, = struct.unpack('<I', fid.read(4)) \r\n if magic_number != int('c6912702', 16): raise Exception('Unrecognized file type.')\r\n\r\n header = {}\r\n # Read version number.\r\n version = {}\r\n (version['major'], version['minor']) = struct.unpack('<hh', fid.read(4)) \r\n header['version'] = version\r\n\r\n print('')\r\n print('Reading Intan Technologies RHD2000 Data File, Version {}.{}'.format(version['major'], version['minor']))\r\n print('')\r\n\r\n freq = {}\r\n\r\n # Read information of sampling rate and amplifier frequency settings.\r\n header['sample_rate'], = struct.unpack('<f', fid.read(4))\r\n (freq['dsp_enabled'], freq['actual_dsp_cutoff_frequency'], freq['actual_lower_bandwidth'], freq['actual_upper_bandwidth'], \r\n freq['desired_dsp_cutoff_frequency'], freq['desired_lower_bandwidth'], freq['desired_upper_bandwidth']) = struct.unpack('<hffffff', fid.read(26))\r\n\r\n\r\n # This tells us if a software 50/60 Hz notch filter was enabled during\r\n # the data acquisition.\r\n notch_filter_mode, = struct.unpack('<h', fid.read(2))\r\n header['notch_filter_frequency'] = 0\r\n if notch_filter_mode == 1:\r\n header['notch_filter_frequency'] = 50\r\n elif notch_filter_mode == 2:\r\n header['notch_filter_frequency'] = 60\r\n freq['notch_filter_frequency'] = header['notch_filter_frequency']\r\n\r\n (freq['desired_impedance_test_frequency'], freq['actual_impedance_test_frequency']) = struct.unpack('<ff', fid.read(8))\r\n\r\n note1 = read_qstring(fid)\r\n note2 = read_qstring(fid)\r\n note3 = read_qstring(fid)\r\n header['notes'] = { 'note1' : note1, 'note2' : note2, 'note3' : note3}\r\n\r\n # If data file is from GUI v1.1 or later, see if temperature sensor data was saved.\r\n header['num_temp_sensor_channels'] = 0\r\n if (version['major'] == 1 and version['minor'] >= 1) or (version['major'] > 1) :\r\n header['num_temp_sensor_channels'], = struct.unpack('<h', fid.read(2))\r\n \r\n # If data file is from GUI v1.3 or later, load eval board mode.\r\n header['eval_board_mode'] = 0\r\n if ((version['major'] == 1) and (version['minor'] >= 3)) or (version['major'] > 1) :\r\n header['eval_board_mode'], = struct.unpack('<h', fid.read(2))\r\n \r\n \r\n header['num_samples_per_data_block'] = 60\r\n # If data file is from v2.0 or later (Intan Recording Controller), load name of digital reference channel\r\n if (version['major'] > 1):\r\n header['reference_channel'] = read_qstring(fid)\r\n header['num_samples_per_data_block'] = 128\r\n\r\n # Place frequency-related information in data structure. 
(Note: much of this structure is set above)\r\n freq['amplifier_sample_rate'] = header['sample_rate']\r\n freq['aux_input_sample_rate'] = header['sample_rate'] / 4\r\n freq['supply_voltage_sample_rate'] = header['sample_rate'] / header['num_samples_per_data_block']\r\n freq['board_adc_sample_rate'] = header['sample_rate']\r\n freq['board_dig_in_sample_rate'] = header['sample_rate']\r\n\r\n header['frequency_parameters'] = freq\r\n\r\n # Create structure arrays for each type of data channel.\r\n header['spike_triggers'] = []\r\n header['amplifier_channels'] = []\r\n header['aux_input_channels'] = []\r\n header['supply_voltage_channels'] = []\r\n header['board_adc_channels'] = []\r\n header['board_dig_in_channels'] = []\r\n header['board_dig_out_channels'] = []\r\n\r\n # Read signal summary from data file header.\r\n\r\n number_of_signal_groups, = struct.unpack('<h', fid.read(2))\r\n print('n signal groups {}'.format(number_of_signal_groups))\r\n\r\n for signal_group in range(1, number_of_signal_groups + 1):\r\n signal_group_name = read_qstring(fid)\r\n signal_group_prefix = read_qstring(fid)\r\n (signal_group_enabled, signal_group_num_channels, signal_group_num_amp_channels) = struct.unpack('<hhh', fid.read(6))\r\n\r\n if (signal_group_num_channels > 0) and (signal_group_enabled > 0):\r\n for signal_channel in range(0, signal_group_num_channels):\r\n new_channel = {'port_name' : signal_group_name, 'port_prefix' : signal_group_prefix, 'port_number' : signal_group}\r\n new_channel['native_channel_name'] = read_qstring(fid)\r\n new_channel['custom_channel_name'] = read_qstring(fid)\r\n (new_channel['native_order'], new_channel['custom_order'], signal_type, channel_enabled, new_channel['chip_channel'], new_channel['board_stream']) = struct.unpack('<hhhhhh', fid.read(12))\r\n new_trigger_channel = {}\r\n (new_trigger_channel['voltage_trigger_mode'], new_trigger_channel['voltage_threshold'], new_trigger_channel['digital_trigger_channel'], new_trigger_channel['digital_edge_polarity']) = struct.unpack('<hhhh', fid.read(8))\r\n (new_channel['electrode_impedance_magnitude'], new_channel['electrode_impedance_phase']) = struct.unpack('<ff', fid.read(8))\r\n\r\n if channel_enabled:\r\n if signal_type == 0:\r\n header['amplifier_channels'].append(new_channel)\r\n header['spike_triggers'].append(new_trigger_channel)\r\n elif signal_type == 1:\r\n header['aux_input_channels'].append(new_channel)\r\n elif signal_type == 2:\r\n header['supply_voltage_channels'].append(new_channel)\r\n elif signal_type == 3:\r\n header['board_adc_channels'].append(new_channel)\r\n elif signal_type == 4:\r\n header['board_dig_in_channels'].append(new_channel)\r\n elif signal_type == 5:\r\n header['board_dig_out_channels'].append(new_channel)\r\n else:\r\n raise Exception('Unknown channel type.')\r\n \r\n # Summarize contents of data file.\r\n header['num_amplifier_channels'] = len(header['amplifier_channels'])\r\n header['num_aux_input_channels'] = len(header['aux_input_channels'])\r\n header['num_supply_voltage_channels'] = len(header['supply_voltage_channels'])\r\n header['num_board_adc_channels'] = len(header['board_adc_channels'])\r\n header['num_board_dig_in_channels'] = len(header['board_dig_in_channels'])\r\n header['num_board_dig_out_channels'] = len(header['board_dig_out_channels'])\r\n\r\n return header",
"def _read_file_definition(self):\n row_count = 0\n #\n # THIS METHOD ASSUMES A 14 ROW HEADER\n # If the number of header row lines in the glider ASCII input file changes from 14,\n # this method will NOT WORK\n num_hdr_lines = 14\n\n header_pattern = r'(.*): (.*)$'\n header_re = re.compile(header_pattern)\n\n line = self._stream_handle.readline()\n\n while line and row_count < num_hdr_lines:\n\n match = header_re.match(line)\n\n if match:\n key = match.group(1)\n value = match.group(2)\n value = value.strip()\n\n # update num_hdr_lines based on the header info.\n if key == 'num_ascii_tags':\n # this key has a required value of 14, otherwise we don't know how to parse the file\n if int(value) != num_hdr_lines:\n raise DatasetParserException(\"Header must be %d rows, but it is %s\" % (num_hdr_lines, value))\n\n elif key == 'num_label_lines':\n # this key has a required value of 3, otherwise we don't know how to parse the file\n if int(value) != 3:\n raise DatasetParserException(\"There must be 3 Label lines from the header for this parser\")\n\n elif key == 'sensors_per_cycle':\n # save for future use\n self._header_dict[key] = int(value)\n\n elif key in ['filename_label', 'mission_name', 'fileopen_time']:\n # create a dictionary of these 3 key/value pairs strings from\n # the header rows that need to be saved for future use\n self._header_dict[key] = value\n\n else:\n log.warn(\"Failed to parse header row: %s.\", line)\n\n row_count += 1\n # only read the header lines in this method so make sure we stop\n if row_count < num_hdr_lines:\n line = self._stream_handle.readline()\n\n if row_count < num_hdr_lines:\n log.error('Not enough data lines for a full header')\n raise DatasetParserException('Not enough data lines for a full header')",
"def _readCommonHeader(self):\n for i in range(self.ignore_header_lines):\n self.ignored_header_lines.append(nappy.utils.text_parser.readItemFromLine(self.file.readline()))\n \n self._readTopLine()\n self.ONAME = nappy.utils.text_parser.readItemFromLine(self.file.readline(), str)\n self.ORG = nappy.utils.text_parser.readItemFromLine(self.file.readline(), str)\n self.SNAME = nappy.utils.text_parser.readItemFromLine(self.file.readline(), str)\n self.MNAME = nappy.utils.text_parser.readItemFromLine(self.file.readline(), str)\n (self.IVOL, self.NVOL) = nappy.utils.text_parser.readItemsFromLine(self.file.readline(), 2, int)\n dates = nappy.utils.text_parser.readItemsFromLine(self.file.readline(), 6, int)\n (self.DATE, self.RDATE) = (dates[:3], dates[3:])\n self.NLHEAD += self.ignore_header_lines",
"def process_header_data(spark, input_dir, output):\n\theader = spark.read \\\n\t\t.option(\"header\", True) \\\n\t\t.option(\"escape\", '\"') \\\n\t\t.option(\"inferSchema\", True) \\\n\t\t.csv(f\"{input_dir}/ams/*/*/ams__header_*__*.csv\") \\\n\t\t.select(*header_cols) \\\n\t\t.where(col('identifier').isNotNull())\n\n\tbill = spark.read \\\n\t\t.option(\"header\", True) \\\n\t\t.option(\"escape\", '\"') \\\n\t\t.option(\"inferSchema\", True) \\\n\t\t.csv(f\"{input_dir}/ams/*/*/ams__billgen_*__*.csv\") \\\n\t\t.select(*bill_cols)\n\n\theader_full = header.join(bill, ['identifier'], how='left')\n\n\theader_full.repartition(1).write.mode('overwrite').format(\"csv\") \\\n\t\t.option(\"header\", True) \\\n\t\t.option(\"escape\", '\"') \\\n\t\t.save(f\"{output}/header/\")",
"def test_fitsheader():\n extensions = ('fts', 'fits')\n for ext in extensions:\n for ffile in Path(testpath).glob(f\"*.{ext}*\"):\n fits_file = fits.open(ffile)\n fits_file.verify(\"fix\")\n data, header = fits_file[0].data, fits_file[0].header\n meta_header = MetaDict(OrderedDict(header))\n sunpy.io.fits.header_to_fits(meta_header)",
"def header(filename):\n\n if not os.path.isfile(filename):\n filename = match_filename(filename,'voltages')\n try:\n print('header length: ', glia.get_header(filename)[1])\n except:\n raise(ValueError, \"Could not get header, are you sure it's a MCD binary export?\")",
"def get_header(fname, path='./'):\r\n f = file(path+fname,'r')\r\n \r\n header = {}\r\n headlines = 0\r\n \r\n while True:\r\n line = f.readline()\r\n clean_line = string.strip(line).split()\r\n key = string.strip(clean_line[0])\r\n val = string.strip(clean_line[-1])\r\n if not key[0].isalpha():\r\n break\r\n try:\r\n val = int(val)\r\n except:\r\n val = float(val)\r\n if key != 'NODATA_value':\r\n key = key.lower()\r\n header[key] = val\r\n headlines += 1\r\n \r\n f.close()\r\n\r\n for key in ['ncols','nrows','cellsize','xllcorner','yllcorner']:\r\n if not header.has_key(key):\r\n raise KeyError, 'File %s header does not contain key %s'%(path+fname, key)\r\n \r\n return header, headlines",
"def read_headers(input_file):\n\n with open(input_file+'.hdr','r') as f:\n return [float(h) if not h.isalpha() else h for h in [l.split()[1] for l in f.readlines()]] #isdigit() does not catch floats",
"def header(filename):\n\n if not os.path.isfile(filename):\n filename = glia.match_filename(filename,'voltages')\n try:\n print('header length: ', glia.get_header(filename)[1])\n except:\n raise(ValueError, \"Could not get header, are you sure it's a MCD binary export?\")",
"def _read_headers(self):\n # Read the textual header.\n self._read_textual_header()\n # The next 400 bytes are from the Binary File Header.\n binary_file_header = self.file.read(400)\n bfh = SEGYBinaryFileHeader(binary_file_header, self.endian)\n self.binary_file_header = bfh\n self.data_encoding = self.binary_file_header.data_sample_format_code\n # If bytes 3506-3506 are not zero, an extended textual header follows\n # which is not supported so far.\n if bfh.number_of_3200_byte_ext_file_header_records_following != 0:\n msg = 'Extended textual headers are supported yet. ' + \\\n 'Please contact the developers.'\n raise NotImplementedError(msg)"
] | [
"0.68931645",
"0.68798554",
"0.68419707",
"0.6750417",
"0.67503536",
"0.6726142",
"0.64970535",
"0.64856166",
"0.6483004",
"0.6462398",
"0.642394",
"0.63903534",
"0.6343897",
"0.63411504",
"0.62974936",
"0.6291137",
"0.62813884",
"0.6280724",
"0.62803566",
"0.62442863",
"0.6229269",
"0.6198344",
"0.6155622",
"0.6150223",
"0.61451304",
"0.61402285",
"0.6111482",
"0.6105259",
"0.6081413",
"0.60623807"
] | 0.7617964 | 0 |
Read an XTR header file. The raw data for SPAM is in single precision volts. However, if there are multiple data files for a single recording, each one may have a different gain. Therefore, a scaling has to be calculated for each data file and channel. This scaling converts all channels to mV. For the most part, this method only reads recording information. However, it additionally calculates the lsb scaling and stores it in the ts_lsb channel header. More information is provided in the notes. Notes: The raw data for SPAM are single precision floats recording the raw voltage measurements of the sensors. However, if there are multiple data files for a single continuous recording, each one may have a different gain. Therefore, a scaling has to be calculated for each data file. For electric channels, the scaling begins with the scaling provided in the DATA section of the header file. This incorporates any gain occurring in the device. This scaling is further amended by a conversion to mV and a polarity reversal, | def readHeaderXTR(self, headerFile: str) -> None:
with open(headerFile, "r") as f:
lines = f.readlines()
sectionLines = {}
# let's get data
for line in lines:
line = line.strip()
line = line.replace("'", " ")
# continue if line is empty
if line == "":
continue
if "[" in line:
sec = line[1:-1]
sectionLines[sec] = []
else:
sectionLines[sec].append(line)
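        # at this point sectionLines maps each [SECTION] name to the list of lines found
        # under it, e.g. sectionLines["FILE"] holds the lines from the [FILE] section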
# the base class is built around a set of headers based on ATS headers
# though this is a bit more work here, it saves lots of code repetition
headers = {}
# recording information (start_time, start_date, stop_time, stop_date, ats_data_file)
fileLine = sectionLines["FILE"][0]
fileSplit = fileLine.split()
headers["sample_freq"] = np.absolute(float(fileSplit[-1]))
timeLine = sectionLines["FILE"][2]
timeSplit = timeLine.split()
# these are the unix time stamps
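        # timeSplit[1]/[2] and timeSplit[3]/[4] appear to hold the start and stop times as
        # unix seconds plus a fractional part, stitched together as "<seconds>.<fraction>"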
startDate = float(timeSplit[1] + "." + timeSplit[2])
datetimeStart = datetime.utcfromtimestamp(startDate)
stopDate = float(timeSplit[3] + "." + timeSplit[4])
datetimeStop = datetime.utcfromtimestamp(stopDate)
headers["start_date"] = datetimeStart.strftime("%Y-%m-%d")
headers["start_time"] = datetimeStart.strftime("%H:%M:%S.%f")
headers["stop_date"] = datetimeStop.strftime("%Y-%m-%d")
headers["stop_time"] = datetimeStop.strftime("%H:%M:%S.%f")
# here calculate number of samples
deltaSeconds = (datetimeStop - datetimeStart).total_seconds()
# calculate number of samples - have to add one because the time given in SPAM recording is the actual time of the last sample
numSamples = int(deltaSeconds * headers["sample_freq"]) + 1
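        # worked example (numbers are illustrative only): a recording spanning 10.0 seconds
        # at 250 Hz gives int(10.0 * 250) + 1 = 2501 samples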
# put these in headers for ease of future calculations in merge headers
headers["num_samples"] = numSamples
# spam datasets only have the one data file for all channels
headers["ats_data_file"] = fileSplit[1]
# data information (meas_channels, sample_freq)
chanLine = sectionLines["CHANNAME"][0]
# this gets reformatted to an int later
headers["meas_channels"] = chanLine.split()[1]
numChansInt = int(headers["meas_channels"])
# deal with the channel headers
chanHeaders = []
for iChan in range(0, numChansInt):
chanH = self.chanDefaults()
# set the sample frequency from the main headers
chanH["sample_freq"] = headers["sample_freq"]
# line data - read through the data in the correct channel order
chanLine = sectionLines["CHANNAME"][iChan + 1]
chanSplit = chanLine.split()
dataLine = sectionLines["DATA"][iChan + 1]
dataSplit = dataLine.split()
# channel input information (gain_stage1, gain_stage2, hchopper, echopper)
chanH["gain_stage1"] = 1
chanH["gain_stage2"] = 1
# channel output information (sensor_type, channel_type, ts_lsb, pos_x1, pos_x2, pos_y1, pos_y2, pos_z1, pos_z2, sensor_sernum)
chanH["ats_data_file"] = fileSplit[1]
chanH["num_samples"] = numSamples
# channel information
# spams often use Bx, By - use H within the software as a whole
chanH["channel_type"] = consistentChans(chanSplit[2])
# the sensor number is a bit of a hack - want MFSXXe or something - add MFS in front of the sensor number - this is liable to break
# at the same time, set the chopper
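            # the calibration line for this channel appears to sit in a section named
            # 200<channel number>003, e.g. [2001003] for the first channel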
calLine = sectionLines["200{}003".format(iChan + 1)][0]
calSplit = calLine.split()
if isMagnetic(chanH["channel_type"]):
                chanH["sensor_sernum"] = calSplit[2]  # the last three digits are the serial number
sensorType = calSplit[1].split("_")[1][-2:]
chanH["sensor_type"] = "MFS{:02d}".format(int(sensorType))
if "LF" in calSplit[1]:
chanH["hchopper"] = 1
else:
chanH["sensor_type"] = "ELC00"
if "LF" in calLine:
chanH["echopper"] = 1
# data is raw voltage of sensors
# both E and H fields need polarity reversal (from email with Reinhard)
# get scaling from headers
scaling = float(dataSplit[-2])
if isElectric(chanH["channel_type"]):
# the factor of 1000 is not entirely clear
lsb = 1000.0 * scaling
# volts to millivolts and a minus to switch polarity giving data in mV
lsb = -1000.0 * lsb
else:
# volts to millivolts and a minus to switch polarity giving data in mV
# scaling in header file is ignored because it duplicates static gain correction in calibration
lsb = -1000.0
chanH["ts_lsb"] = lsb
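            # worked example (the scaling value is hypothetical): with a header scaling of
            # 4.0e-6, an electric channel gets ts_lsb = -1000.0 * (1000.0 * 4.0e-6) = -4.0,
            # while a magnetic channel always gets ts_lsb = -1000.0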
# the distances
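            # dataSplit[4] presumably holds the full dipole length; storing half of it in both
            # pos_*1 and pos_*2 keeps the total electrode separation equal to that length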
if chanSplit[2] == "Ex":
chanH["pos_x1"] = float(dataSplit[4]) / 2
chanH["pos_x2"] = chanH["pos_x1"]
if chanSplit[2] == "Ey":
chanH["pos_y1"] = float(dataSplit[4]) / 2
chanH["pos_y2"] = chanH["pos_y1"]
if chanSplit[2] == "Ez":
chanH["pos_z1"] = float(dataSplit[4]) / 2
chanH["pos_z2"] = chanH["pos_z1"]
# append chanHeaders to the list
chanHeaders.append(chanH)
# check information from raw file headers
self.headersFromRawFile(headers["ats_data_file"], headers)
# return the headers and chanHeaders from this file
return headers, chanHeaders | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readHead(self):\n filesize = self.rhd.tell()\n \n #the order in which all of this is called is critcal\n self.header_identifier = hex(np.uint32(struct.unpack('<I', self.rhd.read(4))))\n v = np.int8(struct.unpack('BBBB', self.rhd.read(4)))\n\n #read each property of the header\n self.version = str(v[0]) + '.' + str(v[2])\n self.sample_rate = np.float32(struct.unpack('f', self.rhd.read(4)))[0] \n self.dsp_enabled = np.int8(struct.unpack('BB', self.rhd.read(2)))[0]\n self.actual_dsp_cutoff_frequency = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.actual_lower_bandwidth = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.actual_upper_bandwidth = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.desired_dsp_cutoff_frequency = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.desired_lower_bandwidth = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.desired_upper_bandwidth = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.notch_cutoff_mode = np.int8(struct.unpack('BB', self.rhd.read(2)))[0]\n self.desired_impedance_test_frequency = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n self.actual_impedance_test_frequency = np.float32(struct.unpack('f', self.rhd.read(4)))[0]\n #list of 3 notes\n self.note = [_qstring(self.rhd),_qstring(self.rhd),_qstring(self.rhd)]\n self.number_of_temperature_sensors = np.int16(struct.unpack('h', self.rhd.read(2)))[0]\n self._TEMP_SENSORS = self.number_of_temperature_sensors\n self.board_mode = np.int16(struct.unpack('h', self.rhd.read(2)))[0]\n self.number_of_signal_groups = np.int16(struct.unpack('h', self.rhd.read(2)))[0]\n\n #dict of signal groups\n self.signal_groups = {} \n for i in range(self.number_of_signal_groups):\n sg = Signal_Group(self)\n self.signal_groups[sg.signal_group_name] = sg\n \n #dict of channels\n self.channels = {}\n for key, group in self.signal_groups.iteritems():\n self.channels.update(group.channels)",
"def readHeader(self) -> None:\n # read header files\n self.headersList = []\n self.chanHeadersList = []\n for headerFile in self.headerF:\n if \"xtrx\" in headerFile.lower():\n headers, chanHeaders = self.readHeaderXTRX(headerFile)\n else:\n headers, chanHeaders = self.readHeaderXTR(headerFile)\n self.headersList.append(headers)\n self.chanHeadersList.append(chanHeaders)\n\n # check to make sure no gaps, calculate out the sample ranges and list the data files for each sample\n self.mergeHeaders(self.headersList, self.chanHeadersList)",
"def _read_hdr_file(ktlx_file):\r\n with open(ktlx_file, 'rb') as f:\r\n\r\n hdr = {}\r\n assert f.tell() == 0\r\n\r\n hdr['file_guid'] = hexlify(f.read(16))\r\n hdr['file_schema'], = unpack('<H', f.read(2))\r\n if not hdr['file_schema'] in (1, 3, 7, 8, 9):\r\n raise NotImplementedError('Reading header not implemented for ' +\r\n 'file_schema ' + str(hdr['file_schema']))\r\n\r\n hdr['base_schema'], = unpack('<H', f.read(2))\r\n if not hdr['base_schema'] == 1: # p.3: base_schema 0 is rare, I think\r\n raise NotImplementedError('Reading header not implemented for ' +\r\n 'base_schema ' + str(hdr['base_schema']))\r\n\r\n hdr['creation_time'] = datetime.fromtimestamp(unpack('<i',\r\n f.read(4))[0])\r\n hdr['patient_id'], = unpack('<i', f.read(4))\r\n hdr['study_id'], = unpack('<i', f.read(4))\r\n hdr['pat_last_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['pat_first_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['pat_middle_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['patient_id'] = _make_str(unpack('c' * 80, f.read(80)))\r\n assert f.tell() == 352\r\n\r\n if hdr['file_schema'] >= 7:\r\n hdr['sample_freq'], = unpack('<d', f.read(8))\r\n n_chan, = unpack('<i', f.read(4))\r\n hdr['num_channels'] = n_chan\r\n hdr['deltabits'], = unpack('<i', f.read(4))\r\n hdr['phys_chan'] = unpack('<' + 'i' * hdr['num_channels'],\r\n f.read(hdr['num_channels'] * 4))\r\n\r\n f.seek(4464)\r\n hdr['headbox_type'] = unpack('<' + 'i' * 4, f.read(16))\r\n hdr['headbox_sn'] = unpack('<' + 'i' * 4, f.read(16))\r\n hdr['headbox_sw_version'] = _make_str(unpack('c' * 40, f.read(40)))\r\n hdr['dsp_hw_version'] = _make_str(unpack('c' * 10, f.read(10)))\r\n hdr['dsp_sw_version'] = _make_str(unpack('c' * 10, f.read(10)))\r\n hdr['discardbits'], = unpack('<i', f.read(4))\r\n\r\n if hdr['file_schema'] >= 8:\r\n hdr['shorted'] = unpack('<' + 'h' * 1024, f.read(2048))[:n_chan]\r\n hdr['frequency_factor'] = unpack('<' + 'h' * 1024,\r\n f.read(2048))[:n_chan]\r\n return hdr",
"def readHeaderXTRX(self, headerFile):\n raise NotImplementedError(\"Support for XTRX files has not yet been implemented\")",
"def _readHeader(self):\n self.ControllerVersion = self._readInt(0)\n self.LogicOutput = self._readInt(2)\n self.AppHiCapLowNoise = self._readInt(4)\n self.TimingMode = self._readInt(8)\n self.Exposure = self._readFloat(10)\n self.DetTemperature = self._readFloat(36)\n self.DetectorType = self._readInt(40)\n self.TriggerDiode = self._readInt(44)\n self.DelayTime = self._readFloat(46)\n self.ShutterControl = self._readInt(50)\n self.AbsorbLive = self._readInt(52)\n self.AbsorbMode = self._readInt(54)\n self.CanDoVirtualChip = self._readInt(56)\n self.ThresholdMinLive = self._readInt(58)\n self.ThresholdMin = self._readFloat(60)\n self.ThresholdMaxLive = self._readInt(64)\n self.ThresholdMax = self._readFloat(66)\n self.ADCOffset = self._readInt(188)\n self.ADCRate = self._readInt(190)\n self.ADCType = self._readInt(192)\n self.ADCRes = self._readInt(194)\n self.ADCBitAdj = self._readInt(196)\n self.Gain = self._readInt(198)\n self.GeometricOps = self._readInt(600)",
"def read_header(fid):\r\n\r\n # Check 'magic number' at beginning of file to make sure this is an Intan\r\n # Technologies RHD2000 data file.\r\n magic_number, = struct.unpack('<I', fid.read(4)) \r\n if magic_number != int('c6912702', 16): raise Exception('Unrecognized file type.')\r\n\r\n header = {}\r\n # Read version number.\r\n version = {}\r\n (version['major'], version['minor']) = struct.unpack('<hh', fid.read(4)) \r\n header['version'] = version\r\n\r\n print('')\r\n print('Reading Intan Technologies RHD2000 Data File, Version {}.{}'.format(version['major'], version['minor']))\r\n print('')\r\n\r\n freq = {}\r\n\r\n # Read information of sampling rate and amplifier frequency settings.\r\n header['sample_rate'], = struct.unpack('<f', fid.read(4))\r\n (freq['dsp_enabled'], freq['actual_dsp_cutoff_frequency'], freq['actual_lower_bandwidth'], freq['actual_upper_bandwidth'], \r\n freq['desired_dsp_cutoff_frequency'], freq['desired_lower_bandwidth'], freq['desired_upper_bandwidth']) = struct.unpack('<hffffff', fid.read(26))\r\n\r\n\r\n # This tells us if a software 50/60 Hz notch filter was enabled during\r\n # the data acquisition.\r\n notch_filter_mode, = struct.unpack('<h', fid.read(2))\r\n header['notch_filter_frequency'] = 0\r\n if notch_filter_mode == 1:\r\n header['notch_filter_frequency'] = 50\r\n elif notch_filter_mode == 2:\r\n header['notch_filter_frequency'] = 60\r\n freq['notch_filter_frequency'] = header['notch_filter_frequency']\r\n\r\n (freq['desired_impedance_test_frequency'], freq['actual_impedance_test_frequency']) = struct.unpack('<ff', fid.read(8))\r\n\r\n note1 = read_qstring(fid)\r\n note2 = read_qstring(fid)\r\n note3 = read_qstring(fid)\r\n header['notes'] = { 'note1' : note1, 'note2' : note2, 'note3' : note3}\r\n\r\n # If data file is from GUI v1.1 or later, see if temperature sensor data was saved.\r\n header['num_temp_sensor_channels'] = 0\r\n if (version['major'] == 1 and version['minor'] >= 1) or (version['major'] > 1) :\r\n header['num_temp_sensor_channels'], = struct.unpack('<h', fid.read(2))\r\n \r\n # If data file is from GUI v1.3 or later, load eval board mode.\r\n header['eval_board_mode'] = 0\r\n if ((version['major'] == 1) and (version['minor'] >= 3)) or (version['major'] > 1) :\r\n header['eval_board_mode'], = struct.unpack('<h', fid.read(2))\r\n \r\n \r\n header['num_samples_per_data_block'] = 60\r\n # If data file is from v2.0 or later (Intan Recording Controller), load name of digital reference channel\r\n if (version['major'] > 1):\r\n header['reference_channel'] = read_qstring(fid)\r\n header['num_samples_per_data_block'] = 128\r\n\r\n # Place frequency-related information in data structure. 
(Note: much of this structure is set above)\r\n freq['amplifier_sample_rate'] = header['sample_rate']\r\n freq['aux_input_sample_rate'] = header['sample_rate'] / 4\r\n freq['supply_voltage_sample_rate'] = header['sample_rate'] / header['num_samples_per_data_block']\r\n freq['board_adc_sample_rate'] = header['sample_rate']\r\n freq['board_dig_in_sample_rate'] = header['sample_rate']\r\n\r\n header['frequency_parameters'] = freq\r\n\r\n # Create structure arrays for each type of data channel.\r\n header['spike_triggers'] = []\r\n header['amplifier_channels'] = []\r\n header['aux_input_channels'] = []\r\n header['supply_voltage_channels'] = []\r\n header['board_adc_channels'] = []\r\n header['board_dig_in_channels'] = []\r\n header['board_dig_out_channels'] = []\r\n\r\n # Read signal summary from data file header.\r\n\r\n number_of_signal_groups, = struct.unpack('<h', fid.read(2))\r\n print('n signal groups {}'.format(number_of_signal_groups))\r\n\r\n for signal_group in range(1, number_of_signal_groups + 1):\r\n signal_group_name = read_qstring(fid)\r\n signal_group_prefix = read_qstring(fid)\r\n (signal_group_enabled, signal_group_num_channels, signal_group_num_amp_channels) = struct.unpack('<hhh', fid.read(6))\r\n\r\n if (signal_group_num_channels > 0) and (signal_group_enabled > 0):\r\n for signal_channel in range(0, signal_group_num_channels):\r\n new_channel = {'port_name' : signal_group_name, 'port_prefix' : signal_group_prefix, 'port_number' : signal_group}\r\n new_channel['native_channel_name'] = read_qstring(fid)\r\n new_channel['custom_channel_name'] = read_qstring(fid)\r\n (new_channel['native_order'], new_channel['custom_order'], signal_type, channel_enabled, new_channel['chip_channel'], new_channel['board_stream']) = struct.unpack('<hhhhhh', fid.read(12))\r\n new_trigger_channel = {}\r\n (new_trigger_channel['voltage_trigger_mode'], new_trigger_channel['voltage_threshold'], new_trigger_channel['digital_trigger_channel'], new_trigger_channel['digital_edge_polarity']) = struct.unpack('<hhhh', fid.read(8))\r\n (new_channel['electrode_impedance_magnitude'], new_channel['electrode_impedance_phase']) = struct.unpack('<ff', fid.read(8))\r\n\r\n if channel_enabled:\r\n if signal_type == 0:\r\n header['amplifier_channels'].append(new_channel)\r\n header['spike_triggers'].append(new_trigger_channel)\r\n elif signal_type == 1:\r\n header['aux_input_channels'].append(new_channel)\r\n elif signal_type == 2:\r\n header['supply_voltage_channels'].append(new_channel)\r\n elif signal_type == 3:\r\n header['board_adc_channels'].append(new_channel)\r\n elif signal_type == 4:\r\n header['board_dig_in_channels'].append(new_channel)\r\n elif signal_type == 5:\r\n header['board_dig_out_channels'].append(new_channel)\r\n else:\r\n raise Exception('Unknown channel type.')\r\n \r\n # Summarize contents of data file.\r\n header['num_amplifier_channels'] = len(header['amplifier_channels'])\r\n header['num_aux_input_channels'] = len(header['aux_input_channels'])\r\n header['num_supply_voltage_channels'] = len(header['supply_voltage_channels'])\r\n header['num_board_adc_channels'] = len(header['board_adc_channels'])\r\n header['num_board_dig_in_channels'] = len(header['board_dig_in_channels'])\r\n header['num_board_dig_out_channels'] = len(header['board_dig_out_channels'])\r\n\r\n return header",
"def headers(self, min_rt=None, max_rt=None, ms_level=None, polarity=None, **kwargs):\n \n # iterate through file\n for evt, elm in etree.iterparse(self.path, ('end',)):\n \n # retrieve instrument configs\n if elm.tag == self._prefix+'instrumentConfigurationList':\n self._retrieve_instrument_configurations(elm)\n \n # process spectrum data\n if elm.tag == self._prefix+'spectrum':\n \n # init scan data container\n scan_data = self._make_template()\n \n # retrieve raw header data\n self._retrieve_header_data(elm, scan_data)\n \n # check raw header data\n if not self._check_header_data(scan_data, min_rt, max_rt, ms_level, polarity):\n elm.clear()\n continue\n \n # free memory\n elm.clear()\n \n # create scan header\n yield self._make_header(scan_data)",
"def readHeader(self, filename):\n f = Data.Usrxxx.readHeader(self, filename)\n# self.sayHeader()\n \n while True:\n data = fortran.read(f)\n if data is None: break\n size = len(data)\n# print(\"size: \", size)\n\n if size == 14 and data[:10] == \"STATISTICS\":\n self.statpos = f.tell()\n for det in self.detector:\n data = Data.unpackArray(fortran.read(f))\n det.total = data[0]\n det.totalerror = data[1]\n# for j in range(6):\n# fortran.skip(f)\n break\n\n if size != 50: raise IOError(\"Invalid USRTRACK/USRCOLL file\")\n\n header = struct.unpack(\"=i10siiififfif\", data)\n\n det = Data.Detector()\n det.nb = header[0]\n det.name = header[1].strip() # titutc - track/coll name\n det.type = header[2] # itustc - type of binning: 1 - linear energy etc\n det.dist = header[3] # idustc = distribution to be scored\n det.reg = header[4] # nrustc = region\n det.volume = header[5] # vusrtc = volume (cm**3) of the detector\n det.lowneu = header[6] # llnutc = low energy neutron flag\n det.elow = header[7] # etclow = minimum energy [GeV]\n det.ehigh = header[8] # etchgh = maximum energy [GeV]\n det.ne = header[9] # netcbn = number of energy intervals\n det.de = header[10] # detcbn = energy bin width\n\n self.detector.append(det)\n\n if det.lowneu:\n data = fortran.read(f)\n det.ngroup = struct.unpack(\"=i\",data[:4])[0]\n det.egroup = struct.unpack(\"=%df\"%(det.ngroup+1), data[4:])\n print(\"Low energy neutrons scored with %d groups\" % det.ngroup)\n else:\n\t\tdet.ngroup = 0\n\t\tdet.egroup = []\n\n\t size = (det.ngroup+det.ne) * 4\n\t if size != fortran.skip(f):\n\t\traise IOError(\"Invalid USRTRACK file\")\n f.close()",
"def _readBTS(self,fname):\n with BinaryFile(fname) as f:\n #\n # read header info\n #\n if self.verbose: print('Reading header information from',fname)\n\n ID = f.read_int2()\n assert( ID==7 or ID==8 )\n if ID==7: filetype = 'non-periodic'\n elif ID==8: filetype = 'periodic'\n else: filetype = 'UNKNOWN'\n if self.verbose:\n print(' id= {:d} ({:s})'.format(ID,filetype))\n\n # - read resolution settings\n self.NZ = f.read_int4()\n self.NY = f.read_int4()\n self.Ntower = f.read_int4()\n if self.verbose:\n print(' NumGrid_Z,_Y=',self.NZ,self.NY)\n print(' ntower=',self.Ntower)\n self.N = f.read_int4()\n self.dz = f.read_float(dtype=self.realtype)\n self.dy = f.read_float(dtype=self.realtype)\n self.dt = f.read_float(dtype=self.realtype)\n self.period = self.realtype(self.N * self.dt)\n self.Nsize = 3*self.NY*self.NZ*self.N\n if self.verbose:\n print(' nt=',self.N)\n print(' (problem size: {:d} points)'.format(self.Nsize))\n print(' dz,dy=',self.dz,self.dy)\n print(' TimeStep=',self.dt)\n print(' Period=',self.period)\n\n # - read reference values\n self.uhub = f.read_float(dtype=self.realtype)\n self.zhub = f.read_float(dtype=self.realtype) # NOT USED\n self.zbot = f.read_float(dtype=self.realtype)\n if self.Umean is None:\n self.Umean = self.uhub\n if self.verbose:\n print(' Umean = uhub =',self.Umean,\n '(for calculating fluctuations)')\n else: # user-specified Umean\n if self.verbose:\n print(' Umean =',self.Umean,\n '(for calculating fluctuations)')\n print(' uhub=',self.uhub,' (NOT USED)')\n if self.verbose:\n print(' HubHt=',self.zhub,' (NOT USED)')\n print(' Zbottom=',self.zbot)\n\n # - read scaling factors\n self.Vslope = np.zeros(3,dtype=self.realtype)\n self.Vintercept = np.zeros(3,dtype=self.realtype)\n for i in range(3):\n self.Vslope[i] = f.read_float(dtype=self.realtype)\n self.Vintercept[i] = f.read_float(dtype=self.realtype)\n if self.verbose:\n # output is float64 precision by default...\n print(' Vslope=',self.Vslope)\n print(' Vintercept=',self.Vintercept)\n\n # - read turbsim info string\n nchar = f.read_int4()\n version = f.read(N=nchar)\n if self.verbose: print(version)\n\n #\n # read normalized data\n #\n # note: need to specify Fortran-order to properly read data using np.nditer\n t0 = time.process_time()\n if self.verbose: print('Reading normalized grid data')\n\n self.U = np.zeros((3,self.NY,self.NZ,self.N),order='F',dtype=self.realtype)\n self.T = np.zeros((self.N,self.NY,self.NZ))\n if self.verbose:\n print(' U size :',self.U.nbytes/1024.**2,'MB')\n\n for val in np.nditer(self.U, op_flags=['writeonly']):\n val[...] = f.read_int2()\n self.U = self.U.swapaxes(3,2).swapaxes(2,1) # new shape: (3,self.N,self.NY,self.NZ)\n\n if self.Ntower > 0:\n if self.verbose:\n print('Reading normalized tower data')\n self.Utow = np.zeros((3,self.Ntower,self.N),\n order='F',dtype=self.realtype)\n if self.verbose:\n print(' Utow size :',self.Utow.nbytes/1024.**2,'MB')\n for val in np.nditer(self.Utow, op_flags=['writeonly']):\n val[...] 
= f.read_int2()\n\n if self.verbose:\n print(' Read velocitiy fields in',time.process_time()-t0,'s')\n \n #\n # calculate dimensional velocity\n #\n if self.verbose:\n print('Calculating velocities from normalized data')\n for i in range(3):\n self.U[i,:,:,:] -= self.Vintercept[i]\n self.U[i,:,:,:] /= self.Vslope[i]\n if self.Ntower > 0:\n self.Utow[i,:,:] -= self.Vintercept[i]\n self.Utow[i,:,:] /= self.Vslope[i]\n self.U[0,:,:,:] -= self.Umean # uniform inflow w/ no shear assumed\n\n print(' u min/max [',np.min(self.U[0,:,:,:]),\n np.max(self.U[0,:,:,:]),']')\n print(' v min/max [',np.min(self.U[1,:,:,:]),\n np.max(self.U[1,:,:,:]),']')\n print(' w min/max [',np.min(self.U[2,:,:,:]),\n np.max(self.U[2,:,:,:]),']')\n\n self.scaling = np.ones((3,self.NZ))\n\n #\n # calculate coordinates\n #\n if self.verbose:\n print('Calculating coordinates')\n #self.y = -0.5*(self.NY-1)*self.dy + np.arange(self.NY,dtype=self.realtype)*self.dy\n self.y = np.arange(self.NY,dtype=self.realtype)*self.dy\n self.z = self.zbot + np.arange(self.NZ,dtype=self.realtype)*self.dz\n #self.ztow = self.zbot - np.arange(self.NZ,dtype=self.realtype)*self.dz #--NOT USED\n\n self.t = np.arange(self.N,dtype=self.realtype)*self.dt\n if self.verbose:\n print('Read times [',self.t[0],self.t[1],'...',self.t[-1],']')",
"def _read_headers(self):\n # Read the textual header.\n self._read_textual_header()\n # The next 400 bytes are from the Binary File Header.\n binary_file_header = self.file.read(400)\n bfh = SEGYBinaryFileHeader(binary_file_header, self.endian)\n self.binary_file_header = bfh\n self.data_encoding = self.binary_file_header.data_sample_format_code\n # If bytes 3506-3506 are not zero, an extended textual header follows\n # which is not supported so far.\n if bfh.number_of_3200_byte_ext_file_header_records_following != 0:\n msg = 'Extended textual headers are supported yet. ' + \\\n 'Please contact the developers.'\n raise NotImplementedError(msg)",
"def _readheader(lines):\n hdrdict = {}\n #input list of 26 lines of header\n #station and channel\n line = lines[5]\n parts = line.strip().split()\n fname = parts[1]\n fparts = fname.split('_')\n hdrdict['station'] = fparts[-2]+'_'+fparts[-1]\n\n #the \"Component\" lines look like either: Component S00W, Component S90E, Component Up\n compstr = lines[12].strip().split()[1]\n hdrdict['channel'] = get_comp_name(compstr)\n\n #instrument\n hdrdict['instrument'] = lines[3].split()[1].strip()\n \n #location string\n line = lines[6]\n hdrdict['location'] = line.strip()\n #event origin, buffer start year/month\n line = lines[16]\n parts = line.strip().split()\n bufyear = int(parts[8])\n bufmonth = int(parts[9])\n #epicentral location, buffer start day/hour\n line = lines[17]\n parts = line.strip().split()\n bufday = int(parts[8])\n bufhour = int(parts[9])\n #numpoints, buffer start min/sec\n line = lines[19]\n parts = line.strip().split()\n hdrdict['npts'] = int(parts[0])\n bufmin = int(parts[8])\n millisec = int(parts[9])\n bufsec = int(millisec/1000)\n bufmicrosec = int(np.round(millisec/1000.0 - bufsec))\n hdrdict['starttime'] = UTCDateTime(datetime(bufyear,bufmonth,bufday,bufhour,bufmin,bufsec,bufmicrosec))\n #part C\n #frequency, calibration value and some other stuff we don't care about\n line = lines[20]\n parts = line.strip().split()\n hdrdict['sampling_rate'] = float(parts[0])\n hdrdict['delta'] = 1.0/hdrdict['sampling_rate']\n hdrdict['calib'] = float(parts[7])\n #site location info, this time in dd\n line = lines[21]\n parts = line.strip().split()\n hdrdict['lat'] = float(parts[0]) * -1\n hdrdict['lon'] = float(parts[1])\n hdrdict['height'] = 0.0\n #duration\n line = lines[22]\n parts = line.strip().split()\n hdrdict['duration'] = float(parts[0])\n hdrdict['endtime'] = hdrdict['starttime'] + hdrdict['duration']\n #max acceleration - good for sanity check\n line = lines[23]\n parts = line.strip().split()\n hdrdict['maxacc'] = float(parts[0])\n hdrdict['network'] = 'NZ'\n hdrdict['units'] = 'acc'\n return hdrdict",
"def MTread(fn,slMode='s',leng=0,start=0, wav_out=None, outpath='Default Folder',header=None):\n #check variables\n try:\n fn\n except NameError:\n raise Warning('Filename fn needs to be defined!')\n \n try:\n slMode\n except NameError:\n warnings.warn('slMode - the start and length mode was not defined...defaulting to s for seconds')\n slMode = 's'\n if slMode.upper() not in ['S','P']:\n warnings.warn('slMode - the start and length mode has to be either s for seconds or p for points...defaulting to s for seconds')\n slMode = 's'\n \n try:\n leng\n except NameError:\n warnings.warn('leng - the length of the data to be read in was not defined...defaulting to leng = 0, reading in all data')\n leng = 0\n if type(leng) != int:\n warnings.warn('leng - the length of the data has to be an integer...defaulting to leng = 0, reading in all data')\n leng = 0\n \n try:\n start\n except NameError:\n warnings.warn('start - the starting point or time was not defined...defaulting to start = 0, reading from the start')\n start = 0\n if type(leng) != int:\n warnings.warn('start - the starting point or time was not defined...defaulting to start = 0, reading from the start')\n start = 0\n \n # Create empty dictionaries\n HEADER = {}\n INFO = {}\n \n if leng==0: leng = np.inf\n \n #check if auxiliary data\n vcode = path.basename(fn)[2]\n aux = True if vcode in ['I','J','K','P','T','X','Y','Z'] else False\n \n #open the binary file and start reading\n with open(fn, \"rb\") as f:\n magicstring = f.read(8).decode('ascii').strip().strip('\\x00')\n if magicstring == 'DATA':\n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Found Data...')\n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Getting Header information...')\n HEADER['totalhdrs'] = int(f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['abbrev '] = f.read(8).decode('ascii').strip().strip('\\x00')\n HEADER['stationcode'] = f.read(3).decode('ascii').strip().strip('\\x00')\n HEADER['title'] = f.read(82).decode('ascii').strip().strip('\\x00')\n HEADER['month'] = (f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['day'] = (f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['year'] = (f.read(5).decode('ascii').strip().strip('\\x00'))\n HEADER['hours'] = (f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['minutes'] = (f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['seconds'] = (f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['msec'] = (f.read(4).decode('ascii').strip().strip('\\x00'))\n HEADER['sampling_period'] = float(f.read(15).decode('ascii').strip().strip('\\x00'))\n HEADER['samplebits'] = int(f.read(3).decode('ascii').strip().strip('\\x00'))\n HEADER['wordsize'] = int(f.read(2).decode('ascii').strip().strip('\\x00'))\n \n #if HEADER['wordsize'] < HEADER['samplebits']/8:\n #warnings.warn('The samplebits field Does not fit the wordsize field. --- This file may be bad. 
')\n HEADER['typemark'] = f.read(1).decode('ascii').strip().strip('\\x00')\n \n HEADER['swapping'] = f.read(1).decode('ascii').strip().strip('\\x00')\n \n HEADER['signing'] = f.read(1).decode('ascii').strip().strip('\\x00')\n HEADER['caltype'] = f.read(1).decode('ascii').strip().strip('\\x00')\n HEADER['calmin'] = float(f.read(15).decode('ascii').strip().strip('\\x00'))\n HEADER['calmax'] = float(f.read(15).decode('ascii').strip().strip('\\x00'))\n HEADER['calunits'] = f.read(40).decode('ascii').strip().strip('\\x00')\n HEADER['recordsize'] = int(f.read(6).decode('ascii').strip().strip('\\x00'))\n HEADER['sourcevers'] = f.read(9).decode('ascii').strip().strip('\\x00')\n HEADER['sourcesn'] = f.read(16).decode('ascii').strip().strip('\\x00')\n print(HEADER)\n \n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Getting Meta data...')\n INFO['filename'] = fn\n INFO['filesize'] = path.getsize(fn)\n INFO['srate'] = 1/HEADER['sampling_period']\n INFO['when'] = datetime.strptime(HEADER['year'] + '/' + HEADER['month'] + '/' + HEADER['day'] + ' ' + HEADER['hours'] + ':' + HEADER['minutes'] + ':' + HEADER['seconds'] + '.' + HEADER['msec'],'%Y/%m/%d %H:%M:%S.%f')\n INFO['datenumber'] = date.toordinal(INFO['when'])\n \n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Reading Data...')\n if slMode.upper() == 'P': # Start & Length specified in # Points (samples)\n INFO['whenC'] = INFO['when'] + timedelta(seconds=start/INFO['srate'])\n INFO['datenumber'] = INFO['datenumber'] + (start/INFO['srate']/24/3600)\n else:\n INFO['whenC'] = INFO['when'] + timedelta(seconds=start) # Corrected start time (with offset)\n INFO['datenumber'] = INFO['datenumber'] + start/24/3600\n \n if 'wordsize' in HEADER:\n if HEADER['wordsize'] == '':\n HEADER['wordsize'] = 2\n else:\n HEADER['wordsize'] = 2\n \n INFO['nsamp'] = int((INFO['filesize'] - 512 * HEADER['totalhdrs']) / HEADER['wordsize'])\n INFO['seconds'] = INFO['nsamp'] / INFO['srate']\n \n if leng > 0: # Only load data if it's been asked for.\n if any(x in HEADER['swapping'] for x in ['S','L','s','l']):\n mode = '<'\n else:\n mode = '>'\n \n status = 0\n if slMode.upper() == 'P': # specified start time in sample 'P'oints rather than time\n try:\n f.seek(int(512 * HEADER['totalhdrs']) + int(start) * HEADER['wordsize']) # Skip by samples/points\n except:\n status = 1\n else:\n try:\n f.seek(int(512 * HEADER['totalhdrs']) + round(start * INFO['srate'] * HEADER['wordsize'])) # skip by time (seconds)\n except:\n status = 1\n \n if status == 0: # If status is nonzero, we probably went past the end of the file.\n if HEADER['caltype'].upper() == 'F':\n if not any(x == HEADER['wordsize'] for x in [4,8]):\n f.close(f)\n #raise Warning('Invalid word size! 
Only valid Float sizes are four or eight bytes.')\n binType = 'float' + str(HEADER['wordsize'] * 8)\n else:\n binType = 'bit' + str(HEADER['wordsize'] * 8)\n if any(x in HEADER['signing'] for x in ['U','u']):\n binType = 'u' + binType\n \n \n if slMode.upper() == 'P':\n if leng == np.inf:\n fi = f.read()\n else:\n fi = f.read(leng)\n \n else:\n if leng == np.inf:\n fi = f.read()\n else:\n fi = f.read(int(leng*INFO['srate'])*2)\n if aux:\n fmt = '%c%iH' %(mode,len(fi)/2)\n else:\n fmt = '%c%ih' %(mode,len(fi)/2)\n p = unpack(fmt,fi)\n \n calmax = HEADER['calmax']\n calmin = HEADER['calmin']\n \n if (type(calmin) == float and type(calmax) == float and ((calmin + np.spacing(1)) < calmax) and HEADER['caltype'].upper() != 'F'):\n calmax = HEADER['calmax']\n calmin = HEADER['calmin']\n if HEADER['signing'].upper() == 'U':\n bitmin = 0\n bitmax = 2**HEADER['samplebits'] - 1\n else:\n bitmin = -(2**(HEADER['samplebits']-1))\n bitmax = (2**(HEADER['samplebits']-1)) - 1\n \n \n multiplier = (calmax - calmin) / (bitmax - bitmin)\n p = (np.array(p) - bitmin) * multiplier + calmin\n else:\n p = []# Output an empty matrix if requested data is beyond the length of the current file\n \n else:\n p = [] # Also output an empty matrix of zero length LENGTH input is requested (ie, only return header/info values)\n INFO['count'] = 0\n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Returning data...')\n \n #check if it is a data or aux file\n \n if aux:\n p = pd.DataFrame({'Value':p})\n p['VarCode'] = vcode\n p['mission'] = HEADER['title'].split('-')[0] \n p['sampling_rate'] = HEADER['sampling_period']\n p['nSample'] = np.arange(1,p.shape[0]+1)\n p['start_time'] = pd.to_datetime(HEADER[\"year\"] + \"-\" + HEADER[\"month\"] + \"-\" + HEADER[\"day\"] + \" \" + HEADER[\"hours\"] + \":\" +\\\n HEADER[\"minutes\"] + \":\" + HEADER[\"seconds\"] + \".\" + HEADER[\"msec\"])\n p['sec_since_start'] = p['nSample'] * p['sampling_rate']\n p['Time'] = p['start_time'] + pd.to_timedelta(p['sec_since_start'], unit='s')\n return(p,HEADER,'aux')\n else:\n if wav_out != None:\n print(datetime.now().strftime(\"%H:%M:%S\") + ' - Saving wav file...' + HEADER['title'].split('-')[0] )\n if 'p':\n if outpath=='Default Folder':\n outpath = path.dirname(fn)\n outfn = outpath +'\\\\' + INFO['when'].strftime('D%m%d%YT%H%M%S') + '_' + path.basename(fn)[:-3] + '.wav'\n sr = int(INFO['srate'])\n data = p\n write(outfn,int(sr), np.int16(data/(abs(data).max())*np.iinfo(np.int16).max))\n \n if header != None:\n if outpath=='Default Folder':\n outpath = path.dirname(fn)\n hh = pd.DataFrame.from_dict(HEADER, orient='index')\n hh.to_csv( outpath +'\\\\' + INFO['when'].strftime('D%m%d%YT%H%M%S') + '_' + path.basename(fn)[:-3] + '.csv')\n if 'p':\n return p,HEADER,INFO",
"def __read_header(self):\n header = self.__file_object.readline()\n header_string = header.decode('utf-8')\n print(header_string)\n # Ignore first letter\n self.frame_width = int(re.findall('W\\d+', header_string)[0][1:])\n self.frame_height = int(re.findall('H\\d+', header_string)[0][1:])\n self.frame_rate = re.findall('F\\d+\\:\\d+', header_string)[0][1:]\n\n # Calculate actual frame rate given the value is a ratio\n tokens = [int(d.replace(' ', '')) for d in self.frame_rate.split(':')]\n self.frame_rate = round(tokens[0] / tokens[1], 1)\n\n self.__pixel_aspect_ratio = re.findall('A\\d+\\:\\d+', header_string)[0][1:]\n\n # Calculate actual pixel aspect ratio rate given the value is a ratio\n tokens = [int(d.replace(' ', '')) for d in self.__pixel_aspect_ratio.split(':')]\n self.__pixel_aspect_ratio = round(tokens[0] / tokens[1], 1)\n\n # Don't ignore for interlacing\n self.__interlacing_mode = re.findall('I(p|t|b|m)', header_string)[0]\n\n # Ignore first 'FRAME\\n' terminator so the file object points to the first byte of raw data of the first frame\n self.__file_object.readline()\n\n self.__first_frame_raw_data_position = self.__file_object.tell()\n\n self.determine_color_space_by_frame_size()\n\n # Restore\n self.__file_object.seek(self.__first_frame_raw_data_position)\n\n return header\n\n # Color space parameter is missing?\n print('FourCC:\\t\\t', header_string[:4])\n print('Input file:\\t', self.__input_file_path)\n print('Frame size:\\t', f'{self.frame_width}x{self.frame_height}')\n print('Frame rate:\\t', f'{self.frame_rate} FPS')\n print('Aspect Ratio:\\t', self.__pixel_aspect_ratio)\n print('Color space\\t', self.color_space)\n print('Frame size (raw data):', self.__frame_raw_data_size)\n print('Position of first raw:', self.__first_frame_raw_data_position)",
"def read_header(infile):\n h = dict()\n fid = open(infile, 'r+b')\n h['filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['parent_filename'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 20))\n h['comments1'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['comments2'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 80))\n h['energy_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['config_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['file_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['trans_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['date_modified'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 16))\n h['frequency'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['mat_velocity'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_polarization_channels'] =np.fromfile(fid, dtype = np.int16,count = 1)\n h['spare00'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['adc_min_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_max_voltage'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['band_width'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare01'] = np.fromfile(fid, dtype = np.int16, count = 5)\n h['polarization_type'] = np.fromfile(fid, dtype = np.int16, count = 4)\n h['record_header_size'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['word_precision'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['min_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['max_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['avg_data_value'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_scale_factor'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['data_units'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['surf_removal'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['edge_weighting'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['x_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['y_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['z_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['t_units'] = np.fromfile(fid, dtype = np.uint16, count = 1)\n h['spare02'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_return_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['scan_orientation'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scan_direction'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['data_storage_order'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['x_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_inc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['num_x_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_y_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_z_pts'] = np.fromfile(fid, dtype = np.int32, count = 1)\n h['num_t_pts'] = np.fromfile(fid, dtype = np.int32, count 
= 1)\n h['x_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_speed'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_acc'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_motor_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_encoder_res'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['date_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['time_processed'] = b''.join(np.fromfile(fid, dtype = 'S1', count = 8))\n h['depth_recon'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['elevation_offset_angle'] = np.fromfile(fid,dtype = np.float32, count = 1)\n h['roll_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_max_travel'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['azimuth_offset_angle'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['adc_type'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['spare06'] = np.fromfile(fid, dtype = np.int16, count = 1)\n h['scanner_radius'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['x_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['y_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['z_offset'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['t_delay'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_start'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['range_gate_end'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['ahis_software_version'] = np.fromfile(fid, dtype = np.float32, count = 1)\n h['spare_end'] = np.fromfile(fid, dtype = np.float32, count = 10)\n return h",
"def read_2hps2_acc(filename, multi_header=True):\n\n num_headers = 27\n header_row = 16\n units_row = 17\n timestamp_row = 20\n\n with open(filename, \"r\") as f:\n accreader = csv.reader(f, delimiter=\" \")\n\n # Skip file info headers\n for i in range(num_headers):\n if i == header_row - 1:\n channels = next(accreader)\n elif i == units_row - 1:\n units = next(accreader)\n elif i == timestamp_row - 1:\n ts_start = next(accreader)\n else:\n next(accreader)\n\n # Read body - drop blanks\n data = [[x for x in line if x != \"\"] for line in accreader]\n\n # Convert column names list so that split by \",\" not \" \", drop \"Time\" item and trim\n channels = \" \".join(channels).split(\",\")[1:]\n channels = [c.strip() for c in channels]\n\n # Read the start timestamp marker and get start datetime\n ts_start = [int(i) for i in ts_start[5:]]\n dt_start = datetime(\n ts_start[5], # year\n ts_start[4], # month\n ts_start[3], # day\n ts_start[2], # hour\n ts_start[1], # minute\n ts_start[0], # second\n )\n\n # Create dataframe and timestamps using start timestamp marker and time steps column\n df = pd.DataFrame(data, dtype=\"float\")\n ts = df.iloc[:, 0].values\n timestamps = [dt_start + timedelta(seconds=t) for t in ts]\n\n # For raw data module\n if multi_header is True:\n # Create multi-index header of channel names and units and time steps index\n units = \" \".join(units).split(\",\")[1:]\n units = [i.strip().split(\"(\")[1][:-1] for i in units]\n header = list(zip(channels, units))\n header.insert(0, (\"Timestamp\", \"\"))\n header = pd.MultiIndex.from_tuples(header, names=[\"channels\", \"units\"])\n df = df.set_index(df.columns[0])\n df.index.name = \"Time (s)\"\n df.insert(loc=0, column=\"Timestamp\", value=timestamps)\n # For screening module\n else:\n # Create single row header of only channel names (i.e. strip out the units)\n # Replace time steps column with timestamps and use range index\n header = [\"Timestamp\"] + channels\n df.iloc[:, 0] = timestamps\n\n # Set desired header (single or multi-index)\n df.columns = header\n\n return df",
"def _read_trace_header(self, header):\n # Set the start position.\n pos = 0\n # Loop over all items in the TRACE_HEADER_FORMAT list which is supposed\n # to be in the correct order.\n for item in TRACE_HEADER_FORMAT:\n length, name, special_format, _ = item\n string = header[pos: pos + length]\n pos += length\n setattr(self, name, unpack_header_value(self.endian, string,\n length, special_format))",
"def _read_file_definition(self):\n row_count = 0\n #\n # THIS METHOD ASSUMES A 14 ROW HEADER\n # If the number of header row lines in the glider ASCII input file changes from 14,\n # this method will NOT WORK\n num_hdr_lines = 14\n\n header_pattern = r'(.*): (.*)$'\n header_re = re.compile(header_pattern)\n\n line = self._stream_handle.readline()\n\n while line and row_count < num_hdr_lines:\n\n match = header_re.match(line)\n\n if match:\n key = match.group(1)\n value = match.group(2)\n value = value.strip()\n\n # update num_hdr_lines based on the header info.\n if key == 'num_ascii_tags':\n # this key has a required value of 14, otherwise we don't know how to parse the file\n if int(value) != num_hdr_lines:\n raise DatasetParserException(\"Header must be %d rows, but it is %s\" % (num_hdr_lines, value))\n\n elif key == 'num_label_lines':\n # this key has a required value of 3, otherwise we don't know how to parse the file\n if int(value) != 3:\n raise DatasetParserException(\"There must be 3 Label lines from the header for this parser\")\n\n elif key == 'sensors_per_cycle':\n # save for future use\n self._header_dict[key] = int(value)\n\n elif key in ['filename_label', 'mission_name', 'fileopen_time']:\n # create a dictionary of these 3 key/value pairs strings from\n # the header rows that need to be saved for future use\n self._header_dict[key] = value\n\n else:\n log.warn(\"Failed to parse header row: %s.\", line)\n\n row_count += 1\n # only read the header lines in this method so make sure we stop\n if row_count < num_hdr_lines:\n line = self._stream_handle.readline()\n\n if row_count < num_hdr_lines:\n log.error('Not enough data lines for a full header')\n raise DatasetParserException('Not enough data lines for a full header')",
"def header(filename):\n\n if not os.path.isfile(filename):\n filename = match_filename(filename,'voltages')\n try:\n print('header length: ', glia.get_header(filename)[1])\n except:\n raise(ValueError, \"Could not get header, are you sure it's a MCD binary export?\")",
"def read_scamp_head(fname, header=None):\n\n with open(fname) as fobj:\n lines = fobj.readlines()\n\n lines = [l.strip() for l in lines if l[0:3] != 'END']\n\n # if header is None an empty FITSHDR is created\n hdr = FITSHDR(header)\n\n for l in lines:\n hdr.add_record(l)\n\n return hdr",
"def _read_header(self):\n f = self._open(self.filename, 'rb')\n idx = 0\n header = b''\n # reading the header \n while idx < 13: \n header += f.readline().rstrip() # removes the \"\\n\\r\" at the end\n idx += 1\n # \"magically\" compute the data offset\n try:\n self._offset_auto = ord(header[2]) + 1856\n except:\n self._offset_auto = header[2] + 1856\n\n\n\n header = header[:self._offset_auto+300] # add an extra random header for offset\n header = re.sub(r'(?P<section>\\[[^\\]]+\\])', '\\n\\g<section>', header.decode('latin1'))\n header = header.splitlines()[1:]\n self.header = dict([self._header_sect2dict(line) for line in header])\n self.shape = np.array(self.header['Acquisition']['areGRBScan'].split(',')[-2:]).astype(np.int)\n f.close()\n\n offset_list = {'auto': self._offset_auto,\n 'from_end': -np.prod(self.shape)*self._nbytes,\n 'from_end_4k': - np.prod(self.shape)*self._nbytes - 4092}\n\n if self._offset_input in offset_list:\n\n self._offset_data = offset_list[self._offset_input]\n if self._offset_input.startswith('from_end'):\n # set the flag to seek from the end of the file.\n self._offset_whence = 2\n elif type(self._offset_input) is int:\n self._offset_data = self._offset_input\n else:\n raise ValueError\n\n \n\n return self.header",
"def _parseHeader(self):\n # Big or little endian for the header.\n self._getEndianess()\n # Read the fixed header.\n self._readFixedHeader()\n # Get the present blockettes.\n self._getBlockettes()\n # Calculate the starttime.\n self._calculateStarttime()",
"def read_data(filename):\n # Store debug mode\n debug = params.debug\n params.debug = None\n\n # Initialize dictionary\n header_dict = {}\n\n headername = filename + \".hdr\"\n\n with open(headername, \"r\") as f:\n # Replace characters for easier parsing\n hdata = f.read()\n hdata = hdata.replace(\",\\n\", \",\")\n hdata = hdata.replace(\"\\n,\", \",\")\n hdata = hdata.replace(\"{\\n\", \"{\")\n hdata = hdata.replace(\"\\n}\", \"}\")\n hdata = hdata.replace(\" \\n \", \"\")\n hdata = hdata.replace(\";\", \"\")\n hdata = hdata.split(\"\\n\")\n\n # Loop through and create a dictionary from the header file\n for i, string in enumerate(hdata):\n if ' = ' in string:\n header_data = string.split(\" = \")\n header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})\n elif ' : ' in string:\n header_data = string.split(\" : \")\n header_dict.update({header_data[0].rstrip(): header_data[1].rstrip()})\n\n # Reformat wavelengths\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\"{\", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\"}\", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].replace(\" \", \"\")\n header_dict[\"wavelength\"] = header_dict[\"wavelength\"].split(\",\")\n\n # Create dictionary of wavelengths\n wavelength_dict = {}\n for j, wavelength in enumerate(header_dict[\"wavelength\"]):\n wavelength_dict.update({float(wavelength): float(j)})\n\n # Replace datatype ID number with the numpy datatype\n dtype_dict = {\"1\": np.uint8, \"2\": np.int16, \"3\": np.int32, \"4\": np.float32, \"5\": np.float64, \"6\": np.complex64,\n \"9\": np.complex128, \"12\": np.uint16, \"13\": np.uint32, \"14\": np.uint64, \"15\": np.uint64}\n header_dict[\"data type\"] = dtype_dict[header_dict[\"data type\"]]\n\n # Read in the data from the file\n raw_data = np.fromfile(filename, header_dict[\"data type\"], -1)\n\n # Reshape the raw data into a datacube array\n array_data = raw_data.reshape(int(header_dict[\"lines\"]),\n int(header_dict[\"bands\"]),\n int(header_dict[\"samples\"])).transpose((0, 2, 1))\n\n if \"default bands\" in header_dict:\n header_dict[\"default bands\"] = header_dict[\"default bands\"].replace(\"{\", \"\")\n header_dict[\"default bands\"] = header_dict[\"default bands\"].replace(\"}\", \"\")\n default_bands = header_dict[\"default bands\"].split(\",\")\n\n pseudo_rgb = cv2.merge((array_data[:, :, int(default_bands[0])],\n array_data[:, :, int(default_bands[1])],\n array_data[:, :, int(default_bands[2])]))\n\n else:\n max_wavelength = max([float(i) for i in wavelength_dict.keys()])\n min_wavelength = min([float(i) for i in wavelength_dict.keys()])\n # Check range of available wavelength\n if max_wavelength >= 635 and min_wavelength <= 490:\n id_red = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 710)\n id_green = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 540)\n id_blue = _find_closest(np.array([float(i) for i in wavelength_dict.keys()]), 480)\n\n pseudo_rgb = cv2.merge((array_data[:, :, [id_blue]],\n array_data[:, :, [id_green]],\n array_data[:, :, [id_red]]))\n else:\n # Otherwise take 3 wavelengths, first, middle and last available wavelength\n id_red = int(header_dict[\"bands\"]) - 1\n id_green = int(id_red / 2)\n pseudo_rgb = cv2.merge((array_data[:, :, [0]],\n array_data[:, :, [id_green]],\n array_data[:, :, [id_red]]))\n\n # Gamma correct pseudo_rgb image\n pseudo_rgb = pseudo_rgb ** (1 / 2.2)\n # Scale each of the channels up to 255\n pseudo_rgb = 
cv2.merge((rescale(pseudo_rgb[:, :, 0]),\n rescale(pseudo_rgb[:, :, 1]),\n rescale(pseudo_rgb[:, :, 2])))\n\n max_wl = float(str(header_dict[\"wavelength\"][-1]).rstrip())\n min_wl = float(str(header_dict[\"wavelength\"][0]).rstrip())\n\n # Create an instance of the spectral_data class\n spectral_array = Spectral_data(array_data=array_data, max_wavelength=max_wl,\n min_wavelength=min_wl, d_type=header_dict[\"data type\"],\n wavelength_dict=wavelength_dict, samples=int(header_dict[\"samples\"]),\n lines=int(header_dict[\"lines\"]), interleave=header_dict[\"interleave\"],\n wavelength_units=header_dict[\"wavelength units\"], array_type=\"datacube\",\n pseudo_rgb=pseudo_rgb, filename=filename)\n\n # Reset debug mode\n params.debug = debug\n\n if params.debug == \"plot\":\n # Gamma correct pseudo_rgb image\n plot_image(pseudo_rgb)\n elif params.debug == \"print\":\n print_image(pseudo_rgb, os.path.join(params.debug_outdir, str(params.device) + \"_pseudo_rgb.png\"))\n\n return spectral_array",
"def _read_header(\n self, header, filename, run_check_acceptability=True, background_lsts=True\n ):\n # get telescope information\n latitude = header[\"latitude\"][()]\n longitude = header[\"longitude\"][()]\n altitude = header[\"altitude\"][()]\n self.telescope_location_lat_lon_alt_degrees = (latitude, longitude, altitude)\n self.instrument = header[\"instrument\"][()].tobytes().decode(\"utf8\")\n self.telescope_name = header[\"telescope_name\"][()].tobytes().decode(\"utf8\")\n\n # get source information\n self.object_name = header[\"object_name\"][()].tobytes().decode(\"utf8\")\n\n # set history appropriately\n self.history = header[\"history\"][()].tobytes().decode(\"utf8\")\n if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):\n self.history += self.pyuvdata_version_str\n\n # check for vis_units\n if \"vis_units\" in header:\n self.vis_units = header[\"vis_units\"][()].tobytes().decode(\"utf8\")\n else:\n # default to uncalibrated data\n self.vis_units = \"UNCALIB\"\n\n # check for optional values\n if \"dut1\" in header:\n self.dut1 = float(header[\"dut1\"][()])\n if \"earth_omega\" in header:\n self.earth_omega = float(header[\"earth_omega\"][()])\n if \"gst0\" in header:\n self.gst0 = float(header[\"gst0\"][()])\n if \"rdate\" in header:\n self.rdate = header[\"rdate\"][()].tobytes().decode(\"utf8\")\n if \"timesys\" in header:\n self.timesys = header[\"timesys\"][()].tobytes().decode(\"utf8\")\n if \"x_orientation\" in header:\n self.x_orientation = header[\"x_orientation\"][()].tobytes().decode(\"utf8\")\n if \"blt_order\" in header:\n blt_order_str = header[\"blt_order\"][()].tobytes().decode(\"utf8\")\n self.blt_order = tuple(blt_order_str.split(\", \"))\n if self.blt_order == (\"bda\",):\n self._blt_order.form = (1,)\n\n if \"antenna_diameters\" in header:\n self.antenna_diameters = header[\"antenna_diameters\"][()]\n if \"uvplane_reference_time\" in header:\n self.uvplane_reference_time = int(header[\"uvplane_reference_time\"][()])\n if \"eq_coeffs\" in header:\n self.eq_coeffs = header[\"eq_coeffs\"][()]\n if \"eq_coeffs_convention\" in header:\n self.eq_coeffs_convention = (\n header[\"eq_coeffs_convention\"][()].tobytes().decode(\"utf8\")\n )\n\n # check for phasing information\n self.phase_type = header[\"phase_type\"][()].tobytes().decode(\"utf8\")\n if self.phase_type == \"phased\":\n self._set_phased()\n self.phase_center_ra = float(header[\"phase_center_ra\"][()])\n self.phase_center_dec = float(header[\"phase_center_dec\"][()])\n self.phase_center_epoch = float(header[\"phase_center_epoch\"][()])\n if \"phase_center_frame\" in header:\n self.phase_center_frame = (\n header[\"phase_center_frame\"][()].tobytes().decode(\"utf8\")\n )\n elif self.phase_type == \"drift\":\n self._set_drift()\n else:\n self._set_unknown_phase_type()\n\n # get antenna arrays\n # cast to native python int type\n self.Nants_data = int(header[\"Nants_data\"][()])\n self.Nants_telescope = int(header[\"Nants_telescope\"][()])\n self.ant_1_array = header[\"ant_1_array\"][:]\n self.ant_2_array = header[\"ant_2_array\"][:]\n self.antenna_names = [\n n.tobytes().decode(\"utf8\") for n in header[\"antenna_names\"][:]\n ]\n self.antenna_numbers = header[\"antenna_numbers\"][:]\n self.antenna_positions = header[\"antenna_positions\"][:]\n\n # set telescope params\n try:\n self.set_telescope_params()\n except ValueError as ve:\n warnings.warn(str(ve))\n\n # get baseline array\n self.baseline_array = self.antnums_to_baseline(\n self.ant_1_array, self.ant_2_array\n )\n 
self.Nbls = len(np.unique(self.baseline_array))\n\n # get uvw array\n self.uvw_array = header[\"uvw_array\"][:, :]\n\n # get time information\n self.time_array = header[\"time_array\"][:]\n integration_time = header[\"integration_time\"]\n self.integration_time = integration_time[:]\n proc = None\n if \"lst_array\" in header:\n self.lst_array = header[\"lst_array\"][:]\n # check that lst_array in file is self-consistent\n if run_check_acceptability:\n (\n latitude,\n longitude,\n altitude,\n ) = self.telescope_location_lat_lon_alt_degrees\n lst_array = uvutils.get_lst_for_time(\n self.time_array, latitude, longitude, altitude\n )\n if not np.all(\n np.isclose(\n self.lst_array,\n lst_array,\n rtol=self._lst_array.tols[0],\n atol=self._lst_array.tols[1],\n )\n ):\n warnings.warn(\n \"LST values stored in {file} are not self-consistent \"\n \"with time_array and telescope location. Consider \"\n \"recomputing with utils.get_lst_for_time.\".format(file=filename)\n )\n else:\n # compute lst_array from time_array and telescope location\n proc = self.set_lsts_from_time_array(background=background_lsts)\n\n # get frequency information\n self.freq_array = header[\"freq_array\"][:, :]\n self.channel_width = float(header[\"channel_width\"][()])\n self.spw_array = header[\"spw_array\"][:]\n\n # get polarization information\n self.polarization_array = header[\"polarization_array\"][:]\n\n # get data shapes\n self.Nfreqs = int(header[\"Nfreqs\"][()])\n self.Npols = int(header[\"Npols\"][()])\n self.Ntimes = int(header[\"Ntimes\"][()])\n self.Nblts = int(header[\"Nblts\"][()])\n self.Nspws = int(header[\"Nspws\"][()])\n\n # get extra_keywords\n if \"extra_keywords\" in header:\n self.extra_keywords = {}\n for key in header[\"extra_keywords\"].keys():\n if header[\"extra_keywords\"][key].dtype.type in (np.string_, np.object_):\n self.extra_keywords[key] = (\n header[\"extra_keywords\"][key][()].tobytes().decode(\"utf8\")\n )\n else:\n self.extra_keywords[key] = header[\"extra_keywords\"][key][()]\n\n if proc is not None:\n # if lsts are in the background wait for them to return\n proc.join()\n\n return",
"def read_additional_info_from_header(wcsprm, hdr, RA_input=None, DEC_input=None, projection_ra=None, projection_dec=None, ignore_header_rot=False, radius = -1., silent=False):\n fov_radius = 4 #arcmin radius to include field of view\n if(radius > 0):\n fov_radius = radius\n INCREASE_FOV_FLAG = False # increase the field to view by 50% to search in catalog if position on sky is inaccurate\n PIXSCALE_UNCLEAR = False\n\n keywords_check = [\"PIXSCALE\", \"NAXIS1\", \"NAXIS2\", \"RA\", \"DEC\"] #list of possible keywords the scs parser might miss\n keywords_present = [] # list of keywords that are actually present\n for i in keywords_check:\n if(i in hdr.keys()):\n keywords_present.append(i)\n\n if(\"NAXIS1\" not in keywords_present or \"NAXIS2\" not in keywords_present ):\n print(\"ERROR: NAXIS1 or NAXIS2 missing in file. Please add!\")\n else:\n axis1 = hdr[\"NAXIS1\"]\n axis2 = hdr[\"NAXIS2\"]\n\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n if((np.abs(wcs_pixscale[0])) < 1e-7 or (np.abs(wcs_pixscale[1])) < 1e-7 or\n (np.abs(wcs_pixscale[0])) > 5e-3 or (np.abs(wcs_pixscale[1])) > 5e-3):\n if(not silent):\n print(\"pixelscale is completely unrealistic. Will guess\")\n print(wcs_pixscale)\n guess = 8.43785734e-05\n #guess = 6.94444461259988e-05\n wcsprm.pc = [[1,0],[0,1]]\n wcsprm.cdelt = [guess, guess]\n if(not silent):\n print(\"Changed pixelscale to {:.3g} deg/arcsec\".format(guess))\n PIXSCALE_UNCLEAR = True\n if(ignore_header_rot):\n wcsprm.pc = [[1,0],[0,1]]\n #wcsprm.cdelt = [8.0006871225376e-05, 8.0006871225376e-05]\n if(\"PIXSCALE\" in keywords_present):\n #normal around 0.450000 / arcsec/pixel, for now i assume arcsec per pixel\n pixscale = hdr[\"PIXSCALE\"]\n if(\"deg\" in hdr.comments['PIXSCALE']): #correction if in deg/pixel\n pixscale = pixscale *60*60\n x_size = axis1 * pixscale /60# arcmin\n y_size = axis2 * pixscale /60# arcmin\n\n if 20 > x_size > 0.5 and 20 > y_size> 0.5 :\n #pixscale is sensical\n #Now: is the pixscale of the current wcs realistic?\n pc = wcsprm.get_pc()\n cdelt = wcsprm.get_cdelt()\n wcs_pixscale = (pc @ cdelt )\n pixscale = pixscale /60 /60 #pixelscale now in deg / pixel\n if( wcs_pixscale[0]/pixscale < 0.1 or wcs_pixscale[0]/pixscale > 10 or wcs_pixscale[1]/pixscale < 0.1 or wcs_pixscale[1]/pixscale > 10):\n #check if there is a huge difference in the scales\n #if yes then replace the wcs scale with the pixelscale info\n wcsprm.pc = [[1,0],[0,1]]\n\n wcsprm.cdelt = [pixscale, pixscale]\n if(not silent):\n print(\"changed pixelscale to {:.3g} deg/arcsec\".format(pixscale))\n fov_radius = (x_size/2+y_size/2)/np.sqrt(2) #try to get corners\n PIXSCALE_UNCLEAR=True\n\n\n if(np.array_equal(wcsprm.crpix, [0,0])):\n #centrl pixel seems to not be in header, better set in middle\n wcsprm.crpix = [axis1/2, axis2/2]\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n ###sky position not found. Maybe there is some RA and DEC info in the header:\n INCREASE_FOV_FLAG = True\n if (\"RA\" in keywords_present and \"DEC\" in keywords_present): ##carefull degree and hourangle!!!\n wcsprm.crval = [hdr[\"RA\"], hdr[\"DEC\"]]\n if(not silent):\n print(\"Found ra and dec information in the header\")\n print(wcsprm.crval)\n if(not silent):\n print(\"Is this position within the field of view in degrees? otherwise it will not work. 
In that case give a more accurate position as an argument: -ra XX -dec XX both in degrees\")\n\n if (RA_input is not None): #use user input if provided\n wcsprm.crval = [RA_input, wcsprm.crval[1]]\n wcsprm.crpix = [axis1/2, wcsprm.crpix[1]]\n\n if (DEC_input is not None):\n wcsprm.crval = [wcsprm.crval[0], DEC_input]\n wcsprm.crpix = [wcsprm.crpix[0], axis2/2, ]\n\n\n if(np.array_equal(wcsprm.crval, [0,0] )):\n print(\">>>>>>>>>WARNING\")\n print(\"No rough sky position was found for this object. Please add as -ra XX -dex XX both in degress. Adding the position as keywords in the fits file header will also work. The keywords are RA and DEC. The program expects the values in degrees. \")\n\n if(np.array_equal(wcsprm.ctype, [\"\",\"\"])):\n INCREASE_FOV_FLAG = True\n if(projection_ra is not None and projection_dec is not None):\n wcsprm.ctype = [ projection_ra, projection_dec]\n else:\n wcsprm.ctype = [ 'RA---TAN', 'DEC--TAN'] #this is a guess\n print(\">>>>>>>>>WARNING\")\n print(\"The wcs in the header has no projection specified. Will guess 'RA---TAN', 'DEC--TAN' (gnomonic projection) if this is incorrect the fit will fail. You can specify the projection via -projection_ra XX -projection_dec XX\")\n print(\"make sure you do not use quotations, example: -proj1 RA---TAN -proj2 DEC--TAN\")\n if(INCREASE_FOV_FLAG):\n fov_radius = fov_radius*2.5\n return wcsprm, fov_radius, INCREASE_FOV_FLAG, PIXSCALE_UNCLEAR",
"def header(filename):\n\n if not os.path.isfile(filename):\n filename = glia.match_filename(filename,'voltages')\n try:\n print('header length: ', glia.get_header(filename)[1])\n except:\n raise(ValueError, \"Could not get header, are you sure it's a MCD binary export?\")",
"def fix_header(file_path):\n logging.warning(\"Couldn't open edf {}. Trying to fix the header ...\".format(file_path))\n f = open(file_path, 'rb')\n content = f.read()\n f.close()\n \n header = content[:256]\n # print(header)\n\n # version = header[:8].decode('ascii')\n # patient_id = header[8:88].decode('ascii')\n # [age] = re.findall(\"Age:(\\d+)\", patient_id)\n # [sex] = re.findall(\"\\s\\w\\s\", patient_id)\n\n recording_id = header[88:168].decode('ascii')\n # startdate = header[168:176]\n # starttime = header[176:184]\n # n_bytes_in_header = header[184:192].decode('ascii')\n # reserved = header[192:236].decode('ascii')\n # THIS IS MESSED UP IN THE HEADER DESCRIPTION\n # duration = header[236:244].decode('ascii')\n # n_data_records = header[244:252].decode('ascii')\n # n_signals = header[252:].decode('ascii')\n \n date = recording_id[10:21]\n day, month, year = date.split('-')\n if month == 'JAN':\n month = '01'\n\n elif month == 'FEB':\n month = '02'\n\n elif month == 'MAR':\n month = '03'\n\n elif month == 'APR':\n month = '04'\n\n elif month == 'MAY':\n month = '05'\n\n elif month == 'JUN':\n month = '06'\n\n elif month == 'JUL':\n month = '07'\n\n elif month == 'AUG':\n month = '08'\n\n elif month == 'SEP':\n month = '09'\n\n elif month == 'OCT':\n month = '10'\n\n elif month == 'NOV':\n month = '11'\n\n elif month == 'DEC':\n month = '12'\n\n year = year[-2:]\n date = '.'.join([day, month, year])\n \n fake_time = '00.00.00'\n \n # n_bytes = int(n_bytes_in_header) - 256\n # n_signals = int(n_bytes / 256)\n # n_signals = str(n_signals) + ' '\n # n_signals = n_signals[:4]\n \n # new_header = version + patient_id + recording_id + date + fake_time + n_bytes_in_header + reserved +\n # new_header += n_data_records + duration + n_signals\n # new_content = (bytes(new_header, encoding=\"ascii\") + content[256:])\n\n new_content = header[:168] + bytes(date + fake_time, encoding=\"ascii\") + header[184:] + content[256:]\n\n # f = open(file_path, 'wb')\n # f.write(new_content)\n # f.close()",
"def _load(self):\n # Extract the ASCII header (5 first lines)\n with open(self._xst_bin, 'rb') as f:\n header = list(islice(f, 0, 5))\n assert header[0] == b'HeaderStart\\n',\\\n 'Wrong header start'\n assert header[-1] == b'HeaderStop\\n',\\\n 'Wrong header stop'\n header = [s.decode('utf-8') for s in header]\n hd_size = sum([len(s) for s in header])\n\n # Parse informations into a metadata dictionnary\n keys = ['freq', 'ma', 'accu']\n search = ['Freq.List', 'Mr.List', 'accumulation']\n types = ['float64', 'int', 'int']\n for key, word, typ in zip(keys, search, types):\n for h in header:\n if word in h:\n self.meta[key] = np.array(\n h.split('=')[1].split(','),\n dtype=typ\n )\n\n # Deduce the dtype for decoding\n n_ma = self.meta['ma'].size\n n_sb = self.meta['freq'].size\n dtype = np.dtype(\n [('jd', 'float64'),\n ('data', 'complex64', (n_sb, n_ma*n_ma*2 + n_ma))]\n )\n\n # Decoding the binary file\n tmp = np.memmap(\n filename=self._xst_bin,\n dtype='int8',\n mode='r',\n offset=hd_size\n )\n decoded = tmp.view(dtype)\n\n self.data = decoded['data'] / self.meta['accu']\n self.time = Time(decoded['jd'], format='jd', precision=0)\n\n return",
"def spectrum_tsv3(f):\n skip = 0\n while True:\n try:\n wav, flux, dflux = np.loadtxt(f, skiprows=skip, unpack=True)\n\n except ValueError:\n # If the first lines have a header\n skip += 1\n\n else:\n break\n\n return wav, flux",
"def spectrum_tsv(f):\n\n skip = 0\n while True:\n try:\n wav, flux = np.loadtxt(f, skiprows=skip, unpack=True)\n\n except ValueError:\n # If the first lines have a header\n skip += 1\n\n else:\n break\n\n return wav, flux",
"def _read_header(self):\n\n stream = self.stream\n\n self._seek_to_table(tables.header)\n\n # Read header[0 ... 1]\n checksum = stream.read_unsigned_byte4()\n design_font_size = stream.read_fix_word()\n\n # Read header[2 ... 11] if there\n character_info_table_position = self.table_pointers[\n tables.character_info]\n position = stream.tell()\n if position < character_info_table_position:\n character_coding_scheme = stream.read_bcpl()\n else:\n character_coding_scheme = None\n\n # Read header[12 ... 16] if there\n character_coding_scheme_length = 40 # bytes (11 - 2 + 1) * 4 = 10 * 4\n position += character_coding_scheme_length\n if position < character_info_table_position:\n family = stream.read_bcpl(position)\n else:\n family = None\n\n # Read header[12 ... 16] if there\n family_length = 20 # bytes (16 - 12 +1) * 4 = 5 * 4\n position += family_length\n if position < character_info_table_position:\n seven_bit_safe_flag = stream.read_unsigned_byte1(position)\n stream.read_unsigned_byte2()\n face = stream.read_unsigned_byte1()\n # Fixme: complete\n\n # don't read header [18 ... whatever]\n\n self.tfm = Tfm(self.font_name,\n self.filename,\n self.smallest_character_code,\n self.largest_character_code,\n checksum,\n design_font_size,\n character_coding_scheme,\n family)"
] | [
"0.6155055",
"0.5993816",
"0.5884923",
"0.58070946",
"0.55780053",
"0.5524579",
"0.55149055",
"0.5444751",
"0.5440984",
"0.5410213",
"0.5406023",
"0.5403349",
"0.540114",
"0.53573203",
"0.52880806",
"0.52805924",
"0.52777493",
"0.5273233",
"0.5264056",
"0.52473116",
"0.52338374",
"0.5220043",
"0.5213736",
"0.5213252",
"0.5208832",
"0.52087724",
"0.5195103",
"0.51939654",
"0.5188107",
"0.51814973"
] | 0.66186225 | 0 |
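Minimal illustrative sketch (not part of the dataset rows): the next record pairs an XTRX-header query with a stubbed readHeaderXTRX document, and its negatives include a readHeader dispatcher that picks an XTR or XTRX parser per file. The sketch below shows that dispatch pattern in Python; every class, method, and attribute name here is an assumption made for illustration only, not the dataset's actual API.

from typing import Dict, List, Tuple


class HeaderReaderSketch:
    """Dispatches between legacy XTR and newer XTRX header files (illustrative only)."""

    def __init__(self, header_files: List[str]) -> None:
        self.header_files = header_files
        self.headers_list: List[Dict] = []
        self.chan_headers_list: List[List[Dict]] = []

    def read_headers(self) -> None:
        # Choose a parser per file based on its extension, mirroring the
        # "xtrx in headerFile.lower()" check seen in the next record's negatives.
        for header_file in self.header_files:
            if header_file.lower().endswith(".xtrx"):
                headers, chan_headers = self._read_xtrx(header_file)
            else:
                headers, chan_headers = self._read_xtr(header_file)
            self.headers_list.append(headers)
            self.chan_headers_list.append(chan_headers)

    def _read_xtr(self, header_file: str) -> Tuple[Dict, List[Dict]]:
        # Placeholder: a real XTR parser would read the [FILE], [CHANNAME]
        # and [DATA] sections, as the XTR reader quoted in the next record does.
        raise NotImplementedError("XTR parsing is not shown in this sketch")

    def _read_xtrx(self, header_file: str) -> Tuple[Dict, List[Dict]]:
        # Matches the record's document: XTRX support is still a stub.
        raise NotImplementedError(
            "Support for XTRX files has not yet been implemented"
        )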
Read an XTRX header file. XTRX are newer header files and will supersede XTR | def readHeaderXTRX(self, headerFile):\n    raise NotImplementedError("Support for XTRX files has not yet been implemented") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readHeader(self) -> None:\n # read header files\n self.headersList = []\n self.chanHeadersList = []\n for headerFile in self.headerF:\n if \"xtrx\" in headerFile.lower():\n headers, chanHeaders = self.readHeaderXTRX(headerFile)\n else:\n headers, chanHeaders = self.readHeaderXTR(headerFile)\n self.headersList.append(headers)\n self.chanHeadersList.append(chanHeaders)\n\n # check to make sure no gaps, calculate out the sample ranges and list the data files for each sample\n self.mergeHeaders(self.headersList, self.chanHeadersList)",
"def _read_hdr_file(ktlx_file):\r\n with open(ktlx_file, 'rb') as f:\r\n\r\n hdr = {}\r\n assert f.tell() == 0\r\n\r\n hdr['file_guid'] = hexlify(f.read(16))\r\n hdr['file_schema'], = unpack('<H', f.read(2))\r\n if not hdr['file_schema'] in (1, 3, 7, 8, 9):\r\n raise NotImplementedError('Reading header not implemented for ' +\r\n 'file_schema ' + str(hdr['file_schema']))\r\n\r\n hdr['base_schema'], = unpack('<H', f.read(2))\r\n if not hdr['base_schema'] == 1: # p.3: base_schema 0 is rare, I think\r\n raise NotImplementedError('Reading header not implemented for ' +\r\n 'base_schema ' + str(hdr['base_schema']))\r\n\r\n hdr['creation_time'] = datetime.fromtimestamp(unpack('<i',\r\n f.read(4))[0])\r\n hdr['patient_id'], = unpack('<i', f.read(4))\r\n hdr['study_id'], = unpack('<i', f.read(4))\r\n hdr['pat_last_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['pat_first_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['pat_middle_name'] = _make_str(unpack('c' * 80, f.read(80)))\r\n hdr['patient_id'] = _make_str(unpack('c' * 80, f.read(80)))\r\n assert f.tell() == 352\r\n\r\n if hdr['file_schema'] >= 7:\r\n hdr['sample_freq'], = unpack('<d', f.read(8))\r\n n_chan, = unpack('<i', f.read(4))\r\n hdr['num_channels'] = n_chan\r\n hdr['deltabits'], = unpack('<i', f.read(4))\r\n hdr['phys_chan'] = unpack('<' + 'i' * hdr['num_channels'],\r\n f.read(hdr['num_channels'] * 4))\r\n\r\n f.seek(4464)\r\n hdr['headbox_type'] = unpack('<' + 'i' * 4, f.read(16))\r\n hdr['headbox_sn'] = unpack('<' + 'i' * 4, f.read(16))\r\n hdr['headbox_sw_version'] = _make_str(unpack('c' * 40, f.read(40)))\r\n hdr['dsp_hw_version'] = _make_str(unpack('c' * 10, f.read(10)))\r\n hdr['dsp_sw_version'] = _make_str(unpack('c' * 10, f.read(10)))\r\n hdr['discardbits'], = unpack('<i', f.read(4))\r\n\r\n if hdr['file_schema'] >= 8:\r\n hdr['shorted'] = unpack('<' + 'h' * 1024, f.read(2048))[:n_chan]\r\n hdr['frequency_factor'] = unpack('<' + 'h' * 1024,\r\n f.read(2048))[:n_chan]\r\n return hdr",
"def readHeaderXTR(self, headerFile: str) -> None:\n with open(headerFile, \"r\") as f:\n lines = f.readlines()\n sectionLines = {}\n # let's get data\n for line in lines:\n line = line.strip()\n line = line.replace(\"'\", \" \")\n # continue if line is empty\n if line == \"\":\n continue\n if \"[\" in line:\n sec = line[1:-1]\n sectionLines[sec] = []\n else:\n sectionLines[sec].append(line)\n # the base class is built around a set of headers based on ATS headers\n # though this is a bit more work here, it saves lots of code repetition\n headers = {}\n # recording information (start_time, start_date, stop_time, stop_date, ats_data_file)\n fileLine = sectionLines[\"FILE\"][0]\n fileSplit = fileLine.split()\n headers[\"sample_freq\"] = np.absolute(float(fileSplit[-1]))\n timeLine = sectionLines[\"FILE\"][2]\n timeSplit = timeLine.split()\n # these are the unix time stamps\n startDate = float(timeSplit[1] + \".\" + timeSplit[2])\n datetimeStart = datetime.utcfromtimestamp(startDate)\n stopDate = float(timeSplit[3] + \".\" + timeSplit[4])\n datetimeStop = datetime.utcfromtimestamp(stopDate)\n headers[\"start_date\"] = datetimeStart.strftime(\"%Y-%m-%d\")\n headers[\"start_time\"] = datetimeStart.strftime(\"%H:%M:%S.%f\")\n headers[\"stop_date\"] = datetimeStop.strftime(\"%Y-%m-%d\")\n headers[\"stop_time\"] = datetimeStop.strftime(\"%H:%M:%S.%f\")\n # here calculate number of samples\n deltaSeconds = (datetimeStop - datetimeStart).total_seconds()\n # calculate number of samples - have to add one because the time given in SPAM recording is the actual time of the last sample\n numSamples = int(deltaSeconds * headers[\"sample_freq\"]) + 1\n # put these in headers for ease of future calculations in merge headers\n headers[\"num_samples\"] = numSamples\n # spam datasets only have the one data file for all channels\n headers[\"ats_data_file\"] = fileSplit[1]\n # data information (meas_channels, sample_freq)\n chanLine = sectionLines[\"CHANNAME\"][0]\n # this gets reformatted to an int later\n headers[\"meas_channels\"] = chanLine.split()[1]\n numChansInt = int(headers[\"meas_channels\"])\n # deal with the channel headers\n chanHeaders = []\n for iChan in range(0, numChansInt):\n chanH = self.chanDefaults()\n # set the sample frequency from the main headers\n chanH[\"sample_freq\"] = headers[\"sample_freq\"]\n # line data - read through the data in the correct channel order\n chanLine = sectionLines[\"CHANNAME\"][iChan + 1]\n chanSplit = chanLine.split()\n dataLine = sectionLines[\"DATA\"][iChan + 1]\n dataSplit = dataLine.split()\n # channel input information (gain_stage1, gain_stage2, hchopper, echopper)\n chanH[\"gain_stage1\"] = 1\n chanH[\"gain_stage2\"] = 1\n # channel output information (sensor_type, channel_type, ts_lsb, pos_x1, pos_x2, pos_y1, pos_y2, pos_z1, pos_z2, sensor_sernum)\n chanH[\"ats_data_file\"] = fileSplit[1]\n chanH[\"num_samples\"] = numSamples\n\n # channel information\n # spams often use Bx, By - use H within the software as a whole\n chanH[\"channel_type\"] = consistentChans(chanSplit[2])\n # the sensor number is a bit of a hack - want MFSXXe or something - add MFS in front of the sensor number - this is liable to break\n # at the same time, set the chopper\n calLine = sectionLines[\"200{}003\".format(iChan + 1)][0]\n calSplit = calLine.split()\n if isMagnetic(chanH[\"channel_type\"]):\n chanH[\"sensor_sernum\"] = calSplit[\n 2\n ] # the last three digits is the serial number\n sensorType = calSplit[1].split(\"_\")[1][-2:]\n chanH[\"sensor_type\"] = 
\"MFS{:02d}\".format(int(sensorType))\n if \"LF\" in calSplit[1]:\n chanH[\"hchopper\"] = 1\n else:\n chanH[\"sensor_type\"] = \"ELC00\"\n if \"LF\" in calLine:\n chanH[\"echopper\"] = 1\n\n # data is raw voltage of sensors\n # both E and H fields need polarity reversal (from email with Reinhard)\n # get scaling from headers\n scaling = float(dataSplit[-2])\n if isElectric(chanH[\"channel_type\"]):\n # the factor of 1000 is not entirely clear\n lsb = 1000.0 * scaling\n # volts to millivolts and a minus to switch polarity giving data in mV\n lsb = -1000.0 * lsb\n else:\n # volts to millivolts and a minus to switch polarity giving data in mV\n # scaling in header file is ignored because it duplicates static gain correction in calibration\n lsb = -1000.0\n chanH[\"ts_lsb\"] = lsb\n\n # the distances\n if chanSplit[2] == \"Ex\":\n chanH[\"pos_x1\"] = float(dataSplit[4]) / 2\n chanH[\"pos_x2\"] = chanH[\"pos_x1\"]\n if chanSplit[2] == \"Ey\":\n chanH[\"pos_y1\"] = float(dataSplit[4]) / 2\n chanH[\"pos_y2\"] = chanH[\"pos_y1\"]\n if chanSplit[2] == \"Ez\":\n chanH[\"pos_z1\"] = float(dataSplit[4]) / 2\n chanH[\"pos_z2\"] = chanH[\"pos_z1\"]\n\n # append chanHeaders to the list\n chanHeaders.append(chanH)\n\n # check information from raw file headers\n self.headersFromRawFile(headers[\"ats_data_file\"], headers)\n # return the headers and chanHeaders from this file\n return headers, chanHeaders",
"def _read_headers(self):\n # Read the textual header.\n self._read_textual_header()\n # The next 400 bytes are from the Binary File Header.\n binary_file_header = self.file.read(400)\n bfh = SEGYBinaryFileHeader(binary_file_header, self.endian)\n self.binary_file_header = bfh\n self.data_encoding = self.binary_file_header.data_sample_format_code\n # If bytes 3506-3506 are not zero, an extended textual header follows\n # which is not supported so far.\n if bfh.number_of_3200_byte_ext_file_header_records_following != 0:\n msg = 'Extended textual headers are supported yet. ' + \\\n 'Please contact the developers.'\n raise NotImplementedError(msg)",
"def read_headers(filelike):\n return reader.Reader.read_headers(filelike).datafile",
"def _read_header(\n self, header, filename, run_check_acceptability=True, background_lsts=True\n ):\n # get telescope information\n latitude = header[\"latitude\"][()]\n longitude = header[\"longitude\"][()]\n altitude = header[\"altitude\"][()]\n self.telescope_location_lat_lon_alt_degrees = (latitude, longitude, altitude)\n self.instrument = header[\"instrument\"][()].tobytes().decode(\"utf8\")\n self.telescope_name = header[\"telescope_name\"][()].tobytes().decode(\"utf8\")\n\n # get source information\n self.object_name = header[\"object_name\"][()].tobytes().decode(\"utf8\")\n\n # set history appropriately\n self.history = header[\"history\"][()].tobytes().decode(\"utf8\")\n if not uvutils._check_history_version(self.history, self.pyuvdata_version_str):\n self.history += self.pyuvdata_version_str\n\n # check for vis_units\n if \"vis_units\" in header:\n self.vis_units = header[\"vis_units\"][()].tobytes().decode(\"utf8\")\n else:\n # default to uncalibrated data\n self.vis_units = \"UNCALIB\"\n\n # check for optional values\n if \"dut1\" in header:\n self.dut1 = float(header[\"dut1\"][()])\n if \"earth_omega\" in header:\n self.earth_omega = float(header[\"earth_omega\"][()])\n if \"gst0\" in header:\n self.gst0 = float(header[\"gst0\"][()])\n if \"rdate\" in header:\n self.rdate = header[\"rdate\"][()].tobytes().decode(\"utf8\")\n if \"timesys\" in header:\n self.timesys = header[\"timesys\"][()].tobytes().decode(\"utf8\")\n if \"x_orientation\" in header:\n self.x_orientation = header[\"x_orientation\"][()].tobytes().decode(\"utf8\")\n if \"blt_order\" in header:\n blt_order_str = header[\"blt_order\"][()].tobytes().decode(\"utf8\")\n self.blt_order = tuple(blt_order_str.split(\", \"))\n if self.blt_order == (\"bda\",):\n self._blt_order.form = (1,)\n\n if \"antenna_diameters\" in header:\n self.antenna_diameters = header[\"antenna_diameters\"][()]\n if \"uvplane_reference_time\" in header:\n self.uvplane_reference_time = int(header[\"uvplane_reference_time\"][()])\n if \"eq_coeffs\" in header:\n self.eq_coeffs = header[\"eq_coeffs\"][()]\n if \"eq_coeffs_convention\" in header:\n self.eq_coeffs_convention = (\n header[\"eq_coeffs_convention\"][()].tobytes().decode(\"utf8\")\n )\n\n # check for phasing information\n self.phase_type = header[\"phase_type\"][()].tobytes().decode(\"utf8\")\n if self.phase_type == \"phased\":\n self._set_phased()\n self.phase_center_ra = float(header[\"phase_center_ra\"][()])\n self.phase_center_dec = float(header[\"phase_center_dec\"][()])\n self.phase_center_epoch = float(header[\"phase_center_epoch\"][()])\n if \"phase_center_frame\" in header:\n self.phase_center_frame = (\n header[\"phase_center_frame\"][()].tobytes().decode(\"utf8\")\n )\n elif self.phase_type == \"drift\":\n self._set_drift()\n else:\n self._set_unknown_phase_type()\n\n # get antenna arrays\n # cast to native python int type\n self.Nants_data = int(header[\"Nants_data\"][()])\n self.Nants_telescope = int(header[\"Nants_telescope\"][()])\n self.ant_1_array = header[\"ant_1_array\"][:]\n self.ant_2_array = header[\"ant_2_array\"][:]\n self.antenna_names = [\n n.tobytes().decode(\"utf8\") for n in header[\"antenna_names\"][:]\n ]\n self.antenna_numbers = header[\"antenna_numbers\"][:]\n self.antenna_positions = header[\"antenna_positions\"][:]\n\n # set telescope params\n try:\n self.set_telescope_params()\n except ValueError as ve:\n warnings.warn(str(ve))\n\n # get baseline array\n self.baseline_array = self.antnums_to_baseline(\n self.ant_1_array, self.ant_2_array\n )\n 
self.Nbls = len(np.unique(self.baseline_array))\n\n # get uvw array\n self.uvw_array = header[\"uvw_array\"][:, :]\n\n # get time information\n self.time_array = header[\"time_array\"][:]\n integration_time = header[\"integration_time\"]\n self.integration_time = integration_time[:]\n proc = None\n if \"lst_array\" in header:\n self.lst_array = header[\"lst_array\"][:]\n # check that lst_array in file is self-consistent\n if run_check_acceptability:\n (\n latitude,\n longitude,\n altitude,\n ) = self.telescope_location_lat_lon_alt_degrees\n lst_array = uvutils.get_lst_for_time(\n self.time_array, latitude, longitude, altitude\n )\n if not np.all(\n np.isclose(\n self.lst_array,\n lst_array,\n rtol=self._lst_array.tols[0],\n atol=self._lst_array.tols[1],\n )\n ):\n warnings.warn(\n \"LST values stored in {file} are not self-consistent \"\n \"with time_array and telescope location. Consider \"\n \"recomputing with utils.get_lst_for_time.\".format(file=filename)\n )\n else:\n # compute lst_array from time_array and telescope location\n proc = self.set_lsts_from_time_array(background=background_lsts)\n\n # get frequency information\n self.freq_array = header[\"freq_array\"][:, :]\n self.channel_width = float(header[\"channel_width\"][()])\n self.spw_array = header[\"spw_array\"][:]\n\n # get polarization information\n self.polarization_array = header[\"polarization_array\"][:]\n\n # get data shapes\n self.Nfreqs = int(header[\"Nfreqs\"][()])\n self.Npols = int(header[\"Npols\"][()])\n self.Ntimes = int(header[\"Ntimes\"][()])\n self.Nblts = int(header[\"Nblts\"][()])\n self.Nspws = int(header[\"Nspws\"][()])\n\n # get extra_keywords\n if \"extra_keywords\" in header:\n self.extra_keywords = {}\n for key in header[\"extra_keywords\"].keys():\n if header[\"extra_keywords\"][key].dtype.type in (np.string_, np.object_):\n self.extra_keywords[key] = (\n header[\"extra_keywords\"][key][()].tobytes().decode(\"utf8\")\n )\n else:\n self.extra_keywords[key] = header[\"extra_keywords\"][key][()]\n\n if proc is not None:\n # if lsts are in the background wait for them to return\n proc.join()\n\n return",
"def _read_header(self, stream):\n return",
"def _readCommonHeader(self):\n for i in range(self.ignore_header_lines):\n self.ignored_header_lines.append(nappy.utils.text_parser.readItemFromLine(self.file.readline()))\n \n self._readTopLine()\n self.ONAME = nappy.utils.text_parser.readItemFromLine(self.file.readline(), str)\n self.ORG = nappy.utils.text_parser.readItemFromLine(self.file.readline(), str)\n self.SNAME = nappy.utils.text_parser.readItemFromLine(self.file.readline(), str)\n self.MNAME = nappy.utils.text_parser.readItemFromLine(self.file.readline(), str)\n (self.IVOL, self.NVOL) = nappy.utils.text_parser.readItemsFromLine(self.file.readline(), 2, int)\n dates = nappy.utils.text_parser.readItemsFromLine(self.file.readline(), 6, int)\n (self.DATE, self.RDATE) = (dates[:3], dates[3:])\n self.NLHEAD += self.ignore_header_lines",
"def parse_headers(self):\n\n logger.debug(f\"parse headers of {self.path}\")\n with open(self.path, 'rb') as f:\n parser = BinaryParser(f)\n magic, version_major, version_minor = parser.unpack(\"<2sBB\")\n if magic != b'RW':\n raise ValueError(\"invalid magic code\")\n self.version = (version_major, version_minor)\n\n if version_major == 1:\n parser.seek(8)\n elif version_major == 2:\n parser.seek(100)\n elif version_major == 3:\n parser.seek(268)\n else:\n raise ValueError(f\"unsupported WAD version: {version_major}.{version_minor}\")\n\n entry_count, = parser.unpack(\"<I\")\n\n if version_major == 1:\n self.files = [WadFileHeader(*parser.unpack(\"<QIIII\")) for _ in range(entry_count)]\n else:\n self.files = [WadFileHeader(*parser.unpack(\"<QIIIBBBBQ\")) for _ in range(entry_count)]",
"def fix_header(file_path):\n logging.warning(\"Couldn't open edf {}. Trying to fix the header ...\".format(file_path))\n f = open(file_path, 'rb')\n content = f.read()\n f.close()\n \n header = content[:256]\n # print(header)\n\n # version = header[:8].decode('ascii')\n # patient_id = header[8:88].decode('ascii')\n # [age] = re.findall(\"Age:(\\d+)\", patient_id)\n # [sex] = re.findall(\"\\s\\w\\s\", patient_id)\n\n recording_id = header[88:168].decode('ascii')\n # startdate = header[168:176]\n # starttime = header[176:184]\n # n_bytes_in_header = header[184:192].decode('ascii')\n # reserved = header[192:236].decode('ascii')\n # THIS IS MESSED UP IN THE HEADER DESCRIPTION\n # duration = header[236:244].decode('ascii')\n # n_data_records = header[244:252].decode('ascii')\n # n_signals = header[252:].decode('ascii')\n \n date = recording_id[10:21]\n day, month, year = date.split('-')\n if month == 'JAN':\n month = '01'\n\n elif month == 'FEB':\n month = '02'\n\n elif month == 'MAR':\n month = '03'\n\n elif month == 'APR':\n month = '04'\n\n elif month == 'MAY':\n month = '05'\n\n elif month == 'JUN':\n month = '06'\n\n elif month == 'JUL':\n month = '07'\n\n elif month == 'AUG':\n month = '08'\n\n elif month == 'SEP':\n month = '09'\n\n elif month == 'OCT':\n month = '10'\n\n elif month == 'NOV':\n month = '11'\n\n elif month == 'DEC':\n month = '12'\n\n year = year[-2:]\n date = '.'.join([day, month, year])\n \n fake_time = '00.00.00'\n \n # n_bytes = int(n_bytes_in_header) - 256\n # n_signals = int(n_bytes / 256)\n # n_signals = str(n_signals) + ' '\n # n_signals = n_signals[:4]\n \n # new_header = version + patient_id + recording_id + date + fake_time + n_bytes_in_header + reserved +\n # new_header += n_data_records + duration + n_signals\n # new_content = (bytes(new_header, encoding=\"ascii\") + content[256:])\n\n new_content = header[:168] + bytes(date + fake_time, encoding=\"ascii\") + header[184:] + content[256:]\n\n # f = open(file_path, 'wb')\n # f.write(new_content)\n # f.close()",
"def _read_header(self):\n f = self._open(self.filename, 'rb')\n idx = 0\n header = b''\n # reading the header \n while idx < 13: \n header += f.readline().rstrip() # removes the \"\\n\\r\" at the end\n idx += 1\n # \"magically\" compute the data offset\n try:\n self._offset_auto = ord(header[2]) + 1856\n except:\n self._offset_auto = header[2] + 1856\n\n\n\n header = header[:self._offset_auto+300] # add an extra random header for offset\n header = re.sub(r'(?P<section>\\[[^\\]]+\\])', '\\n\\g<section>', header.decode('latin1'))\n header = header.splitlines()[1:]\n self.header = dict([self._header_sect2dict(line) for line in header])\n self.shape = np.array(self.header['Acquisition']['areGRBScan'].split(',')[-2:]).astype(np.int)\n f.close()\n\n offset_list = {'auto': self._offset_auto,\n 'from_end': -np.prod(self.shape)*self._nbytes,\n 'from_end_4k': - np.prod(self.shape)*self._nbytes - 4092}\n\n if self._offset_input in offset_list:\n\n self._offset_data = offset_list[self._offset_input]\n if self._offset_input.startswith('from_end'):\n # set the flag to seek from the end of the file.\n self._offset_whence = 2\n elif type(self._offset_input) is int:\n self._offset_data = self._offset_input\n else:\n raise ValueError\n\n \n\n return self.header",
"def getNexHeader( nexFile ):\r\n\tnexHeader = []\r\n\tnexIn = open(nexFile,'r')\r\n\treadFile = nexIn.read()\r\n\tfor line in readFile.splitlines(True):\r\n\t\tif \"nexus\" in line:\r\n\t\t\tnexHeader.append(line)\r\n\t\tif \"NEXUS\" in line:\r\n\t\t\tnexHeader.append(line)\r\n\t\tif \"begin data\" in line:\r\n\t\t\tnexHeader.append(line)\r\n\t\tif \"dimensions\" in line:\r\n\t\t\tnexHeader.append(line)\r\n\t\tif \"format\" in line:\r\n\t\t\tnexHeader.append(line)\r\n\t\tif \"matrix\" in line:\r\n\t\t\tnexHeader.append(line)\r\n\tnexIn.close()\r\n\treturn nexHeader",
"def read_headers(input_file):\n\n with open(input_file+'.hdr','r') as f:\n return [float(h) if not h.isalpha() else h for h in [l.split()[1] for l in f.readlines()]] #isdigit() does not catch floats",
"def readFrom(self,fn):\n hdrs = {}\n try:\n f = open(fn+\".headers\",\"tr\")\n for l in f:\n if l[-1:]==\"\\n\":\n l = l[:-1]\n i = l.find(\": \")\n if -1!=i:\n hdrs[l[:i]] = l[i+2:]\n f.close()\n except (Exception,Error) as err:\n log(\"readFrom: header: error: \"+str(err))\n try:\n f2 = open(fn,\"br\")\n data = f2.read()\n f2.close()\n except (Exception,Error) as err:\n log(\"readFrom: body: error: \"+str(err))\n return (hdrs,data)",
"def headersFromRawFile(self, rawFile: str, headers: Dict) -> None:\n dFile = open(os.path.join(self.dataPath, rawFile), \"r\", encoding=\"ISO-8859-1\")\n generalHeaderString = dFile.read(1000) # this should be long enough\n generalSplit = generalHeaderString.split()\n # read GENERAL HEADER\n generalHeader = {}\n generalHeader[\"recLength\"] = int(generalSplit[0])\n generalHeader[\"fileType\"] = generalSplit[1]\n generalHeader[\"wordLength\"] = int(generalSplit[2])\n generalHeader[\"version\"] = generalSplit[3]\n generalHeader[\"procId\"] = generalSplit[4]\n generalHeader[\"numCh\"] = int(generalSplit[5])\n generalHeader[\"totalRec\"] = int(generalSplit[6])\n generalHeader[\"firstEvent\"] = int(generalSplit[7])\n generalHeader[\"numEvent\"] = int(generalSplit[8])\n generalHeader[\"extend\"] = int(generalSplit[9])\n\n # read EVENT HEADER - there can be multiple of these, but normally only the one\n # Multiple events are largely deprecated. Only a single event is used\n eventHeaders = []\n fileSize = os.path.getsize(os.path.join(self.dataPath, rawFile))\n record = generalHeader[\"firstEvent\"]\n for ir in range(0, generalHeader[\"numEvent\"]):\n seekPt = (record - 1) * generalHeader[\"recLength\"]\n if not seekPt > fileSize:\n # seek from beginning of file\n dFile.seek(seekPt, 0)\n # read extra to make sure\n eventString = dFile.read(1000)\n eventSplit = eventString.split()\n eH = {}\n eH[\"start\"] = int(eventSplit[0])\n eH[\"startms\"] = int(eventSplit[1])\n eH[\"stop\"] = int(eventSplit[2])\n eH[\"stopms\"] = int(eventSplit[3])\n eH[\"cvalue1\"] = float(eventSplit[4])\n eH[\"cvalue2\"] = float(eventSplit[5])\n eH[\"cvalue3\"] = float(eventSplit[6])\n eH[\"EHInfile\"] = int(eventSplit[7])\n eH[\"nextEH\"] = int(eventSplit[8])\n eH[\"previousEH\"] = int(eventSplit[9])\n eH[\"numData\"] = int(eventSplit[10])\n eH[\"startData\"] = int(eventSplit[11])\n eH[\"extended\"] = int(eventSplit[12])\n eventHeaders.append(eH)\n if eH[\"nextEH\"] < generalHeader[\"totalRec\"]:\n record = eH[\"nextEH\"] # set to go to next eH\n else:\n break # otherwise break out of for loops\n # close the data file\n dFile.close()\n # now compare number of samples with that calculated previously\n if eventHeaders[0][\"numData\"] != headers[\"num_samples\"]:\n self.printWarning(\"Data file: {}\".format(dFile))\n self.printWarning(\n \"Number of samples in raw file header {} does not equal that calculated from data {}\".format(\n eventHeaders[0][\"numData\"], headers[\"num_samples\"]\n )\n )\n self.printWarning(\"Number of samples calculated from data will be used\")\n # set the byte offset for the file\n self.dataByteOffset[rawFile] = (\n eventHeaders[0][\"startData\"] - 1\n ) * generalHeader[\"recLength\"]\n self.recChannels[rawFile] = generalHeader[\"numCh\"]",
"def _parse_header(self):\n # read the first bytes from the file\n header = self._stream_handle.read(HEADER_BYTES)\n match = HEADER_MATCHER.match(header)\n if not match:\n raise SampleException(\"File header does not match the header regex\")\n\n # update the state to show we have read the header\n self._increment_state(HEADER_BYTES)",
"def keyFileHeaderReader(keyFileName):\n\n KEYSIGNATURE=2718281 \n SIGNATURE = KEYSIGNATURE\n keyFile = open(keyFileName,'rb')\n headerHeader = array.array('I')\n headerHeader.fromfile(keyFile, 3)\n\n # to handle endianess, read keySignature\n keySignature = headerHeader[0]>>8\n needToSwap=0\n if keySignature != SIGNATURE:\n headerHeader.byteswap()\n needToSwap=1\n\n position = keyFile.tell()\n keyFile.close()\n return [position,needToSwap,headerHeader[1],headerHeader[2]]",
"def read_header(fid):\r\n\r\n # Check 'magic number' at beginning of file to make sure this is an Intan\r\n # Technologies RHD2000 data file.\r\n magic_number, = struct.unpack('<I', fid.read(4)) \r\n if magic_number != int('c6912702', 16): raise Exception('Unrecognized file type.')\r\n\r\n header = {}\r\n # Read version number.\r\n version = {}\r\n (version['major'], version['minor']) = struct.unpack('<hh', fid.read(4)) \r\n header['version'] = version\r\n\r\n print('')\r\n print('Reading Intan Technologies RHD2000 Data File, Version {}.{}'.format(version['major'], version['minor']))\r\n print('')\r\n\r\n freq = {}\r\n\r\n # Read information of sampling rate and amplifier frequency settings.\r\n header['sample_rate'], = struct.unpack('<f', fid.read(4))\r\n (freq['dsp_enabled'], freq['actual_dsp_cutoff_frequency'], freq['actual_lower_bandwidth'], freq['actual_upper_bandwidth'], \r\n freq['desired_dsp_cutoff_frequency'], freq['desired_lower_bandwidth'], freq['desired_upper_bandwidth']) = struct.unpack('<hffffff', fid.read(26))\r\n\r\n\r\n # This tells us if a software 50/60 Hz notch filter was enabled during\r\n # the data acquisition.\r\n notch_filter_mode, = struct.unpack('<h', fid.read(2))\r\n header['notch_filter_frequency'] = 0\r\n if notch_filter_mode == 1:\r\n header['notch_filter_frequency'] = 50\r\n elif notch_filter_mode == 2:\r\n header['notch_filter_frequency'] = 60\r\n freq['notch_filter_frequency'] = header['notch_filter_frequency']\r\n\r\n (freq['desired_impedance_test_frequency'], freq['actual_impedance_test_frequency']) = struct.unpack('<ff', fid.read(8))\r\n\r\n note1 = read_qstring(fid)\r\n note2 = read_qstring(fid)\r\n note3 = read_qstring(fid)\r\n header['notes'] = { 'note1' : note1, 'note2' : note2, 'note3' : note3}\r\n\r\n # If data file is from GUI v1.1 or later, see if temperature sensor data was saved.\r\n header['num_temp_sensor_channels'] = 0\r\n if (version['major'] == 1 and version['minor'] >= 1) or (version['major'] > 1) :\r\n header['num_temp_sensor_channels'], = struct.unpack('<h', fid.read(2))\r\n \r\n # If data file is from GUI v1.3 or later, load eval board mode.\r\n header['eval_board_mode'] = 0\r\n if ((version['major'] == 1) and (version['minor'] >= 3)) or (version['major'] > 1) :\r\n header['eval_board_mode'], = struct.unpack('<h', fid.read(2))\r\n \r\n \r\n header['num_samples_per_data_block'] = 60\r\n # If data file is from v2.0 or later (Intan Recording Controller), load name of digital reference channel\r\n if (version['major'] > 1):\r\n header['reference_channel'] = read_qstring(fid)\r\n header['num_samples_per_data_block'] = 128\r\n\r\n # Place frequency-related information in data structure. 
(Note: much of this structure is set above)\r\n freq['amplifier_sample_rate'] = header['sample_rate']\r\n freq['aux_input_sample_rate'] = header['sample_rate'] / 4\r\n freq['supply_voltage_sample_rate'] = header['sample_rate'] / header['num_samples_per_data_block']\r\n freq['board_adc_sample_rate'] = header['sample_rate']\r\n freq['board_dig_in_sample_rate'] = header['sample_rate']\r\n\r\n header['frequency_parameters'] = freq\r\n\r\n # Create structure arrays for each type of data channel.\r\n header['spike_triggers'] = []\r\n header['amplifier_channels'] = []\r\n header['aux_input_channels'] = []\r\n header['supply_voltage_channels'] = []\r\n header['board_adc_channels'] = []\r\n header['board_dig_in_channels'] = []\r\n header['board_dig_out_channels'] = []\r\n\r\n # Read signal summary from data file header.\r\n\r\n number_of_signal_groups, = struct.unpack('<h', fid.read(2))\r\n print('n signal groups {}'.format(number_of_signal_groups))\r\n\r\n for signal_group in range(1, number_of_signal_groups + 1):\r\n signal_group_name = read_qstring(fid)\r\n signal_group_prefix = read_qstring(fid)\r\n (signal_group_enabled, signal_group_num_channels, signal_group_num_amp_channels) = struct.unpack('<hhh', fid.read(6))\r\n\r\n if (signal_group_num_channels > 0) and (signal_group_enabled > 0):\r\n for signal_channel in range(0, signal_group_num_channels):\r\n new_channel = {'port_name' : signal_group_name, 'port_prefix' : signal_group_prefix, 'port_number' : signal_group}\r\n new_channel['native_channel_name'] = read_qstring(fid)\r\n new_channel['custom_channel_name'] = read_qstring(fid)\r\n (new_channel['native_order'], new_channel['custom_order'], signal_type, channel_enabled, new_channel['chip_channel'], new_channel['board_stream']) = struct.unpack('<hhhhhh', fid.read(12))\r\n new_trigger_channel = {}\r\n (new_trigger_channel['voltage_trigger_mode'], new_trigger_channel['voltage_threshold'], new_trigger_channel['digital_trigger_channel'], new_trigger_channel['digital_edge_polarity']) = struct.unpack('<hhhh', fid.read(8))\r\n (new_channel['electrode_impedance_magnitude'], new_channel['electrode_impedance_phase']) = struct.unpack('<ff', fid.read(8))\r\n\r\n if channel_enabled:\r\n if signal_type == 0:\r\n header['amplifier_channels'].append(new_channel)\r\n header['spike_triggers'].append(new_trigger_channel)\r\n elif signal_type == 1:\r\n header['aux_input_channels'].append(new_channel)\r\n elif signal_type == 2:\r\n header['supply_voltage_channels'].append(new_channel)\r\n elif signal_type == 3:\r\n header['board_adc_channels'].append(new_channel)\r\n elif signal_type == 4:\r\n header['board_dig_in_channels'].append(new_channel)\r\n elif signal_type == 5:\r\n header['board_dig_out_channels'].append(new_channel)\r\n else:\r\n raise Exception('Unknown channel type.')\r\n \r\n # Summarize contents of data file.\r\n header['num_amplifier_channels'] = len(header['amplifier_channels'])\r\n header['num_aux_input_channels'] = len(header['aux_input_channels'])\r\n header['num_supply_voltage_channels'] = len(header['supply_voltage_channels'])\r\n header['num_board_adc_channels'] = len(header['board_adc_channels'])\r\n header['num_board_dig_in_channels'] = len(header['board_dig_in_channels'])\r\n header['num_board_dig_out_channels'] = len(header['board_dig_out_channels'])\r\n\r\n return header",
"def _read_old_header(self, raw):\n\n byte_count = 0\n\n data_size = 4\n self.label = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.version = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.revision = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 26\n self.date = struct.unpack('<26s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.file_format = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.file_type = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.original_file_name = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.reference_file_name = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.related_file_name_a = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.related_file_name_b = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.related_file_name_c = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.related_file_name_d = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 82\n self.annotate = struct.unpack('<82s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 33\n self.instrument_model = struct.unpack('<33s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 33\n self.instrument_serial_number = struct.unpack('<33s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 33\n self.software_version_number = struct.unpack('<33s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 33\n self.crystal_material = struct.unpack('<33s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.laser_wavelength_microns = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.laser_null_doubling = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.optical_ratio = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.dispersion_constant_xc = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.dispersion_constant_xm = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.dispersion_constant_xb = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.interferogram_size = struct.unpack('<H',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.interferogram_center.append(struct.unpack('<H',\n raw[byte_count:byte_count+data_size])[0])\n byte_count += data_size\n\n data_size = 2\n self.interferogram_center.append(struct.unpack('<H',\n raw[byte_count:byte_count+data_size])[0])\n 
byte_count += data_size\n\n data_size = 2\n self.acquire_mode = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.emissivity = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.apodization = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.zero_fill = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.run_time_math = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.fft_size = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.number_of_coadds = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 2\n self.number_of_igrams = struct.unpack('<h',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.amb_temperature = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.inst_temperature = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.wbb_temperature = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.cbb_temperature = struct.unpack('<f',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 20\n self.spare_i = struct.unpack('<hhhhhhhhhh',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 40\n self.spare_f = struct.unpack('<ffffffffff',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 40\n self.spare_l = struct.unpack('<ffffffffff',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 65\n self.spare_na = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.spare_nb = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.spare_nc = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.spare_nd = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 65\n self.spare_ne = struct.unpack('<65s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.header_end = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size",
"def test_headers(self):\r\n for path in self.get_files():\r\n po = polib.pofile(path)\r\n header = po.header\r\n self.assertEqual(\r\n header.find('edX translation file'),\r\n 0,\r\n msg='Missing header in %s:\\n\"%s\"' % (os.path.basename(path), header)\r\n )",
"def parse_header(self):",
"def __symbolsHeader(self):\n print \"Reading symbols header...\"\n \tif self.hdr.Version <= 8:\n self.ocad.seek(48)\n self.syhdr = Structure.read('TSymHeader', self.ocad)\n #print \"TSymHeader: \", self.syhdr",
"def _read_header(edf_file):\n read = edf_file.read\n read_ascii = lambda n: read(n).decode('ascii').strip()\n read_int = lambda n: int(read_ascii(n))\n read_float = lambda n: float(read_ascii(n))\n\n version = int(read(8).decode('ascii').strip())\n assert version == 0\n\n header = OrderedDict()\n\n header['local_patient_id'] = read_ascii(80)\n header['local_recording_id'] = read_ascii(80)\n\n unpack_ts = lambda n: [int(x) for x in read_ascii(n).split('.')]\n header['start_date'] = StartDate(*unpack_ts(8))\n header['start_time'] = StartTime(*unpack_ts(8))\n\n header['num_header_bytes'] = read_int(8)\n\n read(44)\n\n header['num_records'] = read_int(8)\n header['seconds_per_record'] = read_int(8)\n header['num_signals'] = nsig = read_int(4)\n\n header['label'] = [read_ascii(16) for _ in range(nsig)]\n header['transducer_type'] = [read_ascii(80) for _ in range(nsig)]\n header['units'] = [read_ascii(8) for _ in range(nsig)]\n header['physical_min'] = np.array([read_float(8) for _ in range(nsig)])\n header['physical_max'] = np.array([read_float(8) for _ in range(nsig)])\n header['digital_min'] = np.array([read_float(8) for _ in range(nsig)])\n header['digital_max'] = np.array([read_float(8) for _ in range(nsig)])\n header['prefiltering'] = [read_ascii(80) for _ in range(nsig)]\n header['samples_per_record'] = np.array([read_int(8) for _ in range(nsig)])\n\n read(32 * nsig)\n\n assert edf_file.tell() == header['num_header_bytes']\n\n return header",
"def GetHeaders(the_file):\n\n data = exifread.process_file(the_file, 'UNDEF', False, False, False)\n return data",
"def read_header(fname):\n with gzip.open(fname, 'rt') as f:\n content = f.readline().split()\n return content[:-1], int(content[-1])",
"def _read_header(self, line):\n try:\n creation_date = datetime.strptime(line[23:33], '%y%m%d%H%M')\n except ValueError as err:\n print('Error parsing file creation date -> ' + str(err))\n creation_date = '000000'\n\n self.file_header = {'Priority Code': line[1:3],\n 'Immediate Destination': line[3:13].strip(),\n 'Immediate Origin': line[13:23].strip(),\n 'Creation Date': creation_date,\n 'File ID Modifier': line[33],\n 'Record Size': int(line[34:37].strip()),\n 'Blocking Factor': int(line[37:39]),\n 'Format Code': line[39],\n 'Immediate Destination Name': line[40:63].strip(),\n 'Immediate Origin Name': line[63:86].strip(),\n 'Reference Code': line[86:93]}",
"def _read_new_header(self, raw):\n\n byte_count = 0\n\n data_size = 4\n self.label = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.version = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.revision = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 28\n self.date = struct.unpack('<28s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.file_format = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.file_type = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.original_file_name = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.reference_file_name = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.related_file_name_a = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.related_file_name_b = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.related_file_name_c = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 84\n self.annotate = struct.unpack('<84s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 36\n self.instrument_model = struct.unpack('<36s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 36\n self.instrument_serial_number = struct.unpack('<36s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 36\n self.software_version_number = struct.unpack('<36s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 36\n self.crystal_material = struct.unpack('<36s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.laser_wavelength_microns = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.laser_null_doubling = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.padding = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.dispersion_constant_xc = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.dispersion_constant_xm = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.dispersion_constant_xb = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.num_chan = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.interferogram_size = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.scan_direction = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.acquire_mode = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n 
self.emissivity = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.apodization = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.zero_fill = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.run_time_math = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.fft_size = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.number_of_coadds = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.single_sided = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.chan_display = struct.unpack('<l',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.amb_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.inst_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.wbb_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.cbb_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.temperature_dwr = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.emissivity_dwr = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 8\n self.laser_temperature = struct.unpack('<d',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 40\n self.spare_i = struct.unpack('<llllllllll',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 80\n self.spare_f = struct.unpack('<dddddddddd',\n raw[byte_count:byte_count+data_size])\n byte_count += data_size\n\n data_size = 68\n self.spare_na = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.spare_nb = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.spare_nc = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.spare_nd = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 68\n self.spare_ne = struct.unpack('<68s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size\n\n data_size = 4\n self.header_end = struct.unpack('<4s',\n raw[byte_count:byte_count+data_size])[0]\n byte_count += data_size",
"def read_xtidefile(fid):\n l = fgetl_nocom(fid)\n ncon = sscanf(l, '\\n %d')\n xtide = type('struct', (), {})()\n for k in range(1, (ncon +1)):\n l = fgetl_nocom(fid)\n xtide.name(k, :) = l[0:8]\n xtide.speed(k) = sscanf(l[8:l.shape[0]], '\\n %f')\n xtide.startyear = sscanf(fgetl_nocom(fid), '\\n %d')\n nyear = sscanf(fgetl_nocom(fid), '\\n %d')\n for k in range(1, (ncon +1)):\n l = fgetl(fid)\n xtide.equilibarg(k, :) = fscanf(fid, '\\n %f', nyear)\n l = fgetl(fid)\n l = fgetl(fid)\n # Skip *END*\n nyear = sscanf(fgetl_nocom(fid), '\\n %d')\n for k in range(1, (ncon +1)):\n l = fgetl(fid)\n xtide.nodefactor(k, :) = fscanf(fid, '\\n %f', nyear)\n l = fgetl(fid)\n l = fgetl(fid)\n # Skip *END*\n # Now read in all harmonic data\n #nsta=1754; \n # This is number of stations in harmonics (1998-07-18)\n #nsta=3351; \n # This is number of stations in v1.42 or harmonics file\n nsta = 3316\n # This is number in v1.51\n xharm = type('struct', (), {})()\n nh = 0\n while max(l.shape) > 0 & l[0] != - 1:\n\n l = l + ' '\n nh = nh + 1\n while not l[0:3] == '# !':\n\n l = fgetl(fid) + ' '\n\n while l[0:3] == '# !':\n\n if 'unit' == l[((3:7 -1) -1)]:\n tmp = deblank(l[(findstr(l, ':') + 2 -1):l.shape[0]])\n xharm.units(nh, range(1, (max(tmp.shape) +1))) = tmp\n else:\n if 'long' == l[((3:7 -1) -1)]:\n xharm.longitude(nh) = sscanf(l[(findstr(l, ':') + 1 -1):l.shape[0]], '\\n %f')\n else:\n if 'lati' == l[((3:7 -1) -1)]:\n xharm.latitude(nh) = sscanf(l[(findstr(l, ':') + 1 -1):l.shape[0]], '\\n %f')\n l = fgetl(fid)\n\n tmp = deblank(l)\n if tmp[0] != '#':\n # Not commented out\n xharm.station(nh, range(1, (max(tmp.shape) +1))) = tmp\n tmp = fgetl(fid)\n k = np.min(findstr(tmp, ':'))\n tim = sscanf(tmp[0:k - 1], '\\n %d') + sscanf(tmp[(k + np.array([range(1, 3)]).reshape(1, -1) -1)], '\\n %d') / 60\n xharm.timezone(nh) = tim\n tmp = fgetl(fid)\n xharm.datum(nh) = sscanf(tmp, '\\n %f')\n for k in range(1, (ncon +1)):\n l = fgetl(fid)\n if l[0] != 'x':\n ll = np.min(np.array([findstr(' ', l), np.flatnonzero(abs(l) == 9)]).reshape(1, -1))\n # space or tab\n tmp = sscanf(l[(ll + 1 -1):l.shape[0]], '\\n %f', 2)\n xharm.A(nh, k) = tmp[0]\n xharm.kappa(nh, k) = tmp[1]\n l = fgetl(fid)\n else:\n nh = nh - 1\n if rem(nh, 50) == 0:\n fprintf('.')\n\n fprintf('\\\\n')\n # Convert internally to sparse matrix storage (much smaller).\n xharm.A = sparse(xharm.A)\n xharm.kappa = sparse(xharm.kappa)\n return xtide, xharm\n # \n return xtide, xharm",
"def get_file_headers(hdulist):\n if 'TELESCOP' in hdulist[0].header:\n get_instrume = hdulist[0].header['INSTRUME']\n get_telescop = hdulist[0].header['TELESCOP']\n get_reftype = hdulist[0].header['REFTYPE']\n if get_reftype == \"FLAT\":\n get_reftype = \"_FLAT\"\n return (get_instrume, get_telescop, get_reftype)\n else:\n get_instrume = hdulist[0].header['INSTRUME']\n get_telescop = False\n get_reftype = hdulist[0].header['REFTYPE']\n if get_reftype == \"FLAT\":\n get_reftype = \"_FLAT\"\n return (get_instrume, get_telescop, get_reftype)",
"def read_header(datafile):\n\thead = []\n\tf = open(datafile,'r')\n\tfor i,line in enumerate(f):\n\t\tif i is 10: break\n\t\thead += [line]\n\tf.close()\n\treturn head"
] | [
"0.6655318",
"0.6545926",
"0.6512777",
"0.5899843",
"0.58021617",
"0.5711781",
"0.5699319",
"0.568349",
"0.5606272",
"0.5598395",
"0.5573075",
"0.5572416",
"0.556946",
"0.5562428",
"0.5521986",
"0.55197376",
"0.5510907",
"0.5490756",
"0.54724866",
"0.54674906",
"0.5455055",
"0.54270947",
"0.54230213",
"0.5411136",
"0.540761",
"0.5383771",
"0.53498256",
"0.53241307",
"0.53226995",
"0.53217065"
] | 0.7716968 | 0 |
Merge headers from all the header files. Checks all the header files to see if there are any gaps and calculates the sample ranges for each file together with the total number of samples. Sets the start and end time of the recording and the class variables datetimeStart and datetimeStop. | def mergeHeaders(self, headersList: List, chanHeadersList: List) -> None:
# take the first header as an example
self.headers = headersList[0]
self.chanHeaders = chanHeadersList[0]
if len(headersList) == 1:
# just fill in the data file list and data ranges
self.dataFileList = [self.headers["ats_data_file"]]
self.dataRanges = [[0, self.headers["num_samples"] - 1]]
self.scalings = []
tmp = {}
for cHeader in self.chanHeaders:
tmp[cHeader["channel_type"]] = cHeader["ts_lsb"]
self.scalings.append(tmp)
return # then there was only one file - no need to do all the below
# make sure that all headers have the same sample rate
# and save the start and stop times and dates
startTimes = []
stopTimes = []
numSamples = []
for idx, header in enumerate(headersList):
if header["sample_freq"] != self.headers["sample_freq"]:
self.printError(
"Not all datasets in {} have the same sample frequency.\nExiting...".format(
self.dataPath
),
quitrun=True,
)
if header["meas_channels"] != self.headers["meas_channels"]:
self.printError(
"Not all datasets in {} have the same number of channels.\nExiting...".format(
self.dataPath
),
quitrun=True,
)
# now store startTimes, stopTimes and numSamples
# do this as datetimes, will be easier
startString = "{} {}".format(header["start_date"], header["start_time"])
stopString = "{} {}".format(header["stop_date"], header["stop_time"])
datetimeStart = datetime.strptime(startString, "%Y-%m-%d %H:%M:%S.%f")
datetimeStop = datetime.strptime(stopString, "%Y-%m-%d %H:%M:%S.%f")
startTimes.append(datetimeStart)
stopTimes.append(datetimeStop)
numSamples.append(header["num_samples"])
# check the start and end times
sampleTime = timedelta(seconds=1.0 / self.headers["sample_freq"])
# sort by start times
sortIndices = sorted(list(range(len(startTimes))), key=lambda k: startTimes[k])
    # use the same sorted indices to access the corresponding stop times below
check = True
for i in range(1, self.numHeaderFiles):
# get the stop time of the previous dataset
stopTimePrev = stopTimes[sortIndices[i - 1]]
startTimeNow = startTimes[sortIndices[i]]
if startTimeNow != stopTimePrev + sampleTime:
self.printWarning(
"There is a gap between the datafiles in {}".format(self.dataPath)
)
self.printWarning(
"Please separate out datasets with gaps into separate folders"
)
# print out where the gap was found
self.printWarning("Gap found between datafiles:")
self.printWarning(
"1. {}".format(headersList[sortIndices[i - 1]]["ats_data_file"])
)
self.printWarning(
"2. {}".format(headersList[sortIndices[i]]["ats_data_file"])
)
# set check as false
check = False
# if did not pass check, then exit
if not check:
self.printError(
"Gaps in data. All data for a single recording must be continuous. Exiting...",
quitrun=True,
)
    # no gaps found, so the total number of samples is simply the sum over all files
totalSamples = sum(numSamples)
# get a list of all the datafiles, scalings and the sample ranges
self.dataFileList = []
self.dataRanges = []
self.scalings = []
sample = -1
# now need some sort of lookup table to say where the sample ranges are
for i in range(0, self.numHeaderFiles):
iSort = sortIndices[i] # get the sorted index
self.dataFileList.append(headersList[iSort]["ats_data_file"])
startSample = sample + 1
endSample = (
startSample + numSamples[iSort] - 1
) # -1 because this is inclusive of the start sample
self.dataRanges.append([startSample, endSample])
# increment sample
sample = endSample
# save the scalings for each chan
tmp = {}
for cHeader in self.chanHeadersList[iSort]:
tmp[cHeader["channel_type"]] = cHeader["ts_lsb"]
self.scalings.append(tmp)
# now set the LSB information for the chanHeaders
# i.e. if they change, this should reflect that
for i in range(0, len(self.chanHeaders)):
chan = self.chanHeaders[i]["channel_type"]
lsbSet = set()
for scalar in self.scalings:
lsbSet.add(scalar[chan])
if len(lsbSet) == 1:
self.chanHeaders[i]["ts_lsb"] = list(lsbSet)[0]
else:
self.printWarning(
"Multiple different LSB values found for chan {}: {}".format(
chan, list(lsbSet)
)
)
self.printWarning(
"This is handled, but the header information given will show only a single LSB value"
)
self.chanHeaders[i]["ts_lsb"] = list(lsbSet)[0]
# set start and end time for headers and chan headers
# do the same with number of samples
datetimeStart = min(startTimes)
datetimeStop = max(stopTimes)
self.headers["start_date"] = datetimeStart.strftime("%Y-%m-%d")
self.headers["start_time"] = datetimeStart.strftime("%H:%M:%S.%f")
self.headers["stop_date"] = datetimeStop.strftime("%Y-%m-%d")
self.headers["stop_time"] = datetimeStop.strftime("%H:%M:%S.%f")
self.headers["num_samples"] = totalSamples
# set datafiles = the whole list of datafiles
self.headers["ats_data_file"] = self.dataFileList
for iChan in range(0, len(self.chanHeaders)):
self.chanHeaders[iChan]["start_date"] = datetimeStart.strftime("%Y-%m-%d")
self.chanHeaders[iChan]["start_time"] = datetimeStart.strftime(
"%H:%M:%S.%f"
)
self.chanHeaders[iChan]["stop_date"] = datetimeStop.strftime("%Y-%m-%d")
self.chanHeaders[iChan]["stop_time"] = datetimeStop.strftime("%H:%M:%S.%f")
self.chanHeaders[iChan]["num_samples"] = totalSamples
self.chanHeaders[iChan]["ats_data_file"] = self.dataFileList | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def readHeaderXTR(self, headerFile: str) -> None:\n with open(headerFile, \"r\") as f:\n lines = f.readlines()\n sectionLines = {}\n # let's get data\n for line in lines:\n line = line.strip()\n line = line.replace(\"'\", \" \")\n # continue if line is empty\n if line == \"\":\n continue\n if \"[\" in line:\n sec = line[1:-1]\n sectionLines[sec] = []\n else:\n sectionLines[sec].append(line)\n # the base class is built around a set of headers based on ATS headers\n # though this is a bit more work here, it saves lots of code repetition\n headers = {}\n # recording information (start_time, start_date, stop_time, stop_date, ats_data_file)\n fileLine = sectionLines[\"FILE\"][0]\n fileSplit = fileLine.split()\n headers[\"sample_freq\"] = np.absolute(float(fileSplit[-1]))\n timeLine = sectionLines[\"FILE\"][2]\n timeSplit = timeLine.split()\n # these are the unix time stamps\n startDate = float(timeSplit[1] + \".\" + timeSplit[2])\n datetimeStart = datetime.utcfromtimestamp(startDate)\n stopDate = float(timeSplit[3] + \".\" + timeSplit[4])\n datetimeStop = datetime.utcfromtimestamp(stopDate)\n headers[\"start_date\"] = datetimeStart.strftime(\"%Y-%m-%d\")\n headers[\"start_time\"] = datetimeStart.strftime(\"%H:%M:%S.%f\")\n headers[\"stop_date\"] = datetimeStop.strftime(\"%Y-%m-%d\")\n headers[\"stop_time\"] = datetimeStop.strftime(\"%H:%M:%S.%f\")\n # here calculate number of samples\n deltaSeconds = (datetimeStop - datetimeStart).total_seconds()\n # calculate number of samples - have to add one because the time given in SPAM recording is the actual time of the last sample\n numSamples = int(deltaSeconds * headers[\"sample_freq\"]) + 1\n # put these in headers for ease of future calculations in merge headers\n headers[\"num_samples\"] = numSamples\n # spam datasets only have the one data file for all channels\n headers[\"ats_data_file\"] = fileSplit[1]\n # data information (meas_channels, sample_freq)\n chanLine = sectionLines[\"CHANNAME\"][0]\n # this gets reformatted to an int later\n headers[\"meas_channels\"] = chanLine.split()[1]\n numChansInt = int(headers[\"meas_channels\"])\n # deal with the channel headers\n chanHeaders = []\n for iChan in range(0, numChansInt):\n chanH = self.chanDefaults()\n # set the sample frequency from the main headers\n chanH[\"sample_freq\"] = headers[\"sample_freq\"]\n # line data - read through the data in the correct channel order\n chanLine = sectionLines[\"CHANNAME\"][iChan + 1]\n chanSplit = chanLine.split()\n dataLine = sectionLines[\"DATA\"][iChan + 1]\n dataSplit = dataLine.split()\n # channel input information (gain_stage1, gain_stage2, hchopper, echopper)\n chanH[\"gain_stage1\"] = 1\n chanH[\"gain_stage2\"] = 1\n # channel output information (sensor_type, channel_type, ts_lsb, pos_x1, pos_x2, pos_y1, pos_y2, pos_z1, pos_z2, sensor_sernum)\n chanH[\"ats_data_file\"] = fileSplit[1]\n chanH[\"num_samples\"] = numSamples\n\n # channel information\n # spams often use Bx, By - use H within the software as a whole\n chanH[\"channel_type\"] = consistentChans(chanSplit[2])\n # the sensor number is a bit of a hack - want MFSXXe or something - add MFS in front of the sensor number - this is liable to break\n # at the same time, set the chopper\n calLine = sectionLines[\"200{}003\".format(iChan + 1)][0]\n calSplit = calLine.split()\n if isMagnetic(chanH[\"channel_type\"]):\n chanH[\"sensor_sernum\"] = calSplit[\n 2\n ] # the last three digits is the serial number\n sensorType = calSplit[1].split(\"_\")[1][-2:]\n chanH[\"sensor_type\"] = 
\"MFS{:02d}\".format(int(sensorType))\n if \"LF\" in calSplit[1]:\n chanH[\"hchopper\"] = 1\n else:\n chanH[\"sensor_type\"] = \"ELC00\"\n if \"LF\" in calLine:\n chanH[\"echopper\"] = 1\n\n # data is raw voltage of sensors\n # both E and H fields need polarity reversal (from email with Reinhard)\n # get scaling from headers\n scaling = float(dataSplit[-2])\n if isElectric(chanH[\"channel_type\"]):\n # the factor of 1000 is not entirely clear\n lsb = 1000.0 * scaling\n # volts to millivolts and a minus to switch polarity giving data in mV\n lsb = -1000.0 * lsb\n else:\n # volts to millivolts and a minus to switch polarity giving data in mV\n # scaling in header file is ignored because it duplicates static gain correction in calibration\n lsb = -1000.0\n chanH[\"ts_lsb\"] = lsb\n\n # the distances\n if chanSplit[2] == \"Ex\":\n chanH[\"pos_x1\"] = float(dataSplit[4]) / 2\n chanH[\"pos_x2\"] = chanH[\"pos_x1\"]\n if chanSplit[2] == \"Ey\":\n chanH[\"pos_y1\"] = float(dataSplit[4]) / 2\n chanH[\"pos_y2\"] = chanH[\"pos_y1\"]\n if chanSplit[2] == \"Ez\":\n chanH[\"pos_z1\"] = float(dataSplit[4]) / 2\n chanH[\"pos_z2\"] = chanH[\"pos_z1\"]\n\n # append chanHeaders to the list\n chanHeaders.append(chanH)\n\n # check information from raw file headers\n self.headersFromRawFile(headers[\"ats_data_file\"], headers)\n # return the headers and chanHeaders from this file\n return headers, chanHeaders",
"def readHeader(self) -> None:\n # read header files\n self.headersList = []\n self.chanHeadersList = []\n for headerFile in self.headerF:\n if \"xtrx\" in headerFile.lower():\n headers, chanHeaders = self.readHeaderXTRX(headerFile)\n else:\n headers, chanHeaders = self.readHeaderXTR(headerFile)\n self.headersList.append(headers)\n self.chanHeadersList.append(chanHeaders)\n\n # check to make sure no gaps, calculate out the sample ranges and list the data files for each sample\n self.mergeHeaders(self.headersList, self.chanHeadersList)",
"def headers(self, min_rt=None, max_rt=None, ms_level=None, polarity=None, **kwargs):\n \n # iterate through file\n for evt, elm in etree.iterparse(self.path, ('end',)):\n \n # retrieve instrument configs\n if elm.tag == self._prefix+'instrumentConfigurationList':\n self._retrieve_instrument_configurations(elm)\n \n # process spectrum data\n if elm.tag == self._prefix+'spectrum':\n \n # init scan data container\n scan_data = self._make_template()\n \n # retrieve raw header data\n self._retrieve_header_data(elm, scan_data)\n \n # check raw header data\n if not self._check_header_data(scan_data, min_rt, max_rt, ms_level, polarity):\n elm.clear()\n continue\n \n # free memory\n elm.clear()\n \n # create scan header\n yield self._make_header(scan_data)",
"def read_input_files(self):\r\n\r\n for input_file in self.list_of_input_files:\r\n input_file.read_header_of_file()\r\n self.list_of_header_objects.extend(input_file.list_of_header_objects)\r\n self.list_of_header_objects_without_ID.extend(input_file.list_of_header_objects_without_ID)\r\n self.list_of_contigs.extend(input_file.list_of_contigs)\r\n\r\n self.list_of_header_objects = list(toolz.unique(self.list_of_header_objects, key=lambda x: x.tag_and_ID))\r\n self.list_of_header_objects_without_ID = list(\r\n toolz.unique(self.list_of_header_objects_without_ID, key=lambda x: x.line))\r\n self.list_of_contigs = list(toolz.unique(self.list_of_contigs, key=lambda x: x.line))\r\n self.list_of_header_objects.extend(self.list_of_header_objects_without_ID)\r\n self.list_of_header_objects.sort(key=lambda x: x.line)\r\n self.list_of_header_objects.extend(self.list_of_contigs)\r\n self.list_of_header_objects.sort(key=lambda x: x.tag, reverse=False)\r\n self.create_body_header_line_for_output()\r\n self.write_header_in_output_file()\r\n\r\n list_of_chrom = list(self.indices.keys())\r\n list_of_chrom.sort(key=lambda x: self.alphanum_key(x))\r\n for chrom in list_of_chrom:\r\n self.list_of_body_objects.clear()\r\n for input_file in self.list_of_input_files:\r\n input_file.read_specific_chrom_body_of_file(chrom)\r\n self.list_of_body_objects.extend(input_file.list_of_body_objects)\r\n\r\n self.adjust_body_records_to_samples()\r\n self.list_of_body_objects = list(toolz.unique(self.list_of_body_objects, key=lambda x: x.line))\r\n self.list_of_body_objects.sort(key=lambda x: self.alphanum_key(x.line))\r\n self.verify_and_merge_body_records()\r\n self.write_specific_chrom_in_output_file()",
"def fix_headers(hParams,testMode=False):\n \n \n fileList = glob.glob(hParams['fileList'])\n for oneFile in fileList:\n with fits.open(oneFile,'update') as HDUList_orig:\n if testMode == True:\n print(\"Doing a dry run without modifying headers\")\n HDUList = fits.HDUList([fits.PrimaryHDU(None,header=HDUList_orig[0].header)])\n primHead = HDUList[0].header\n else:\n primHead = HDUList_orig[0].header\n\n colcorner = hParams['COLCORNR'][primHead['SCA_ID']]\n rowcorner = hParams['ROWCORNR'][primHead['SCA_ID']]\n \n detTiming = pynrc.pynrc_core.DetectorOps(detector=481,\n wind_mode=hParams['wind_mode'],\n xpix=hParams['xpix'],\n ypix=hParams['ypix'],\n x0=colcorner-1,\n y0=rowcorner-1,\n nint=hParams['nint'],\n ngroup=hParams['ngroup'],\n nf=hParams['nf'])\n correctHead = detTiming.make_header()\n\n obsId = primHead['OBS_ID']\n if obsId in hParams['expStart'].keys():\n expStart = hParams['expStart'][obsId]\n date, time = expStart.split('T')\n primHead['DATE-OBS'] = date\n primHead['TIME-OBS'] = time\n \n t_expStart = Time(expStart)\n t_expEnd = t_expStart + correctHead['EXPTIME'] * u.second\n expEnd = t_expEnd.fits\n date, time = expEnd.split('T')\n primHead['DATE-END'] = date\n primHead['TIME-END'] = time\n else:\n print(\"Couldn't find exp start for {}\".format(obsId))\n \n\n for oneKey in ['TFRAME','TGROUP','INTTIME','EXPTIME',\n 'TREFROW','BREFROW','LREFCOL','RREFCOL',\n 'COLCORNR','ROWCORNR']:\n primHead[oneKey] = correctHead[oneKey]\n \n if hParams['wind_mode'] == 'WINDOW':\n primHead['HWINMODE'] = 'ENABLE'\n else:\n primHead['HWINMODE'] = 'DISABLE'\n primHead['DETECTOR'] = detectorDict[primHead['SCA_ID']]\n \n primHead['TLDYNEID'] = hParams['teledyneID'][primHead['SCA_ID']]\n if testMode == True:\n pdb.set_trace()",
"def merge_all_data(self):\n\n logging.info('***** Starting the merging process merge_all_data')\n\n \"\"\" All possible unique_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n date_times = np.array(date_times) \n\n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, = [] , [] , [] , [] , []\n best_ds_list = [] \n source_files = []\n station_configurations = []\n\n \"\"\" The items contained in the lists in the list below can be removed from the list when the record that was previously stored is removed. \"\"\"\n all_list = [all_combined_obs , all_combined_head, all_combined_era5fb , combined_indices , combined_date_time, best_ds_list, source_files , station_configurations ] # holder of all the above lists\n all_list_name = ['all_combined_obs' , 'all_combined_head', 'all_combined_era5fb' , 'combined_indices' , 'combined_date_time' , 'best_ds_list', 'source_files' ] \n \n removed_record, kept_record = [], []\n \n \"\"\" Dictionary that will contain the merged file. \"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #dt_bestds_dic = {} # store the selected best dataset for each dt \n #date_times=date_times[0:30000]\n tot = len(date_times)\n tt=time.time()\n print('*** Merging ' , tot, ' records ***')\n \n early_datasets = True\n \n self.processed_dt = [] \n \n for dt, c in zip(date_times, range(tot) ): # loop over all the possible date_times \n\n if (c+1)%1000==0:\n print('Analize : ', str(c+1) , '/', str(tot) , ' ', dt , ' ',\n now(time.time()),'{:5.3f}'.format(time.time()-tt ))\n\n delete = self.delete_ds(dt) # check if there is a dataset to delete \n \n \"\"\" Finding if this record is the same as the previous one analyzed, according to the given time_shift \"\"\"\n if c == 0:\n is_same_record = False\n else:\n is_same_record = self.is_same_record( time_shift = self.hour_time_delta , dt = dt)\n \n \"\"\" Updating list of processed datetimes \"\"\"\n self.processed_dt.append(dt) # cannot put it before the check_timeshift or it will check itself \n\n \n cleaned_df_container = {} \n all_len = [] # will hold the length of all the obs_tabs \n \n for k in self.dataset_per_dt[dt].keys() : # checking the list of available datasets \n ''' {'era5_2': ['example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._1:82930.gz.nc', \n 'example_stations/0-20000-0-82930_era5_2_harvested_era5.conv._82930.gz.nc']}\n ''' \n for F in self.dataset_per_dt[dt][k]: # checking the list of available files for the dataset\n \n if data[k][F][\"counter\"] %self.slice_size==0 or data[k][F][\"counter\"] == 0: # loading the data only at specific slices \n load = self.load_obstab_feedback_sliced(datetime=dt, dataset=k, file = F)\n \n data[k][F][\"counter\"] = data[k][F][\"counter\"] + 1 \n \n obs_tab, era5fb_tab = self.make_obstab_era5fb_dic(dataset = k , date_time = dt, File = F )\n\n if len(obs_tab['date_time'][:])==0: # go to next file if obs_tab is empty \n #print('ZERO length')\n continue \n\n all_len.append( len(obs_tab['date_time'][:] ) )\n \n if k not in cleaned_df_container.keys():\n cleaned_df_container[k] = {}\n\n cleaned_df_container[k][F] = {}\n cleaned_df_container[k][F]['obs_tab'] = obs_tab # cleaned dataframe \n cleaned_df_container[k][F]['era5fb_tab'] = era5fb_tab # cleaned dataframe \n \n \"\"\" Merging the different records found in the sifferent sources \"\"\"\n if bool(all_len): # skipping empty container 
dictionary. At this point I certainyl have one valid record \n best_ds, combined_obs_tab, combined_era5fb_tab, combined_head_tab, selected_file, best_file = self.combine_record(dt, container = cleaned_df_container)\n \n if is_same_record: # decide what to keep in case of same record\n temporary_previous = all_combined_obs[-1] # keep the temporary previous record \n\n if best_ds in ['era5_1','era5_2']: # best_ds from era5\n if best_ds_list[-1] not in ['era5_1','era5_2']: # remove previous non era5_1 or era5_2 record \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n\n elif best_ds_list[-1] in ['era5_1','era5_2']:\n if len(combined_obs_tab) <= len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab)\n continue # nothing to do, will keep the previous records -> go to next dt \n \n else: # case where both the current and previous are from era5_1 and era5_2, but the previous has smaller number of data \n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # best_ds not from era5\n if best_ds_list[-1] in ['era5_1','era5_2']:\n #print('This best ds is ' , best_ds , ' but I will keep ' , best_ds_list[-1] )\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else:\n if len(combined_obs_tab) < len(all_combined_obs[-1] ):\n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue # nothing to do, will keep the previous records -> go to next dt \n \n elif len(combined_obs_tab) > len(all_combined_obs[-1] ): # remove previous, keep current \n for lista in all_list:\n lista.pop() \n #kept_record.append(combined_obs_tab) \n #removed_record.append(temporary_previous)\n \n elif len(combined_obs_tab) == len(all_combined_obs[-1] ): # prefer igra2, otherwise\n if best_ds == 'igra2':\n for lista in all_list:\n lista.pop() \n #removed_record.append(temporary_previous)\n #kept_record.append(combined_obs_tab) \n \n else: # case where data source is not important, I keep the previous and do nothing \n #kept_record.append(temporary_previous) \n #removed_record.append(combined_obs_tab) \n continue \n \n else: # not the same record, nothing special to do, keep both previous and current \n pass \n else:\n print(' Found an empty record / time shifted record ')\n continue\n \n\n \"\"\" Fill the best_ds list \"\"\"\n best_ds_list.append(best_ds)\n\n \"\"\" Storing the selected file for the source_configuration \"\"\"\n source_files.append(selected_file)\n \"\"\" Selecting the station_configuration \"\"\"\n station_configurations.append(self.data[best_ds][best_file]['station_configuration'] )\n \n \"\"\" Storing the combined era5fb, header and observations tables\"\"\"\n all_combined_era5fb.append(combined_era5fb_tab)\n all_combined_obs .append(combined_obs_tab)\n \n primary, name = self.data[best_ds][best_file]['station_configuration']['primary_id'][0] , self.data[best_ds][best_file]['station_configuration']['station_name'][0] \n #combined_head_tab['primary_station_id'] = [ primary ] * len( combined_head_tab ) \n #combined_head_tab['station_name'] = [ name ] * len( combined_head_tab ) \n \n combined_head_tab['primary_station_id'] = np.array( [primary] )\n combined_head_tab['station_name'] = np.array( [name] )\n \n all_combined_head .append(combined_head_tab)\n\n \"\"\" Dictionary to fill the best_ds for duplicates 
\"\"\"\n #dt_bestds_dic[dt] = {}\n #dt_bestds_dic[dt]['best_ds'] = best_ds\n #dt_bestds_dic[dt]['len'] = len(combined_obs_tab['date_time'])\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n combined_indices.append(len(combined_obs_tab['date_time'])) \n combined_date_time.append(dt)\n\n del cleaned_df_container \n \n \n \n #print(blue + 'Memory used after deleting the cleaned_df_container: ', process.memory_info().rss/1000000000 , cend)\n\n \"\"\" Removing remaining loaded df \"\"\"\n for k in self.datasets_keys:\n for F in self.datasets[k]:\n try:\n del data[k][F]['era5fb_tab']\n print('=== removed era5fb ' , k , F )\n except:\n pass\n try:\n del data[k][F]['observations_table']\n print('=== removed obstab ' , k , F ) \n except:\n pass\n \n \n \"\"\" Saving a numpy dictionary \"\"\"\n print(\" === Saving the numpy dictionary of removed and kept records +++ \")\n #dic_records = { 'kept' : kept_record , 'removed': removed_record }\n #np.save(self.station + '_time_shift_removed_kept.npy',dic_records )\n \n \n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n combined_date_time = np.array(combined_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : combined_date_time.shape } , combined_date_time )\n di['recordtimestamp'].attrs['units']='seconds since 1900-01-01 00:00:00'\n\n \"\"\" Creating the merged indices mi \"\"\"\n mi = [] \n mi.append(0)\n for i in range(len(combined_indices)):\n mi.append( combined_indices[i] + mi[-1] )\n mi.pop()\n pop = np.array(mi) # removing last unecessary index \n di['recordindex'] = ( {'recordindex' : pop.shape } , pop )\n\n\n \"\"\" Creating the combined data \"\"\"\n logging.debug('*** Concatenating the observations_table ' ) \n combined_obs = {}\n #### Writing combined observations_table dic\n logging.info(' ***** Writing the observations_table to the netCDF output ***** ' ) \n for k in all_combined_obs[0].keys(): \n a = np.concatenate([all_combined_obs[i][k][:] for i in range(len(all_combined_obs))])\n if k == 'date_time':\n combined_obs[k]= a \n self.tot_records = len(combined_obs[k])\n self.write_merged(content = 'observations_table', table= {k:a})\n #logging.info('*** Written observations table %s: ', k)\n\n\n #self.tot_records = len(combined_obs['date_time'])\n del all_combined_obs\n print(blue + 'Memory used after deleting all_combined_obs dic: ', process.memory_info().rss/1000000000 , cend )\n \n dateindex = combined_obs['date_time']//86400 \n date_times, indices, counts = np.unique(dateindex, return_counts = True, return_index= True) \n di['dateindex'] = ( {'dateindex' : indices.shape } , indices ) # considers the day only \n del combined_obs\n \n combined_era5fb = {}\n #### Writing combined era5fb_table dic \n for k in all_combined_era5fb[0].keys():\n try:\n #combined_era5fb[k]=np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n #self.write_merged(content = 'era5fb', table= {k:combined_era5fb[k]})\n \"\"\" try replacing , remove combined_era5fb = {} \"\"\"\n a = np.concatenate([all_combined_era5fb[i][k][:] for i in range(len(all_combined_era5fb))])\n self.write_merged(content = 'era5fb', table= {k:a})\n logging.debug('*** Written era5fb %s: ', k)\n except:\n print(\"FAILED feedback variable \" , k)\n\n del all_combined_era5fb\n print(blue + 'Memory used after deleting era5fb_tab dic: ', process.memory_info().rss/1000000000 , cend)\n\n\n #### Writing combined header_table dic \n for k in all_combined_head[0].keys():\n print('head variable is', k )\n if 
( k == 'comments' or k == 'history'):\n continue\n try:\n tab=np.concatenate([all_combined_head[i][k][:] for i in range(len(all_combined_head))])\n self.write_merged(content = 'header_table', table= {k: tab}) # { key: np.array([])}\n logging.info('*** Written header table %s: ', k)\n except:\n print('FFF FAILED variable in header table', k )\n\n del all_combined_head\n print(blue + 'Memory used after deleting all_merged head_tab dic: ', process.memory_info().rss/1000000000 , cend)\n \n self.write_merged(content = 'recordindex', table = di) \n self.write_merged(content = 'cdm_tables', table= '')\n\n\n source_conf=xr.Dataset()\n source_files = np.array(source_files).astype(dtype='|S70')\n source_conf['source_file'] = ( {'source_file' : source_files.shape } , source_files )\n self.write_merged(content = 'source_configuration', table= source_conf )\n\n print(0)\n\n\n \"\"\" Concatenation of station_configurations \"\"\"\n station_conf = pd.concat( station_configurations ) \n for k in station_conf.columns:\n try:\n a =np.array( station_conf[k])\n self.write_merged(content = 'station_configuration', table= {k:a})\n logging.debug('*** Written station_configuration %s: ', k)\n except:\n print(\" Failed station_configuration \" , k )\n \n return 0",
"def find_headers(cls, **kwargs):\n run_start = find_run_start(**kwargs)\n for rs in run_start:\n _build_header(rs)\n return run_start # these have been built out into headers",
"def _header(self, path, files):\n headers = [fits.getheader(os.path.join(path, f))\n for f in sorted(files)]\n N = len(headers)\n\n def mean_key(headers, key, comment, type):\n return (np.mean([type(h[key]) for h in headers]), comment)\n\n h = fits.Header()\n h['BUNIT'] = 'e-/s'\n h['ORIGIN'] = 'Zwicky Transient Facility', 'Data origin'\n h['OBSERVER'] = 'ZTF Robotic Software', 'Observer'\n h['INSTRUME'] = 'ZTF/MOSAIC', 'Instrument name'\n h['OBSERVAT'] = 'Palomar Observatory', 'Observatory'\n h['TELESCOP'] = 'Palomar 48-inch', 'Observatory telescope'\n h['OBSLON'] = -116.8597, 'Observatory longitude (deg)'\n h['OBSLAT'] = 33.3483, 'Observatory latitude (deg E)'\n h['OBSALT'] = 1706., 'Observatory altitude (m)'\n h['IMGTYPE'] = 'object', 'Image type'\n h['NIMAGES'] = N, 'Number of images in stack'\n h['EXPOSURE'] = (sum([_['EXPOSURE'] for _ in headers]),\n 'Total stack exposure time (s)')\n if len(headers) == 0:\n return h\n\n h['MAGZP'] = 25.0, 'Magnitude zero point, solar color'\n h['MAGZPRMS'] = (\n np.sqrt(np.sum([h.get('MAGZPRMS', 0)**2 for h in headers])) / N,\n 'Mean MAGZP RMS')\n h['PCOLOR'] = headers[0]['PCOLOR']\n h['CLRCOEFF'] = mean_key(headers, 'CLRCOEFF',\n 'Mean color coefficient', float)\n\n h['OBSJD1'] = float(headers[0]['OBSJD']), 'First shutter start time'\n h['OBSJDN'] = float(headers[-1]['OBSJD']), 'Last shutter start time'\n h['OBSJDM'] = mean_key(\n headers, 'OBSJD', 'Mean shutter start time', float)\n\n wcsfn = sorted(files)[0]\n wcs = WCS(fits.getheader(os.path.join(path, wcsfn),\n extname='SANGLE'))\n h.update(wcs.to_header())\n h['WCSORIGN'] = wcsfn\n\n h['DBPID'] = (','.join([str(_['DBPID']) for _ in headers]),\n 'Database processed-image IDs')\n h['DESG'] = headers[0]['DESG'], 'Target designation'\n for k, comment in {\n 'RH': 'Mean heliocentric distance (au)',\n 'DELTA': 'Mean observer-target distance (au)',\n 'PHASE': 'Mean Sun-target-observer angle (deg)',\n 'RDOT': 'Mean heliocentric radial velocity, km/s',\n 'SELONG': 'Mean solar elongation, deg',\n 'SANGLE': 'Mean projected target->Sun position angle, deg',\n 'VANGLE': 'Mean projected velocity position angle, deg',\n 'TRUEANOM': 'Mean true anomaly (osculating), deg',\n 'TMTP': 'Mean T-Tp (osculating), days',\n 'TGTRA': 'Mean target RA, deg',\n 'TGTDEC': 'Mean target Dec, deg',\n 'TGTDRA': 'Mean target RA*cos(dec) rate of change,arcsec/s',\n 'TGTDDEC': 'Mean target Dec rate of change, arcsec/s',\n 'TGTRASIG': 'Mean target RA 3-sigma uncertainty, arcsec',\n 'TGTDESIG': 'Mean target Dec 3-sigma uncertainty, arcsec',\n }.items():\n try:\n h[k] = mean_key(headers, k, comment, float)\n except ValueError:\n # target rates might be empty strings\n h[k] = ''\n\n return h",
"def _main_header(self, hdr):\n d = {}\n # Called readDefAnalysis in OpenMIMS\n d['sample type'], d['data included'], d['sample x'], d['sample y'], \\\n d['analysis type'], d['user name'], d['sample z'], date, time = \\\n unpack(self._bo + '4i 32s 16s i 12x 16s 16s', hdr.read(112))\n\n d['data included'] = bool(d['data included'])\n d['user name'] = self._cleanup_string(d['user name'])\n d['analysis type'] = self._cleanup_string(d['analysis type']).lower()\n date = self._cleanup_string(date)\n time = self._cleanup_string(time)\n d['date'] = self._cleanup_date(date + ' ' + time)\n\n if self.header['file type'] in (27, 29, 39):\n # Called MaskImage/readMaskIm in OpenMIMS\n d['original filename'], d['analysis duration'], d['frames'], \\\n d['scan type'], d['magnification'], d['size type'], \\\n d['size detector'], d['beam blanking'], d['presputtering'], \\\n d['presputtering duration'] = \\\n unpack(self._bo + '16s 3i 3h 2x 3i', hdr.read(48))\n\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = {}\n d['HVControl']['hvcontrol enabled'] = False\n\n elif self.header['file type'] in (22, 41):\n # Called MaskSampleStageImage/readMaskIss in OpenMIMS\n d['original filename'], d['analysis duration'], d['scan type'], \\\n d['steps'], d['step size x'], d['step size y'], d['step size?'], \\\n d['step waittime'], d['frames'], d['beam blanking'], \\\n d['presputtering'], d['presputtering duration'] = \\\n unpack(self._bo + '16s 6i d 4i', hdr.read(64))\n\n d['scan type'] = _stage_scan_types.get(d['scan type'], str(d['scan type']))\n\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = self._hvcontrol(hdr)\n # Don't know if this unused byte needs to go after HVControl or after SigRef.\n hdr.seek(4, 1)\n\n elif self.header['file type'] in (21, 26):\n # Not in OpenMIMS\n # this bit same as image, 1 extra unused/unknown\n d['original filename'], d['analysis duration'], d['frames'], \\\n d['scan type'], d['magnification'], d['size type'], \\\n d['size detector'], d['beam blanking'], d['presputtering'], \\\n d['presputtering duration'] = \\\n unpack(self._bo + '16s 4x 3i 3h 2x 3i', hdr.read(52))\n\n # this bit same as stage scan\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = self._hvcontrol(hdr)\n\n # 24 bytes unknown, not sure if they go here or before AutoCal\n hdr.seek(24, 1)\n\n elif self.header['file type'] == 31:\n # Don't know if this is correct, all 0s anyway\n d['original filename'], d['scan type'], \\\n d['beam blanking'], d['presputtering'] = \\\n unpack(self._bo + '16s 3i 4x', hdr.read(32))\n\n elif self.header['file type'] == 35:\n d['original filename'], d['scan type'], d['analysis duration'], \\\n d['frames'], d['beam blanking'], d['presputtering'] = \\\n unpack(self._bo + '16s 5i 40x', hdr.read(76))\n\n d['AutoCal'] = self._autocal(hdr)\n d['HVControl'] = self._hvcontrol(hdr)\n\n else:\n raise TypeError('What type of image are you? 
{}'.format(self.header['file type']))\n\n # Continue main header for all types\n d['SigRef'] = self._sigref(hdr)\n d['masses'] = unpack(self._bo + 'i', hdr.read(4))[0]\n\n # scan type is set for stage scan analysis, set others\n if isinstance(d['scan type'], int):\n if d['scan type'] == 0:\n d['scan type'] = ''\n else:\n d['scan type'] = str(d['scan type'])\n\n d['beam blanking'] = bool(d['beam blanking'])\n d['presputtering'] = bool(d['presputtering'])\n d['original filename'] = self._cleanup_string(d['original filename'])\n\n if self.header['file type'] in (21, 26, 27, 29, 35, 39):\n if self.header['file version'] >= 4108:\n n = 60\n else:\n n = 10\n elif self.header['file type'] in (22, 31, 40, 41):\n n = 20\n else:\n n = 0\n\n # Not sure what this is, memory pointers? Not needed.\n # d['mass table ptr'] = unpack(self._bo + 2*n*'h', hdr.read(n*4))\n hdr.seek(n*4, 1)\n\n if self.header['file type'] in (21, 22, 26, 40, 41, 35):\n hdr.seek(4, 1) # 4 bytes unused\n\n # Mass table, dict by species label.\n d['MassTable'] = collections.OrderedDict()\n for m in range(d['masses']):\n mi = {}\n mi['trolley index'], unknown, mi['mass'], mi['matrix or trace'], \\\n mi['detector'], mi['wait time'], mi['frame count time'] = \\\n unpack(self._bo + '2i d 2i 2d', hdr.read(40))\n\n if self.header['file type'] == 31:\n if d['analysis type'].endswith('trolley step scan'):\n # start and end are in mm, step is in μm; convert to mm\n mi['radius start'], mi['radius end'], \\\n mi['radius step'], mi['b field bits'] = \\\n unpack(self._bo + '3d i', hdr.read(28))\n mi['radius step'] /= 1000\n else:\n mi['voltage start'], mi['voltage end'], \\\n mi['voltage step'], mi['b field bits'] = \\\n unpack(self._bo + '3d i', hdr.read(28))\n else:\n mi['offset'], mi['b field bits'] = unpack(self._bo + '2i', hdr.read(8))\n\n mi.update(self._species(hdr))\n\n if self.header['file type'] == 31:\n hdr.seek(4, 1)\n\n # Add correction controls, my own addition.\n mi['background corrected'] = False\n mi['deadtime corrected'] = False\n mi['yield corrected'] = False\n\n label = mi.pop('label')\n # This is true for NS50L and file version 4108.\n # Anywhere else different?\n # Maybe confirm this with the Trolleys dict,\n # there is an Esi trolley.\n if mi['trolley index'] == 8:\n label = 'SE'\n\n d['MassTable'][label] = mi\n\n # Create a few convenient lists\n d['label list'] = tuple(d['MassTable'].keys())\n d['label list fmt'] = tuple(format_species(m) for m in d['label list'])\n d['mass list'] = tuple(d['MassTable'][m]['mass'] for m in d['label list'])\n\n return d",
"def merge_all_data(self):\n \n logging.info('***** Starting the merging process ')\n\n \n \"\"\" All possible unqiue_dates to loop on \"\"\"\n date_times = self.merged_unique_dates\n date_times.sort()\n \n date_times = np.array(date_times) \n \n \"\"\" List storing the indices of the date_index of the merged dataset \"\"\"\n all_merged_obs , all_merged_head, all_merged_fb , merged_indices , merged_date_time, mi= [] , [] , [] , [] , [], []\n \n \"\"\" Dictionary that will contain the merged file. \"\"\" \n # rand = datetime.strptime('1981-01-03 12:00:00', '%Y-%m-%d %H:%M:%S') \n #for dt in date_times[3008:3100]: # loop over all the possible date_times \n \n tot = len(date_times)\n for dt, c in zip(date_times[3008:3100], range(tot) ): # loop over all the possible date_times \n #print('Analize : ', str(c) , '/', str(tot) , ' ', dt , ' ', now(time.time()) )\n \n logging.info('Analize : %s %s /', str(c) , str(tot) )\n \n cleaned_df_container = {} \n chunk = ''\n \n for k in self.dataset_per_dt[dt] : # checking the list of available datasets \n \n index, index_up = self.unique_dates[k]['indices'][dt]['low'] , self.unique_dates[k]['indices'][dt]['up'] # extracting the exact chunk of the dataframe where the data of this are stored \n \n chunk = self.data[k]['dataframe'].iloc[index:index_up]\n \n chunk['date_time'] = dt\n chunk = self.clean_dataframe(chunk) # cleaning from wrong or nan values \n \n if len(chunk)==0:\n continue\n \n cleaned_df_container[k] = {} \n cleaned_df_container[k]['df'] = chunk # cleaned dataframe \n\n \n if all(value == 0 for value in cleaned_df_container.values()):\n logging.debug('No data were found! ')\n continue\n \n merged_observations_table, best_ds, duplicates, header = self.merge_record(dt, container = cleaned_df_container)\n \n merged_observations_table['source_id'] = best_ds # adding extra columns i.e. chosen dataset, other dataset with data, number of pressure levels \n merged_observations_table['z_coordinate_type'] = 1 # only pressure inn [Pa] available at the moment. 
Check z_coordinate_type table for the correpsonding code \n \n \n \"\"\" Extracting the merged feedback, flagging the advanced_observations_feedback flag = 1\"\"\"\n feedback, merged_obs = self.get_reanalysis_feedback( dt, merged_observations_table , reanalysis='era5fb', best_ds= best_ds)\n all_merged_fb.append(feedback) \n all_merged_obs.append(merged_obs)\n \n \"\"\" Setting the correct report_id in the header table \"\"\"\n merged_report_id = merged_obs['report_id'].values[0] # same report_id as calculated in the observation_table \n header['report_id'] = merged_report_id \n all_merged_head.append(header)\n \n #if len(merged_observations_table) != len(header): \n #print('lengths check best ds: ', best_ds , ' obs_merged: ' , len(merged_observations_table), ' feedback:' , len(feedback) , ' header: ' , len(header) )\n #print( len(merged_observations_table), ' ' , len(feedback) )\n\n \"\"\" New merged recordindex and recordtimestamps indices \"\"\"\n merged_indices.append(len(merged_observations_table)) \n merged_date_time.append(dt)\n\n\n \"\"\" Storing the merged date_time values and indices \"\"\"\n di=xr.Dataset()\n merged_date_time = np.array(merged_date_time)\n di['recordtimestamp'] = ( {'recordtimestamp' : merged_date_time.shape } , merged_date_time )\n \n \n \"\"\" Creating the merged indices \"\"\"\n mi.append(0)\n for i,ind in zip(merged_indices[0:], range(len(merged_indices[0:]) ) ) :\n mi.append(mi[ind] + i )\n mi = np.array(mi) \n di['recordindex'] = ( {'recordindex' : mi.shape } , mi )\n self.MergedRecordIndex = di \n \n \n \"\"\" Creating the merged dataframes \"\"\"\n logging.debug('*** Concatenating the observations_table dataframes' ) \n merged_obs = pd.concat (all_merged_obs)\n \n self.MergedObs = merged_obs \n logging.debug('*** Finished concatenating theobservations_table dataframes' ) \n \n logging.debug('*** Concatenating the header_table dataframes' ) \n merged_hd = pd.concat (all_merged_head)\n self.MergedHead = merged_hd \n logging.debug('*** Finished concatenating the header_table dataframes' ) \n \n logging.debug('*** Concatenating the feedback dataframes' ) \n merged_fb = pd.concat (all_merged_fb)\n self.MergedFeedback = merged_fb \n logging.debug('*** Finished concatenating the feedback dataframes' ) \n\n return 0",
"def getPadHeaderFiles(padPath, dateStart, dateStop, sensor):\n padFiles,sampleRate,dataColumns = getPadFiles(padPath,dateStart,dateStop,sensor,'.header')\n return padFiles,sampleRate,dataColumns",
"def analyze_all(datadir, TPQI_starts, dataruns, save = 1, lower = 38.4):\n dirs = os.listdir(datadir)\n idx = 0\n right_dirs = list()\n\n\n for l in dataruns:\n for k in arange(len(dirs)):\n mark_right = '_interference_'+num2str(l,0) in dirs[k]\n \n if mark_right and (len(dirs[k]) > len('_interference_'+num2str(l,0))+6):\n mark_right = False\n\n if mark_right:\n right_dirs.append(dirs[k])\n idx += 1\n continue\n\n \n if len(right_dirs) == 0:\n print 'Did not find any files'\n\n if len(dataruns) == len(right_dirs):\n print 'Found all files...'\n else:\n print 'Beware, not all files are taken into account, file(s) missing.'\n \n tail_over_time = zeros(len(right_dirs))\n tpqi_starts = TPQI_starts[dataruns]\n statistics_info = zeros([len(right_dirs),4])\n \n for k in arange(len(right_dirs)):\n tail_over_time[k] = tail_cts_per_shot(datapath = datadir+'\\\\'+right_dirs[k], lower = lower, TPQI_starts = tpqi_starts[k], save = save)\n statistics_info[k,:] = analyze_thresholds(datapath = datadir+'\\\\'+right_dirs[k], threshold_lt1 = 0, threshold_lt2 = 9, normalize = True, save = save)\n\n\n os.chdir(datadir)\n percentage_finished = float(k+1)/len(right_dirs)*100\n print 'finished: '+num2str(percentage_finished,0)+'%'\n\n\n if save:\n times_passed_overall_lt1 = statistics_info[:,0]\n times_passed_after_seq_lt1 = statistics_info[:,1]\n times_passed_overall_lt2 = statistics_info[:,2]\n times_passed_after_seq_lt2 = statistics_info[:,3]\n filename = 'statistics_run_'+num2str(dataruns.min(),0)+'_to_'+num2str(dataruns.max(),0)+'.npz' \n savez(filename, tpqi_starts = tpqi_starts, tail_over_time = tail_over_time,\n times_passed_overall_lt1 = times_passed_overall_lt1, \n times_passed_after_seq_lt1 = times_passed_after_seq_lt1, \n times_passed_overall_lt2 = times_passed_overall_lt2,\n times_passed_after_seq_lt2 = times_passed_after_seq_lt2)\n\n \n\n figure3 = plt.figure(figsize=(12.0, 16.0))\n plt.subplot(211)\n plt.plot(dataruns,tail_over_time*1E4, '-k')\n plt.xlabel('TPQI run number')\n plt.ylabel('Tail counts per shot (x 1E-4)')\n plt.grid()\n plt.ylim([0,1.1*max(tail_over_time*1E4)])\n\n plt.subplot(212)\n plt.plot(dataruns,TPQI_starts[0:len(right_dirs)], '-k')\n plt.xlabel('TPQI run number')\n plt.ylabel('TPQI starts per run')\n plt.grid()\n plt.ylim([0, 1.1*TPQI_starts[0:len(right_dirs)].max()])\n if save:\n figure3.savefig('tpqi_starts_and_tail_over_time.png')",
"def _build_header_dictionary(self):\n start = 0\n #print self.raw_data\n for a in range(20):\n redatapuller = re.compile(\"\\r\\n\\r\\n\\r\\n(?P<word>.*?)\\t.*?\\n\", re.DOTALL)\n m = redatapuller.search(self.raw_data[start:])\n if not(m):\n break\n self.header_dictionary[m.group(\"word\")] = start + m.end()\n if a==0:\n self.header_dictionary[\"main\"] = start + m.end()\n start += m.end()",
"def assemble(self, dt_range=None):\n if dt_range is not None:\n self.dt_list = trace_source.time_list(dt_range[0],\n dt_range[1],\n self.config['time']['step'])\n\n # only for the testcase\n traj_dir = self.config['partposit_dir']\n days_avail = os.listdir(traj_dir)\n # filter only for the trajectory files with tdump extension\n days_avail = [f for f in days_avail if len(f) == 11]\n print(days_avail)\n folders = [f for f in days_avail if datetime.datetime.strptime(f, \"%Y%m%d_%H\") in self.dt_list]\n\n assert len(folders) > 0, 'no folders with flexpart partposit data'\n\n # the defaultdict is used here to sort the files by datetime within a dictionary\n # filtered_files = defaultdict(list)\n # for f in files:\n # # regex the yyyymmdd-hh timestamp in the filename\n # dt = datetime.datetime.strptime(re.search('([0-9]{8})-([0-9]){2}', f).group(0), '%Y%m%d-%H')\n # height = float(re.search('([0-9]{3,6})(?=_0[0-9-]{1,4}.tdump)', f).group(0))\n # #print(f, dt, height)\n # if dt >= self.dt_list[0] and dt <= self.dt_list[-1]:\n # filtered_files[dt].append((f,height))\n\n # here an empty dict is generated with a zero containing array\n self.stat2d_dict = defaultdict(lambda: np.zeros((len(self.dt_list), len(self.height_list))))\n\n self.statls_dict = defaultdict(lambda: np.zeros((len(self.dt_list), len(self.height_list), 7)))\n\n self.raw_dict = defaultdict(lambda: np.zeros((len(self.dt_list), len(self.height_list),\n abs(self.config['time']['tr_duration'])+1)))\n\n # TODO make more than 7 geo names possible\n ng = trace_source.land_sfc.named_geography(self.config['geonames'])\n self.geo_names = ng.geo_names\n no_geo_names = len(list(self.geo_names.keys()))\n self.statgn_dict = defaultdict(lambda: np.zeros((len(self.dt_list),\n len(self.height_list),\n no_geo_names)))\n\n\n self.lat_names = {0: '<-60', 1: '-60..-30', 2:'-30..0', 3: '0..30', 4: '30..60', 5: '>60'}\n self.statlat_dict = defaultdict(lambda: np.zeros((len(self.dt_list),\n len(self.height_list),\n len(list(self.lat_names.keys())))))\n\n\n ls = trace_source.land_sfc.land_sfc()\n self.ls_categories = ls.categories\n\n\n for it, dt in enumerate(self.dt_list[:]):\n print('trajectories eding at ', dt)\n files_for_time = os.listdir(traj_dir + dt.strftime(\"%Y%m%d_%H\"))\n files_for_time = sorted([f for f in files_for_time if \"partposit_\" in f])\n folder = traj_dir + dt.strftime(\"%Y%m%d_%H\") + \"/\"\n print('files_for_time ', files_for_time)\n\n print('heights ', len(self.height_list), self.height_list)\n\n flex_stat = [flex_statistics(self.config, ls=ls, ng=ng) for h in self.height_list]\n traj_meta = read_flexpart_traj_meta(folder + \"trajectories.txt\")\n\n self.no_part.append(traj_meta['releases_meta'][1]['no_particles'])\n self.time_res.append(10*24/len(files_for_time))\n\n # different structure than hysplit\n # 1. loop through the ending times of the current day\n # 2. load partposit for a specified time\n # 3. 
loop through heights\n\n for f in files_for_time:\n print('files_for_time ', f)\n part_pos = read_partpositions(folder + f, 1, ctable=True)\n part_pos = np.array(part_pos)\n\n for ih, h in enumerate(self.height_list):\n #print(\"at \", ih, h)\n this_population = np.where(part_pos[:,0] == ih+1)[0]\n #release_sel = np.array([list(p) for p in part_pos if p[0]==ih+1])\n release_sel = part_pos[this_population, :]\n #assert np.all(release_sel == other_release)\n meta = traj_meta['releases_meta'][ih+1]\n #print(meta)\n assert np.mean(meta['heights']) == h, f\"{meta['heights']} {h} do not fit\"\n flex_stat[ih].add_partposits_gn(release_sel)\n\n flex_stat[ih].add_partposits_ls(release_sel)\n flex_stat[ih].add_partposits_thres(release_sel)\n\n # now assemble the statistics for all heights\n for ih, h in enumerate(self.height_list): \n flex_stat[ih].calc_gn_stat()\n for k in list(flex_stat[ih].stat_gn.keys()):\n self.stat2d_dict[k+'_no_below'][it, ih] = flex_stat[ih].stat_gn[k].no_below\n print('stat gn ', h, k, flex_stat[ih].stat_gn[k])\n self.statgn_dict[k][it, ih] = list(flex_stat[ih].stat_gn[k].counter.values())\n\n flex_stat[ih].calc_ls_stat()\n for k in list(flex_stat[ih].stat_ls.keys()):\n self.stat2d_dict[k+'_no_below'][it, ih] = flex_stat[ih].stat_ls[k].no_below\n print('stat ls ', h, k, flex_stat[ih].stat_ls[k])\n self.statls_dict[k][it, ih] = list(flex_stat[ih].stat_ls[k].counter.values())\n\n flex_stat[ih].calc_thres_stat()\n for k in list(flex_stat[ih].stat_lat.keys()):\n self.stat2d_dict[k+'_no_below'][it, ih] = flex_stat[ih].stat_lat[k].no_below\n print('stat_lat ', h, k, flex_stat[ih].stat_lat[k])\n self.statlat_dict[k][it, ih] = list(flex_stat[ih].stat_lat[k].counter.values())\n\n\n # #assert len(f_list) > 1\n # for ih, f in enumerate(f_list):\n # print(it, ih, f[1], dt)\n # traj = trajectory(self.config)\n # traj.load_file(traj_dir+f[0], silent=True)\n # savepath = '{}/{}'.format(self.config['plot_dir'], dt.strftime('%Y%m%d'))\n\n\n # if \"timeinterval\" in self.config['plotmap']:\n # timeinterval = self.config['plotmap']['timeinterval']\n # else:\n # timeinterval = 12\n # if \"heights\" in self.config['plotmap']:\n # heightlist = self.config['plotmap']['heights']\n # else:\n # heightlist = [1500.0, 3000.0, 4500.0]\n # #if f[1] == 3000.0 and dt.hour % 12 == 0:\n # if f[1] in heightlist and dt.hour % timeinterval == 0:\n # print(\"plotting \", f[1], dt.hour)\n # plot_trajectories_ens(traj, savepath, ls=ls, config=self.config)\n # #continue\n\n # traj.evaluate(silent=True)\n # traj.add_land_sfc(ls, silent=True)\n # traj.add_ensemble_land_sfc(ls)\n # traj.add_ensemble_geo_names(ng)\n # #traj.add_area_land_sfc('md', ls, silent=True)\n # #traj.add_area_land_sfc(2000, ls, silent=True)\n\n # #print(\"at step\", it, dt, ih, f)\n # #print('keys ', traj.statistics.keys())\n # # now the empty dict is filled with the keys (and values) of the statistics dict from traj\n # for k in list(traj.statistics.keys()):\n # self.stat2d_dict[k][it, ih] = traj.statistics[k]\n # # subset of trajectory data to collect\n # param_collect = ['latitude', 'longitude', 'height', \"PRESSURE\", \"AIR_TEMP\",\n # \"RAINFALL\", \"RELHUMID\", \"TERR_MSL\", 'age']\n # if 'land_sfc_category' in list(traj.data.keys()):\n # param_collect.append('land_sfc_category')\n # for k in param_collect:\n # #self.raw_dict[k][it, ih, :traj.data[1][k].shape[0]] = traj.data[1][k]\n # self.raw_dict[k][it, ih, :] = traj.data[1][k]\n # #self.raw_dict[k][it, ih, traj.data[1][k].shape[0]:] = -999.\n\n # for k in list(traj.stat_ls.keys()):\n # 
self.stat2d_dict[k+'_no_below'][it, ih] = traj.stat_ls[k].no_below\n # print('stat ls ', k, traj.stat_ls[k])\n # self.statls_dict[k][it, ih] = list(traj.stat_ls[k].counter.values())\n\n # for k in list(traj.stat_gn.keys()):\n # self.stat2d_dict[k+'_no_below'][it, ih] = traj.stat_gn[k].no_below\n # print('stat gn ', k, traj.stat_gn[k])\n # self.statgn_dict[k][it, ih] = list(traj.stat_gn[k].counter.values())\n\n # trying to free memory\n del ls\n del ng",
"def manage_headers(dem_header_file, header_paths):\n dem_header = parse_dem_header(dem_header_file)\n # find param files containing filename dates\n if len(header_paths) == 2:\n headers = [parse_epoch_header(hp) for hp in header_paths]\n combined_header = combine_headers(headers[0], headers[1], dem_header)\n else:\n # probably have DEM or incidence file\n combined_header = dem_header\n combined_header[ifc.DATA_TYPE] = ifc.DEM\n\n return combined_header",
"def combine_headers(hdr0, hdr1, dem_hdr):\n if not all([isinstance(a, dict) for a in [hdr0, hdr1, dem_hdr]]):\n raise GammaException('Header args need to be dicts')\n\n date0, date1 = hdr0[ifc.FIRST_DATE], hdr1[ifc.FIRST_DATE]\n if date0 == date1:\n raise GammaException(\"Can't combine headers for the same day\")\n elif date1 < date0:\n raise GammaException(\"Wrong date order\")\n\n chdr = {ifc.PYRATE_TIME_SPAN: (date1 - date0).days / ifc.DAYS_PER_YEAR,\n ifc.FIRST_DATE: date0,\n ifc.FIRST_TIME: hdr0[ifc.FIRST_TIME],\n ifc.SECOND_DATE: date1,\n ifc.SECOND_TIME: hdr1[ifc.FIRST_TIME],\n ifc.DATA_UNITS: RADIANS,\n ifc.PYRATE_INSAR_PROCESSOR: GAMMA}\n\n # set incidence angle to mean of first amd second image values\n inc_ang = hdr0[ifc.PYRATE_INCIDENCE_DEGREES]\n if np.isclose(inc_ang, hdr1[ifc.PYRATE_INCIDENCE_DEGREES], atol=1e-1):\n chdr[ifc.PYRATE_INCIDENCE_DEGREES] = (hdr0[ifc.PYRATE_INCIDENCE_DEGREES] + hdr1[\n ifc.PYRATE_INCIDENCE_DEGREES]) / 2\n else:\n msg = \"Incidence angles differ by more than 1e-1\"\n raise GammaException(msg)\n\n wavelen = hdr0[ifc.PYRATE_WAVELENGTH_METRES]\n if np.isclose(wavelen, hdr1[ifc.PYRATE_WAVELENGTH_METRES], atol=1e-6):\n chdr[ifc.PYRATE_WAVELENGTH_METRES] = wavelen\n else:\n args = (chdr[ifc.FIRST_DATE], chdr[ifc.SECOND_DATE])\n msg = \"Wavelength mismatch, check both header files for %s & %s\"\n raise GammaException(msg % args)\n # non-cropped, non-multilooked geotif process step information added\n chdr[ifc.DATA_TYPE] = ifc.ORIG\n\n chdr.update(dem_hdr) # add geographic data\n return chdr",
"def combine(files, output):\n # read all files\n bxrs = [h5py.File(f,'r') for f in files]\n # some paths we might care about & will copy\n metadata_paths = [\n '3BRecInfo/3BRecVars/MaxVolt',\n '3BRecInfo/3BRecVars/MinVolt',\n '3BRecInfo/3BRecVars/BitDepth',\n '3BRecInfo/3BRecVars/SignalInversion',\n '3BRecInfo/3BRecVars/SamplingRate',\n '3BRecInfo/3BRecVars/ExperimentType',\n '3BRecInfo/3BMeaChip/NRows',\n '3BRecInfo/3BMeaChip/NCols',\n '3BRecInfo/3BMeaChip/Layout',\n '3BRecInfo/3BMeaChip/MeaType',\n '3BRecInfo/3BMeaSystem/FwVersion',\n '3BRecInfo/3BMeaSystem/HwVersion',\n '3BRecInfo/3BMeaSystem/System'\n ]\n\n # count n_frames, n_samples from each file\n # also verify that key metadata matches\n n_frames = bxrs[0]['3BRecInfo/3BRecVars/NRecFrames'][0]\n n_samples = [bxrs[0]['3BData/Raw'].shape[0]]\n sampling_rate = bxrs[0]['3BRecInfo/3BRecVars/SamplingRate'][0]\n print(\"checking that all brw files have matching metadata\")\n for b in bxrs[1:]:\n for m in metadata_paths:\n try:\n if len(bxrs[0][m])==1:\n assert bxrs[0][m][:] == b[m][:]\n else:\n assert np.all(bxrs[0][m][:] == b[m][:])\n except Exception as E:\n logger.warn(f\"\"\"metadata does not match for {m}:\n found {bxrs[0][m]} and {b[m]}\n \"\"\")\n n_frames += b['3BRecInfo/3BRecVars/NRecFrames'][0]\n n_samples.append(b[\"3BData/Raw\"].shape[0])\n print(f\"combined duration: {n_frames/sampling_rate/60:.2f} minutes\")\n\n out_bxr = h5py.File(output, \"w\")\n # copy metadata\n bxrs[0].visititems(partial(glia.copy_metadata, copy_to=out_bxr))\n\n # copy data\n out_bxr['3BRecInfo/3BRecVars/NRecFrames'] = [n_frames]\n out_bxr['nSamplesPerRecording'] = n_samples\n tot_samples = sum(n_samples)\n assert np.isclose(tot_samples/n_frames, 4096) #4096 channels\n \n # copy raw data\n raw_dtype = bxrs[0][\"3BData/Raw\"].dtype\n dset = out_bxr.create_dataset(\"3BData/Raw\", (tot_samples,),\n dtype=raw_dtype)\n start_sample = 0\n max_chunk = int(1e8) # <1GiB \n for i, b in enumerate(bxrs):\n print(f\"Copying {files[i]}\")\n end_sample = start_sample+n_samples[i]\n for s in tqdm(range(0,n_samples[i],max_chunk)):\n e = min(s+max_chunk, end_sample)\n dset[start_sample+s:start_sample+e] = b[\"3BData/Raw\"][s:e]\n start_sample = end_sample\n\n # cleanup\n out_bxr.close()\n [b.close() for b in bxrs]",
"def readHeader(self, filename):\n f = Data.Usrxxx.readHeader(self, filename)\n# self.sayHeader()\n \n while True:\n data = fortran.read(f)\n if data is None: break\n size = len(data)\n# print(\"size: \", size)\n\n if size == 14 and data[:10] == \"STATISTICS\":\n self.statpos = f.tell()\n for det in self.detector:\n data = Data.unpackArray(fortran.read(f))\n det.total = data[0]\n det.totalerror = data[1]\n# for j in range(6):\n# fortran.skip(f)\n break\n\n if size != 50: raise IOError(\"Invalid USRTRACK/USRCOLL file\")\n\n header = struct.unpack(\"=i10siiififfif\", data)\n\n det = Data.Detector()\n det.nb = header[0]\n det.name = header[1].strip() # titutc - track/coll name\n det.type = header[2] # itustc - type of binning: 1 - linear energy etc\n det.dist = header[3] # idustc = distribution to be scored\n det.reg = header[4] # nrustc = region\n det.volume = header[5] # vusrtc = volume (cm**3) of the detector\n det.lowneu = header[6] # llnutc = low energy neutron flag\n det.elow = header[7] # etclow = minimum energy [GeV]\n det.ehigh = header[8] # etchgh = maximum energy [GeV]\n det.ne = header[9] # netcbn = number of energy intervals\n det.de = header[10] # detcbn = energy bin width\n\n self.detector.append(det)\n\n if det.lowneu:\n data = fortran.read(f)\n det.ngroup = struct.unpack(\"=i\",data[:4])[0]\n det.egroup = struct.unpack(\"=%df\"%(det.ngroup+1), data[4:])\n print(\"Low energy neutrons scored with %d groups\" % det.ngroup)\n else:\n\t\tdet.ngroup = 0\n\t\tdet.egroup = []\n\n\t size = (det.ngroup+det.ne) * 4\n\t if size != fortran.skip(f):\n\t\traise IOError(\"Invalid USRTRACK file\")\n f.close()",
"def parse_headers(self):\n\n logger.debug(f\"parse headers of {self.path}\")\n with open(self.path, 'rb') as f:\n parser = BinaryParser(f)\n magic, version_major, version_minor = parser.unpack(\"<2sBB\")\n if magic != b'RW':\n raise ValueError(\"invalid magic code\")\n self.version = (version_major, version_minor)\n\n if version_major == 1:\n parser.seek(8)\n elif version_major == 2:\n parser.seek(100)\n elif version_major == 3:\n parser.seek(268)\n else:\n raise ValueError(f\"unsupported WAD version: {version_major}.{version_minor}\")\n\n entry_count, = parser.unpack(\"<I\")\n\n if version_major == 1:\n self.files = [WadFileHeader(*parser.unpack(\"<QIIII\")) for _ in range(entry_count)]\n else:\n self.files = [WadFileHeader(*parser.unpack(\"<QIIIBBBBQ\")) for _ in range(entry_count)]",
"def _generate_header_files(self):\n return True",
"def parse_metadata(self):\n import csv\n f = open(self.seq_id_list)\n self.names = f.readlines()\n f.close()\n num_samples = len(self.names)\n for i in range(len(self.names)):\n self.names[i] = self.names[i].replace(\"\\n\", \"\")\n # Go through the combined metadata file - it has most of the data we need.\n metadata = csv.DictReader(open(self.nasmnt + \"WGSspades/reports/combinedMetadata.csv\"))\n metadata_count = 0\n for row in metadata:\n # There has to be a more elegant way to do this.\n if row[\"SampleName\"] in self.names:\n data = dict()\n data[\"Investigator\"] = row[\"Investigator\"]\n data[\"Coverage\"] = row[\"AverageCoverageDepth\"]\n data[\"TotalLength\"] = row[\"TotalLength\"]\n data[\"rST\"] = row[\"rMLSTsequenceType\"]\n data[\"PipelineVersion\"] = row[\"PipelineVersion\"]\n data[\"MLST\"] = row[\"MLSTsequencetype\"]\n data[\"geneSeekr\"] = row[\"geneSeekrProfile\"].split(\";\")\n self.metadata[row[\"SampleName\"]] = data\n metadata_count += 1\n # Need to look in external WGS spades as well.\n metadata = csv.DictReader(open(self.nasmnt + \"External_WGSspades/reports/combinedMetadata.csv\"))\n for row in metadata:\n # There has to be a more elegant way to do this.\n if row[\"SampleName\"] in self.names:\n data = dict()\n data[\"Investigator\"] = row[\"Investigator\"]\n data[\"Coverage\"] = row[\"AverageCoverageDepth\"]\n data[\"TotalLength\"] = row[\"TotalLength\"]\n data[\"rST\"] = row[\"rMLSTsequenceType\"]\n data[\"PipelineVersion\"] = row[\"PipelineVersion\"]\n data[\"MLST\"] = row[\"MLSTsequencetype\"]\n data[\"geneSeekr\"] = row[\"geneSeekrProfile\"].split(\";\")\n self.metadata[row[\"SampleName\"]] = data\n metadata_count += 1\n\n\n\n # Also need to go through the rMLST file to make sure that all rMLST genes are covered.\n rMLST_data = csv.DictReader(open(self.nasmnt + \"WGSspades/reports/rmlst.csv\"))\n metadata_count = 0\n for row in rMLST_data:\n if row[\"Strain\"] in self.names:\n self.metadata[row[\"Strain\"]][\"Matches\"] = row[\"Matches\"]\n metadata_count += 1\n # Check external runs.\n rMLST_data = csv.DictReader(open(self.nasmnt + \"External_WGSspades/reports/rmlst.csv\"))\n for row in rMLST_data:\n if row[\"Strain\"] in self.names:\n self.metadata[row[\"Strain\"]][\"Matches\"] = row[\"Matches\"]\n\n\n\n # Finally, need to get info on the MLST sequence type.\n metadata_count = 0\n mlst_data = csv.DictReader(open(self.nasmnt + \"WGSspades/reports/mlst.csv\"))\n for row in mlst_data:\n if row[\"Strain\"] in self.names:\n mlst = list()\n for i in range(1, 8):\n mlst.append(row[str(i)])\n self.metadata[row[\"Strain\"]][\"mlst_info\"] = mlst\n metadata_count += 1\n\n # Also from External.\n mlst_data = csv.DictReader(open(self.nasmnt + \"External_WGSspades/reports/mlst.csv\"))\n for row in mlst_data:\n if row[\"Strain\"] in self.names:\n mlst = list()\n for i in range(1, 8):\n mlst.append(row[str(i)])\n self.metadata[row[\"Strain\"]][\"mlst_info\"] = mlst\n metadata_count += 1\n\n # Go through the ROGA Summary file from the access DB to get strain/textual IDs, and 1' and 2' enzymes.\n try: # Assume we're using ROGA summary OLF. 
If it isn't there, assume ROGA summary OLC\n df = pd.read_excel('ROGA_summary_OLF.xlsx')\n for i in df.index:\n if df['SeqTracking_SEQID'][i] in self.names:\n seqid = df['SeqTracking_SEQID'][i]\n self.metadata[seqid][\"IsolateID\"] = df['Isolate ID'][i]\n self.metadata[seqid][\"TextualID\"] = df['Textual ID'][i]\n self.metadata[seqid][\"1Enzyme\"] = df[\"1' Enzyme\"][i]\n self.metadata[seqid][\"2Enzyme\"] = df[\"2' Enzyme\"][i]\n self.metadata[seqid][\"Source\"] = df['Source'][i]\n self.metadata[seqid][\"ReceivedDate\"] = df['ReceivedDate'][i]\n self.metadata[seqid][\"SequenceDate\"] = df['SequenceDate'][i]\n self.metadata[seqid][\"SequencedBy\"] = df['SequenceBy'][i]\n metadata_count += 1\n\n\n except FileNotFoundError: # Should be a file not found error - look it up.\n metadata_count = 0\n df = pd.read_excel('ROGA_summary_OLC.xlsx')\n for i in df.index:\n if df['SeqTracking_SEQID'][i] in self.names:\n seqid = df['SeqTracking_SEQID'][i]\n self.metadata[seqid][\"IsolateID\"] = df['OLN ID'][i]\n self.metadata[seqid][\"TextualID\"] = df['Lab ID'][i]\n self.metadata[seqid][\"ReceivedDate\"] = df['ReceivedDate'][i]\n self.metadata[seqid][\"SequenceDate\"] = df['SequenceDate'][i]\n self.metadata[seqid][\"SequencedBy\"] = df['SequenceBy'][i]\n metadata_count += 1\n # print(self.metadata)\n self.check_for_empty_data()",
"def parseheader(self):\n for line in self.rawheader.split(\"\\n\"):\n pat = \"QUITTING\"\n if pat in line:\n self.prefix = line\n continue\n\n pat = \"VERSION NUMBER\"\n if pat in line:\n self.softvers = line[28:].strip()\n continue\n\n pat = \"DATE/TIME IS\"\n if pat in line:\n meta = line[22:].strip()\n matchobj = dtpat.match(meta)\n if matchobj:\n try:\n self.dumpdt = datetime.strptime(meta, moddtfmt)\n except:\n self.nodump = True\n self.comment += (\n \" *** Cannot read module date/time: {}\\n\".format(meta)\n )\n continue\n\n pat = \"NUMBER RECORDS IS\"\n if pat in line:\n self.ndumprec = line[22:].strip()\n continue\n\n pat = \"MODULE TYPE IS\"\n if pat in line:\n self.modtype = line[22:].strip()\n continue\n\n pat = \"SERIAL NUMBER IS\"\n if pat in line:\n self.modserial = line[22:].strip()\n continue\n\n pat = \"COND S/N IS\"\n if pat in line:\n meta = line[22:].strip()\n serials = meta.split(\"/\")\n self.cellserial = serials[1]\n self.ioserial = serials[0]\n continue\n\n pat = \"SAMPLING INTERVAL IS\"\n if pat in line:\n meta = line[22:].strip()\n self.sampintv = meta\n if meta == \"00:01:00\":\n self.nodump = False\n self.comment += \" *** Sample interval is {}\\n\".format(meta)\n elif meta != \"00:02:00\":\n self.nodump = True\n self.comment += \" *** Sample interval is {}\\n\".format(meta)\n continue\n\n pat = \"AVERAGE INTERVAL IS\"\n if pat in line:\n self.avgintv = line[22:].strip()\n if int(self.avgintv) != 24:\n self.nodump = True\n self.comment += \" *** Average interval is {}\\n\".format(meta)\n continue\n\n pat = \"BATTERY VOLTAGE IS\"\n if pat in line:\n self.voltage = line[22:].strip()\n continue\n\n return self.modserial",
"def main():\n location = os.getcwd()\n header = \"Date,Time,Voltage,Current,Isolation,Range,SoC,Distance,Fan rpm,Fan Torque,Hyd. Pump rpm,Hyd. Pump Torque,SW Pump rpm,SW Pump Torque,Nozzle,Sidebrushes,WideSweepBrush,TempIGBT-Fan,Fan motor temp, Traction rpm, Traction torque,BMS1 Volts, BMS2 Volts\"\n header = header+\"\\n\"\n\n of =\"outFile.csv\"\n outFile = open(of, \"w\")\n outFile.write(header)\n\n for file in os.listdir(location ):\n try:\n if file.endswith(\".csv\") and not(file.startswith(\"outFile\")):\n print(\"...reading {}\".format(file))\n fcsv = csv.reader(open(file, newline=''), delimiter=' ', quotechar='|') \n for row in fcsv:\n line = ', '.join(row)\n if line[:4] == \"Date\":\n d = line[5:13]\n dd = d[6:9]+\"/\"+d[4:6]+\"/\"+d[:4]\n next\n elif line[12] == \"*\" or line[0] == \"*\":\n next\n elif line[0] == \"T\":\n next\n else:\n L = dd + \",\" + line + \"\\n\"\n outFile.write(L)\n except Exception as e:\n raise e\n print(\"No CSV files in here!\")\n\n try: \n print(\"\\nAll files have been merged into: {}\".format(of))\n outFile.close()\n \n except Exception as ee:\n raise ee",
"def test_merge_dim_header():\n hdr_in_1 = Hdr_Ext.from_header_ext(\n {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}})\n hdr_in_2 = Hdr_Ext.from_header_ext(\n {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3],\n 'p2': [0.1, 0.2, 0.3]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}})\n\n hdr_out = nmrs_tools.split_merge._merge_dim_header(hdr_in_1, hdr_in_2, 5, 4, 3)\n assert hdr_out == {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4, 1, 2, 3],\n 'p2': [0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}}\n\n hdr_in_2 = Hdr_Ext.from_header_ext(\n {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 5, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}})\n hdr_out = nmrs_tools.split_merge._merge_dim_header(hdr_in_1, hdr_in_2, 6, 4, 4)\n assert hdr_out == {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}}\n\n hdr_out = nmrs_tools.split_merge._merge_dim_header(hdr_in_2, hdr_in_1, 6, 4, 4)\n assert hdr_out == {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': [5, 6, 7, 8, 1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}}\n\n hdr_in_2 = Hdr_Ext.from_header_ext(\n {'SpectrometerFrequency': [100.0, ],\n 
'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 5, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4]}})\n hdr_out = nmrs_tools.split_merge._merge_dim_header(hdr_in_1, hdr_in_2, 7, 4, 4)\n assert hdr_out == {'SpectrometerFrequency': [100.0, ],\n 'ResonantNucleus': ['1H', ],\n 'dim_5': 'DIM_DYN',\n 'dim_5_info': 'averages',\n 'dim_5_header': {'p1': [1, 2, 3, 4],\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_6': 'DIM_EDIT',\n 'dim_6_info': 'edit',\n 'dim_6_header': {'p1': {'start': 1, 'increment': 1},\n 'p2': [0.1, 0.2, 0.3, 0.4]},\n 'dim_7': 'DIM_USER_0',\n 'dim_7_info': 'other',\n 'dim_7_header': {'p1': {'Value': {'start': 1, 'increment': 1}, 'Description': 'user'},\n 'p2': [0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4]}}\n\n with pytest.raises(NIfTI_MRSIncompatible) as exc_info:\n hdr_out = nmrs_tools.split_merge._merge_dim_header(hdr_in_1, hdr_in_2, 5, 4, 4)\n assert exc_info.type is NIfTI_MRSIncompatible\n assert exc_info.value.args[0] == \"Both files must have matching dimension headers apart from the one being merged.\"\\\n \" dim_7_header does not match.\"",
"def run_resample(self):\n\n self.in_data.open()\n self.out_data.open()\n\n try:\n # Get the fields from the input file and set them/write headers in output:\n self.all_fields = self.in_data.fields\n\n self.out_data.set_fields(self.all_fields)\n self.out_data.write_headers()\n\n # Set up the sensor fields by removing non-sensor fields:\n self.set_sensor_only_fields()\n\n # Read the first event from the input file:\n self.get_next_input_event()\n\n # Warn and exit if we have no input data to read:\n if self.next_input_event is None:\n msg = f\"The input file {self.in_file} did not have any data rows\"\n warn(msg)\n\n return\n\n self.first_event_stamp = self.next_input_event[self.stamp_field]\n\n # Set up the sample tracking (here mostly to set the start of the first interval):\n self.reset_sample_tracking()\n\n # Now iterate through the output intervals:\n while True:\n self.process_next_interval()\n except EOFError: # catch when we are at the end of the file\n pass\n finally:\n self.in_data.close()\n self.out_data.close()\n\n print() # make sure we go to a new output line",
"def entries_from_goes_ts_files(*files, default_waveunit=None, source=None):\n\n\n \"\"\"\n ts_goes = ts.TimeSeries(file)\n statinfo = os.stat(file)\n entry = DatabaseEntry(path=file)\n entry.size = statinfo.st_size\n\n #header['INSTRUME'] = header['TELESCOP']# So E.G. 'GOES 6' instead 'X-ray Detector'\n entry.instrument = ts_goes.meta.get('TELESCOP').values()\n entry.instrument = ts_goes.meta.get('TELESCOP').values()\n\n entry.wavemax = 0.8 # XRSB '1.0--8.0 $\\AA$'\n entry.wavemin = 0.05 # XRSA '0.5--4.0 $\\AA$'\n\n #\n entry.observation_time_start = ts_goes.meta.get('date-beg').values()[0]\n entry.observation_time_end = ts_goes.meta.get('date-end').values()[0]\n\n entry.metadata = ts_goes.meta.metadata[0][2]\n\n #entry.tags = [ sunpy.database.attrs.Tag('raw') ]\n \"\"\"\n\n\n for file in files:\n headers = fits.get_header(file)\n if isinstance(file, (str, six.text_type)):\n filename = file\n else:\n filename = getattr(file, 'name', None)\n statinfo = os.stat(file)\n #print('a header')\n entry = DatabaseEntry(path=filename)\n entry.size = statinfo.st_size\n\n # Add/tweak start/end entries for GOES\n if headers[0].get('TELESCOP','') != '':\n #header['INSTRUME'] = header['TELESCOP']# So E.G. 'GOES 6' instead 'X-ray Detector'\n entry.instrument = headers[0]['TELESCOP']\n elif headers[1].get('TELESCOP','') != '':\n entry.instrument = headers[1]['TELESCOP']\n if (headers[0].get('DATE-OBS','') != ''):\n if is_time_in_given_format(headers[0]['DATE-OBS'], '%d/%m/%Y'):\n start_time = datetime.strptime(headers[0]['DATE-OBS'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[0]['DATE-OBS'], '%d/%m/%y'):\n start_time = datetime.strptime(headers[0]['DATE-OBS'], '%d/%m/%y')\n else:\n start_time = parse_time(headers[0]['DATE-OBS'])\n elif (headers[1].get('DATE-OBS','') != ''):\n if is_time_in_given_format(headers[1]['DATE-OBS'], '%d/%m/%Y'):\n start_time = datetime.strptime(headers[1]['DATE-OBS'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[1]['DATE-OBS'], '%d/%m/%y'):\n start_time = datetime.strptime(headers[1]['DATE-OBS'], '%d/%m/%y')\n else:\n start_time = parse_time(headers[1]['DATE-OBS'])\n\n if (headers[0].get('DATE-END','') != ''):\n if is_time_in_given_format(headers[0]['DATE-END'], '%d/%m/%Y'):\n end_time = datetime.strptime(headers[0]['DATE-END'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[0]['DATE-END'], '%d/%m/%y'):\n end_time = datetime.strptime(headers[0]['DATE-END'], '%d/%m/%y')\n else:\n end_time = parse_time(headers[0]['DATE-END'])\n elif (headers[1].get('DATE-END','') != ''):\n if is_time_in_given_format(headers[1]['DATE-END'], '%d/%m/%Y'):\n end_time = datetime.strptime(headers[1]['DATE-END'], '%d/%m/%Y')\n elif is_time_in_given_format(headers[1]['DATE-END'], '%d/%m/%y'):\n end_time = datetime.strptime(headers[1]['DATE-END'], '%d/%m/%y')\n else:\n end_time = parse_time(headers[1]['DATE-END'])\n else:\n end_time = start_time + timedelta(days=1,seconds=-1)\n\n # Add these to the entry\n entry.observation_time_start = start_time\n entry.observation_time_end = end_time\n\n entry.wavemax = 0.8 # XRSB '1.0--8.0 $\\AA$'\n entry.wavemin = 0.05 # XRSA '0.5--4.0 $\\AA$'\n\n if source:\n entry.source = source\n\n entry.metadata = MetaDict(headers[1])\n #entry.tags = sunpy.database.attrs.Tag('raw')\n\n #entry = DatabaseEntry(instrument='EIT', wavemin=25.0)\n\n #return entry\n yield entry",
"def inBoth(from_files):\n t_nof1 = []\n f_nof1 = []\n array_of_times = []\n for file in from_files:\n item = file.replace('_COMPLETE', '')\n if item in to_files:\n to = os.path.join('/ToNof1/archive', item)\n from_nof1 = os.path.join('/FromNof1', file)\n t_nof1.append(to)\n f_nof1.append(from_nof1)\n\n\n\n with open(\"TAT_From_Nof1.tsv\", 'w') as f:\n i = 0\n myHeader = \"Completed File\\tCompleted Time\\tSent File\\tSent Time\\tDelta\\n\"\n f.write(myHeader)\n while i < len(to_files):\n today = datetime.today()\n\n fName = os.path.basename(f_nof1[i])\n tName = os.path.basename(t_nof1[i])\n\n fTime = getDate(f_nof1[i])\n tTime = getDate(t_nof1[i])\n\n duration = (today - fTime)\n if duration.days < 90:\n delta = fTime - tTime\n seconds = (delta.total_seconds())\n minutes = seconds / 60.0\n hours = minutes / 60.0\n array_of_times.append(hours)\n delta = str(delta)\n fTime = str(fTime)\n tTime = str(tTime)\n myString = (fName + \"\\t\" + fTime + \"\\t\" + tName + \"\\t\" + tTime + \"\\t\" + delta + \"\\n\")\n f.write(myString)",
"def get_headers(self):\n\t\t# collect all the non-segment\n\t\t# files into a list (there\n\t\t# should only be one header)\n\t\tfiles = glob.glob(\"%s/*\" % self.segment_path)\n\t\theaders = [f for f in files if os.path.splitext(f)[1] != '.seg']\n\t\tfor path in headers:\n\t\t\t_file = os.path.split(path)[1]\n\t\t\tdae = DiscreetArchiveElement(self,_file,element_type='header')\n\t\t\tself.elements.append(dae)\n\t\treturn True",
"def generate_headers(src_files, out_root, doc_root):\r\n\r\n if not os.path.exists(out_root):\r\n os.makedirs(out_root)\r\n did_print_heading = False\r\n changed = False\r\n for (name, files) in src_files:\r\n if files.__class__ == str:\r\n src = files\r\n files = (src,)\r\n else:\r\n src = files[0]\r\n\r\n dst = src.replace(\".hh\", \"-method-def.hh\")\r\n dst = dst.replace(\".cpp\", \"-method-def.hh\")\r\n dst = os.path.join(out_root, os.path.split(dst)[1])\r\n\r\n dst_doc = src.replace(\".hh\", '-methods.txt')\r\n dst_doc = dst_doc.replace(\".cpp\", '-methods.txt')\r\n dst_doc_filename = os.path.split(dst_doc)[1]\r\n dst_doc_filename = os.path.join(doc_root, dst_doc_filename)\r\n\r\n dst_prop_doc = src.replace(\".cpp\", '-properties.txt')\r\n dst_doc_prop_filename = os.path.split(dst_prop_doc)[1]\r\n dst_doc_prop_filename = os.path.join(doc_root, dst_doc_prop_filename)\r\n\r\n if util.changed(src, dst):\r\n if not did_print_heading:\r\n print(\"* Generating Python method definitions.\")\r\n did_print_heading = True\r\n generate(files, dst, dst_doc_filename, dst_doc_prop_filename, name)\r\n changed = True\r\n if not changed:\r\n print(\"* Python method definitions up to date.\")",
"def update_header(self) -> None:\n self.header.partial_reset()\n self.header.point_format_id = self.points.point_format.id\n self.header.point_data_record_length = self.points.point_size\n\n if len(self.points) > 0:\n self.header.update(self.points)\n\n if self.header.version.minor >= 4:\n if self.evlrs is not None:\n self.header.number_of_evlrs = len(self.evlrs)\n self.header.start_of_waveform_data_packet_record = 0\n # TODO\n # if len(self.vlrs.get(\"WktCoordinateSystemVlr\")) == 1:\n # self.header.global_encoding.wkt = 1\n else:\n self.header.number_of_evlrs = 0"
] | [
"0.6403937",
"0.61514765",
"0.6100263",
"0.5859067",
"0.58415174",
"0.58295155",
"0.57660097",
"0.56070304",
"0.55764234",
"0.5484562",
"0.5480894",
"0.5478244",
"0.545221",
"0.5397874",
"0.53785336",
"0.5354963",
"0.51996344",
"0.5187501",
"0.5178293",
"0.5173099",
"0.5160237",
"0.5153791",
"0.5149812",
"0.50992084",
"0.5083629",
"0.5082852",
"0.5078927",
"0.5067093",
"0.50668883",
"0.50662905"
] | 0.71078265 | 0 |
Information about the data files as a list of strings Returns List[str] List of information about the data files | def printDataFileList(self) -> List[str]:
textLst: List[str] = []
textLst.append("Data File\t\tSample Ranges")
for dFile, sRanges in zip(self.dataFileList, self.dataRanges):
textLst.append("{}\t\t{} - {}".format(dFile, sRanges[0], sRanges[1]))
textLst.append("Total samples = {}".format(self.getNumSamples()))
return textLst | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def find_data(self):\n data_list = []\n for root, dirs, files in os.walk(pathfinder.data_path()):\n for name in files:\n data_list.append(os.path.join(root, name))\n return data_list",
"def getDataFiles(directoryName):\r\n \r\n return listdir(directoryName)",
"def get_output_data(filenames):\n output = []\n for filename in filenames:\n file_info = get_file_info(filename)\n output.append(file_info)\n return output",
"def load(self):\n\t\t# Initialize empty list\n\t\tdata_files = []\n\n\t\t# Append the Drusen files to the list\n\t\tfor single_file in os.listdir(self.data_dir):\n\t\t\tdata_files.append(single_file)\n\t\treturn data_files",
"def get_data_files(main_directory):\n print('************************************')\n print('Log data list')\n print('************************************')\n log_files_list = globlin(main_directory + '/*/*.json' , recursive=True)\n song_files_list = globlin(main_directory + '/*/*/*/*/*.json', recursive=True)\n print(log_files_list)\n print('************************************')\n print('Song data list')\n print('************************************')\n print(song_files_list)\n return log_files_list, song_files_list",
"def list_files(self):\n ret = []\n for fname in self.files:\n ret.append('filename: %s\\t replica locations: %s' %\n (fname, ','.join(self.files[fname])))\n return ret",
"def files(self) -> List[str]:\n return [packet.name for packet in self.packets.file_description.values()]",
"def extract_files(self) -> list:\n pass",
"def getcontent(self):\n filelist=[]\n if len(self.filelist) == 0:\n return \"empty directory\"\n else:\n for file in self.filelist:\n filelist.append(file)\n return filelist",
"def get_data_files():\n data_files = []\n\n # Walk through the data directory, adding all files\n data_generator = os.walk('pypeit/data')\n for path, directories, files in data_generator:\n for f in files:\n data_path = '/'.join(path.split('/')[1:])\n data_files.append(os.path.join(data_path, f))\n\n # Add pipeline and spectrograph settings\n settings = glob.glob('pypeit/settings/settings.*')\n settings = ['/'.join(path.split('/')[1:]) for path in settings]\n data_files.extend(settings)\n\n return data_files",
"def all_files(self) -> List[IdentifiedFile]:\n return [self.main_file, *self.labware_files, *self.data_files]",
"def list_all():\n if os.path.exists(DATA_DIR):\n return os.listdir(DATA_DIR)\n return []",
"def get_filenames(self):\n return self.filenames",
"def get_listfile(self, datadir):\n return []",
"def listFiles(self):\n pass",
"def get_file_list():\n wb = xw.Workbook.caller()\n path_input = xw.Range('Macro', 'FilePath').value\n l_file_path = glob.glob(path_input + '[!~]*.*')\n l_file_name = [l.split('/')[-1] for l in l_file_path]\n xw.Range('Macro', 'FileField').clear_contents()\n xw.Range('Macro', 'C_FilePath').options(transpose=True).value = l_file_path\n xw.Range('Macro', 'C_FileName').options(transpose=True).value = l_file_name\n xw.Sheet('Macro').activate()\n wb.macro('ShowMsg')(\"Choose DataType for all the listed files\")",
"def GetFileNames(self):\n return self.files",
"def get_data_from_files(path, filename):\n\n data_files = []\n\n if path:\n list_of_files = os.listdir(path)\n print(\"List of data files:\", list_of_files)\n\n for file in list_of_files:\n if filename in file:\n full_filepath = path + \"/\" + file\n data_files.append(full_filepath)\n #print(data_files)\n\n else:\n data_files = []\n #print(data_files)\n return data_files",
"def raw_file_names(self):\n return self.filename",
"def get_data_files():\n return [\n ('share/jupyter/nbextensions/{}'.format(PY_PACKAGE), TARGETS),\n ('share/jupyter/lab/extensions', [\n os.path.relpath(f, '.') for f in glob.glob(TAR_PATH)\n ])\n ]",
"def list_output_files(self):\r\n fname = self.__get_output_filename()\r\n return [fname] if fname else []",
"def get_movie_data(files: list) -> list:\n pass",
"def get_data_files():\n\n data_files = []\n for d, dirs, filenames in os.walk(share_jupyterhub):\n rel_d = os.path.relpath(d, here)\n data_files.append((rel_d, [os.path.join(rel_d, f) for f in filenames]))\n return data_files",
"def getFilesList(data):\n\n filesList = []\n\n if os.path.isdir(data):\n logging.info(\"Using files from \" + data)\n #Create a list containing the file names\n for root, dirs, files in os.walk(data):\n for filename in files:\n filesList.append(os.path.join(root,filename))\n\n else:\n logging.info(\"Using file \" + data)\n filesList.append(os.path.abspath(data))\n\n return sorted(filesList)",
"def getGlobusFiles(self):\n\t\treturn self.transfer_client.operation_ls(self.transfer_client.endpoint_search(DATA_ENDPOINT_NAME)[0]['name'])",
"def get_files(self):\r\n return self._filelist",
"def files(self):\n files = []\n if self.package_type == 'package':\n file_data = dict([(k, self[k]) \\\n for k in ['size', 'sha1', 'sha256', 'md5sum']])\n file_data['name'] = self['filename'].split('/')[-1]\n files.append(file_data)\n else:\n for d in self['files']:\n file_data = d.copy()\n # Get checksum data as well...\n for key in ['sha1', 'sha256']:\n for data in self['checksums-' + key]:\n if file_data['name'] == data['name']:\n file_data[key] = data[key]\n files.append(file_data)\n return files",
"def get_list_of_data_and_labels():\n list_of_imgs = []\n list_of_img_labels = []\n for root, dirs, files in os.walk(config[\"PathToData\"], topdown=False):\n for f in files:\n ext = os.path.splitext(f)[-1].lower()\n\n if ext in config[\"ValidImageFileExtensions\"]:\n list_of_imgs.append(os.path.join(root, f))\n if ext in config[\"ValidLabelFileExtensions\"]:\n list_of_img_labels.append(os.path.join(root, f))\n\n list_of_imgs_with_labels = []\n list_of_corresponing_labels = []\n for img_full_file_name in list_of_imgs:\n img_file_name = os.path.splitext(img_full_file_name)[0].lower()\n corresponding_label = [label_full_file_name for label_full_file_name in list_of_img_labels if os.path.splitext(label_full_file_name)[0].lower() == img_file_name]\n if len(corresponding_label) != 0:\n list_of_imgs_with_labels.append(img_full_file_name)\n list_of_corresponing_labels.append(corresponding_label[0])\n\n assert len(list_of_imgs_with_labels) == len(list_of_corresponing_labels)\n\n return list_of_imgs_with_labels, list_of_corresponing_labels",
"def filenames(self):\n pass",
"def pdbfile_list():\n import glob, os\n os.chdir(\"../Data\")\n file_list = []\n for file in glob.glob(\"*.pdb\"):\n file_list.append(file)\n return file_list"
] | [
"0.7393555",
"0.7135669",
"0.69985956",
"0.6970242",
"0.6864774",
"0.6864221",
"0.68211174",
"0.67915004",
"0.678704",
"0.6742841",
"0.6741322",
"0.6736098",
"0.67299247",
"0.6709108",
"0.6708198",
"0.6699793",
"0.6695189",
"0.66744316",
"0.6654875",
"0.6652273",
"0.66320395",
"0.66036975",
"0.6602082",
"0.6594071",
"0.65933937",
"0.658104",
"0.6568237",
"0.6561749",
"0.65505755",
"0.6516499"
] | 0.7159409 | 1 |
Given a set of results, return a list of LDAPSearchResult objects. | def get_search_results(results):
if len(results) == 0:
return []
if type(results) == tuple and len(results) == 2:
(code, arr) = results
elif type(results) == list:
arr = results
res = []
for item in arr:
res.append(LDAPSearchResult(item))
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def parse_search_results(fields, results):\n my_results = []\n for result in results:\n my_results.append(SearchAnimeResult(fields, result))\n return my_results",
"def list_results(cls, output_dir, **kwargs):\n results = cls.load(output_dir, **kwargs)\n return results.get_results()",
"def get_search_results(twitter_dict, search_dict):\r\n\r\n search_list = [search_dict['username']] \r\n search_specified_list = []\r\n\r\n for user in search_list:\r\n search_users_list = [user]\r\n \r\n for operation in search_dict['operations']:\r\n search_users_list = search_helper(search_users_list, operation,\\\r\n twitter_dict)\r\n \r\n search_specified_list += search_users_list\r\n \r\n return search_specified_list",
"def get_ldap_users(conn, searchfilter, attrs):\n\n base_dn = conn.server.info.other['DefaultNamingContext'][0]\n conn.search(search_base=base_dn, search_filter=searchfilter, attributes=attrs)\n return conn.entries",
"def results(self, query=None, batch=True, b_size=10, b_start=0):\n # Disable theming for ajax requests\n if 'ajax' in self.request.form:\n del self.request.form['ajax']\n self.request.response.setHeader('X-Theme-Disabled', 'True')\n\n if query is None:\n query = {}\n\n query['b_start'] = b_start = int(b_start)\n query['b_size'] = b_size\n query = self.filter_query(query)\n\n if query is None:\n results = []\n else:\n query.update({'qt': 'hlsearch'});\n catalog = getToolByName(self.context, 'portal_catalog')\n try:\n results = catalog(**query)\n except ParseError:\n logger.exception('Exception while searching')\n return []\n except SolrException:\n logger.exception('Exception while searching')\n return []\n\n self.solr_response = results\n results = IContentListing(results)\n if batch:\n results = Batch(results, b_size, b_start)\n return results",
"def search_ldap(l, filter, base, scope=ldap.SCOPE_SUBTREE, attributes=None, accountname=None, DEBUG=False):\r\n if '%s' in filter:\r\n query = filter % accountname\r\n else:\r\n query = filter\r\n if DEBUG:\r\n warning(\"searching for user '%s' in base: %s. retrieve attributes: %s, scope: %s\"%(accountname, base, attributes, scope))\r\n warning('Filter string: %s'%(query,))\r\n try:\r\n ldap_result_id = l.search(base, scope, query, attributes)\r\n if DEBUG:\r\n warning('ldap_result_id: %s'%ldap_result_id)\r\n result_set = llist()\r\n result_type, result_data = l.result(ldap_result_id, 0)\r\n if DEBUG:\r\n warning('len of result_data: %d'%len(result_data))\r\n while result_type == ldap.RES_SEARCH_ENTRY:\r\n result_data = result_data[0]\r\n #data = ( result_data[0] , { i:result_data[1][i] for i in result_data[1] } )\r\n user_data = ldict({i: result_data[1][i][0] if len(result_data[1][i])==1 else result_data[1][i] for i in result_data[1]})\r\n user_data['dn'] = result_data[0]\r\n if isinstance(user_data['dn'], list):\r\n user_data['dn'] = user_data['dn'][0]\r\n\r\n result_set.append(user_data)\r\n result_type, result_data = l.result(ldap_result_id, 0)\r\n if DEBUG:\r\n warning('len of result_data: %d'%len(result_data))\r\n\r\n return result_set\r\n\r\n except ldap.LDAPError, e:\r\n print e\r\n return None",
"def get_list_of_results(self):\n return self.__result_list",
"def _process_ldap_info_for_all_users(self, result_data):\n results = []\n logger.debug(\" LDAP.py _process_ldap_info_for_all_users result_data %s \"\n % (result_data))\n for ldapentry in result_data:\n logger.debug(\" LDAP.py _process_ldap_info_for_all_users \\\n ldapentry name : %s \" % (ldapentry[1]['uid'][0]))\n tmpname = ldapentry[1]['uid'][0]\n hrn = self.authname + \".\" + tmpname\n\n tmpemail = ldapentry[1]['mail'][0]\n if ldapentry[1]['mail'][0] == \"unknown\":\n tmpemail = None\n\n try:\n results.append({\n 'type': 'user',\n 'pkey': ldapentry[1]['sshPublicKey'][0],\n #'uid': ldapentry[1]['uid'][0],\n 'uid': tmpname ,\n 'email':tmpemail,\n #'email': ldapentry[1]['mail'][0],\n 'first_name': ldapentry[1]['givenName'][0],\n 'last_name': ldapentry[1]['sn'][0],\n #'phone': 'none',\n 'serial': 'none',\n 'authority': self.authname,\n 'peer_authority': '',\n 'pointer': -1,\n 'hrn': hrn,\n })\n except KeyError, error:\n logger.log_exc(\"LDAPapi.PY \\t LdapFindUser EXCEPTION %s\"\n % (error))\n return\n\n return results",
"def fetch_search_results (self, search_str, list_from=0, list_to=10):\n # properly encode the search string\n encoded_search_string = quote(search_str)\n\n paths = [\n ['search', encoded_search_string, 'titles', {'from': list_from, 'to': list_to}, ['summary', 'title']],\n ['search', encoded_search_string, 'titles', {'from': list_from, 'to': list_to}, 'boxarts', '_342x192', 'jpg'],\n ['search', encoded_search_string, 'titles', ['id', 'length', 'name', 'trackIds', 'requestId']],\n ['search', encoded_search_string, 'suggestions', 0, 'relatedvideos', {'from': list_from, 'to': list_to}, ['summary', 'title']],\n ['search', encoded_search_string, 'suggestions', 0, 'relatedvideos', {'from': list_from, 'to': list_to}, 'boxarts', '_342x192', 'jpg'],\n ['search', encoded_search_string, 'suggestions', 0, 'relatedvideos', ['id', 'length', 'name', 'trackIds', 'requestId']]\n ]\n response = self._path_request(paths=paths)\n return self._process_response(response=response, component='Search results')",
"def get_results(self, ids):\n self.join()\n return [self.results[id] for id in ids]",
"def getAllResults(self, query, category=\"\", start=0, limit=50, sort='excerpt', page=None,\n dir='ASC'):\n facade = self._getFacade()\n results = facade.getSearchResults(query, category, resultSorter=None,\n start=start,\n limit=limit,\n sort=sort,\n dir=dir)\n return {'results': Zuul.marshal(results['results']),\n 'total': results['total']}",
"def uniqueResults( self, results ):\n rid_map = {}\n for r in results:\n rid_map[r.getRID()] = r\n return rid_map.values()",
"def get_results(self, ruleset=None, contact_field=None, segment=None):\n params = self._build_params(ruleset=ruleset, contact_field=contact_field, segment=segment)\n return Result.deserialize_list(self._get_all('results', params))",
"def merge_cached_results(*results):\r\n if len(results) == 1:\r\n return list(results[0])\r\n\r\n #make sure the sorts match\r\n sort = results[0].query._sort\r\n assert(all(r.query._sort == sort for r in results[1:]))\r\n\r\n def thing_cmp(t1, t2):\r\n for i, s in enumerate(sort):\r\n #t1 and t2 are tuples of (fullname, *sort_cols), so we can\r\n #get the value to compare right out of the tuple\r\n v1, v2 = t1[i + 1], t2[i + 1]\r\n if v1 != v2:\r\n return cmp(v1, v2) if isinstance(s, asc) else cmp(v2, v1)\r\n #they're equal\r\n return 0\r\n\r\n all_items = []\r\n for r in results:\r\n r.fetch()\r\n all_items.extend(r.data)\r\n\r\n #all_items = Thing._by_fullname(all_items, return_dict = False)\r\n return [i[0] for i in sorted(all_items, cmp = thing_cmp)]",
"def get_results_for(t_client, search_q):\n results = t_client.search(q=\"#\"+search_q)\n\n # This can be refactored\n return [\n {\n \"author\": \"@%s\" % t.from_user,\n \"text\": t.text,\n \"id\": t.id,\n \"date_h\": t.created_at.strftime(\"%H:%M:%S %d/%m/%Y\"),\n \"date\": time.mktime(t.created_at.timetuple()),\n } for t in results\n ]",
"def printSearchResults(results):\n Log.Debug('Search produced %d results:' % len(results))\n index = 0\n for result in results:\n Log.Debug(' ... %d: id=\"%s\", name=\"%s\", year=\"%s\", score=\"%d\".' %\n (index, result.id, result.name, str(result.year), result.score))\n index += 1",
"def get_results(self, nb_results=1000):\n\t\tdocs = self.searcher.search(self.constrained_query.build(), nb_results).scoreDocs\n\t\tself.constrained_query = BooleanQuery.Builder()\n\n\t\thits = []\n\t\tfor i in range(len(docs)):\n\t\t\thits.append({})\n\t\t\tfor field in self.reader.document(docs[i].doc).getFields():\n\t\t\t\thits[i][field.name()] = field.stringValue()\n\n\t\thits = self.remove_duplicates(hits)\n\t\treturn hits",
"def list_all():\n\n members = ldapi.search(ld, cfg['ldap_users_base'], '(objectClass=member)')\n return dict([(member[0], member[1]) for member in members])",
"def fetchall(self):\n return list(self._results)",
"def get_pages_from_search_results(results):\n pages = []\n for idpage in results['query']['pageids']:\n page = results['query']['pages'][idpage]\n pages.append(page)\n return pages",
"def process_results(self, response, results):\n return results",
"def process_results(self, response, results):\n return results",
"def get_organism_names(results):\r\n\r\n organism_names = []\r\n\r\n for result in results:\r\n organism_names.append(result)\r\n\r\n return organism_names",
"def list(self):\n return self.results_list",
"def filter_results(qry):\n result = []\n\n # check if qry is a list (multiple records) or not (single record)\n if type(qry) != list:\n record = make_ndb_return_data_json_serializable(qry)\n return(record)\n\n for q in qry:\n result.append(make_ndb_return_data_json_serializable(q))\n\n return(result)",
"def search_ldap(connection, search_base, attrlist):\n if (connection and search_base):\n if (attrlist):\n ldap_result = connection.search_s(search_base, ldap.SCOPE_SUBTREE, attrlist=attrlist)\n else:\n ldap_result = connection.search_s(search_base, ldap.SCOPE_SUBTREE) \n else:\n print \"Error: search_ldap: Connection object or search base argument given was not valid.\"\n print\n sys.exit(1)\n\n return ldap_result",
"def addMultiResults(self, results, index):\n # if no return from site, seed the results with an empty list\n if results is None or len(results) == 0:\n self._results[index] = None\n else:\n self._results[index] = results",
"def ldap_search(self, ldapfilter):\n # Determine the scope value\n if self.args.recursive:\n scope = ldap.SCOPE_SUBTREE\n else:\n scope = ldap.SCOPE_ONELEVEL\n \n # Search ldap for results\n try:\n self.searchresult = self.ldapobj.search_s(self.args.basedn, scope, ldapfilter)\n except ldap.REFERRAL as ex:\n print >> sys.stderr, \"Error: LDAP referral received. Is the basedn correct?\"\n sys.exit(1)\n except ldap.INVALID_CREDENTIALS:\n print >> sys.stderr, \"Error: Invalid credentials\"\n sys.exit(1)\n except Exception as ex:\n print ex.__class__.__name__\n finally:\n self.ldapobj.unbind_s()",
"def get_search_results(self):\n return self.get_list_of_names(self.SEARCH_RESULTS)",
"def extractSearchResults(self, html):\n results = list()\n soup = BeautifulSoup(html, 'html.parser')\n div = soup.find('div', id='main')\n if (type(div) == types.NoneType):\n div = soup.find('div', id='center_col')\n if (type(div) == types.NoneType):\n div = soup.find('body')\n if (type(div) != types.NoneType):\n lis = div.findAll('a')\n if(len(lis) > 0):\n for link in lis:\n if (type(link) == types.NoneType):\n continue\n \n url = link['href']\n if url.find(\".google\") > 6:\n continue\n \n url = self.extractUrl(url)\n if(cmp(url, '') == 0):\n continue\n title = link.renderContents()\n title = re.sub(r'<.+?>', '', title)\n result = SearchResult()\n result.setURL(url)\n print '### URL: ' + url\n result.setTitle(title)\n span = link.find('div')\n if (type(span) != types.NoneType):\n content = span.renderContents()\n content = re.sub(r'<.+?>', '', content)\n result.setContent(content)\n results.append(result)\n return results"
] | [
"0.5853295",
"0.5794549",
"0.5777972",
"0.5770905",
"0.5710312",
"0.57052636",
"0.56758606",
"0.56412905",
"0.5539216",
"0.5504885",
"0.5490016",
"0.54144686",
"0.53994405",
"0.5384345",
"0.53460926",
"0.53145885",
"0.53108484",
"0.5308784",
"0.5290834",
"0.52842665",
"0.5256991",
"0.5256991",
"0.525647",
"0.52504486",
"0.5242188",
"0.5228327",
"0.5220341",
"0.5213927",
"0.5211474",
"0.51974434"
] | 0.7374579 | 0 |
Look for transaction receipt, only raise not found error if they are missing for longer than two minutes. | async def _check_transaction_receipt(self, tx_hash: str, timestamp: int):
async_scheduler: AsyncCallScheduler = AsyncCallScheduler.shared_instance()
try:
return await async_scheduler.call_async(self._w3.eth.getTransactionReceipt, tx_hash)
except TransactionNotFound as e:
now: float = time.time()
if now - timestamp > 120:
stop_tx_hash = e.args[0].split(" ")[3]
self._stop_tx_tracking(stop_tx_hash)
self.logger().info(f"Stopped tracking transaction with hash: {stop_tx_hash}.")
return None | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wait_for_receipt(tx_hash, duration=C.EVM_TIMEOUT):\n slept = 0\n tx_rcpt = None\n\n while slept < duration:\n # because web3 throws if not present vs returning None (like the docs say)\n try:\n tx_rcpt = g.w3.eth.getTransactionReceipt(tx_hash)\n except TransactionNotFound:\n tx_rcpt = None\n current_app.logger.info(f'Transaction Receipt not ready after {slept} seconds, sleeping...')\n except:\n tx_rcpt = None\n current_app.logger.info(f'Unexpected error looking up transaction after {slept} seconds, sleeping...')\n\n if tx_rcpt != None:\n break\n slept = slept + C.TRANSACTION_RETRY\n sleep(C.TRANSACTION_RETRY)\n\n if tx_rcpt == None:\n current_app.logger.info(C.TRANSACTION_TIMEOUT % price_and_time[1])\n raise Exception(C.TRANSACTION_TIMEOUT % price_and_time[1])\n else:\n current_app.logger.info(C.TRANSACTION_MINED, tx_rcpt['transactionHash'])\n return g.w3.toHex(tx_rcpt['transactionHash'])",
"async def check_transaction_receipts(self):\n async_scheduler: AsyncCallScheduler = AsyncCallScheduler.shared_instance()\n tasks = [self._check_transaction_receipt(tx_hash, self._pending_tx_dict[tx_hash]['timestamp'])\n for tx_hash in self._pending_tx_dict.keys()]\n transaction_receipts: List[AttributeDict] = [tr for tr in await safe_gather(*tasks)\n if (tr is not None and tr.get(\"blockHash\") is not None)]\n block_hash_set: Set[HexBytes] = set(tr.blockHash for tr in transaction_receipts)\n fetch_block_tasks = [async_scheduler.call_async(self._w3.eth.getBlock, block_hash)\n for block_hash in block_hash_set]\n blocks: Dict[HexBytes, AttributeDict] = dict((block.hash, block)\n for block\n in await safe_gather(*fetch_block_tasks)\n if block is not None)\n\n for receipt in transaction_receipts:\n # Emit gas used event.\n tx_hash: str = receipt.transactionHash.hex()\n gas_price_wei: int = self._pending_tx_dict[tx_hash]['gas_price']\n gas_used: int = receipt.gasUsed\n gas_eth_amount_raw: int = gas_price_wei * gas_used\n\n if receipt.blockHash in blocks:\n block: AttributeDict = blocks[receipt.blockHash]\n\n if receipt.status == 0:\n self.logger().warning(f\"The transaction {tx_hash} has failed.\")\n self.trigger_event(WalletEvent.TransactionFailure, tx_hash)\n\n self.trigger_event(WalletEvent.GasUsed, EthereumGasUsedEvent(\n float(block.timestamp),\n tx_hash,\n float(gas_price_wei * 1e-9),\n gas_price_wei,\n gas_used,\n float(gas_eth_amount_raw * 1e-18),\n gas_eth_amount_raw\n ))\n\n # Stop tracking the transaction.\n self._stop_tx_tracking(tx_hash)",
"def get_receipt(tx_hash, url, retry=8):\n while retry > 0:\n receipt = rpc_request('getTransactionReceipt', [tx_hash], url)\n if receipt is not None:\n return receipt\n time.sleep(4)\n retry -= 1",
"def wait_for_receipt(self, txn_hash, timeout=120, poll_latency=0.1):\n return self.web3.eth.waitForTransactionReceipt(txn_hash, timeout, poll_latency)",
"def expired_receipt(self):\n return self._expired_receipt",
"def test_extract_receipt(self):\n\n # Test IAP Response without in_app list\n request = Request('DummyReceipt', use_production=True)\n ext_receipt = request._extract_receipt(self.iap_response)\n\n assert ext_receipt['status'] == 0 # 0 is normal\n assert ext_receipt['receipt']['product_id'] == 'TestProduction1'\n assert ext_receipt['receipt']['original_transaction_id'] == '1000000012345678' # original transaction id\n assert ext_receipt['receipt']['quantity'] == '1' # check quantity\n\n # Test IAP Response with in_app list\n request = Request('DummyReceipt', use_production=True)\n ext_receipt = request._extract_receipt(self.iap_response_in_app)\n\n assert ext_receipt['status'] == 0 # 0 is normal\n assert ext_receipt['receipt']['product_id'] == 'org.itunesiap'\n assert ext_receipt['receipt']['original_transaction_id'] == '1000000155718067' # original transaction id\n assert ext_receipt['receipt']['quantity'] == '1' # check quantity",
"def _verify_transaction_record_written(self, purchase_table_input: Dict, refund_table_input: Dict, error_table_input: Dict):\n client = boto3.client(\"dynamodb\")\n for transaction_item in purchase_table_input:\n response = client.get_item(\n Key={\n \"TransactionId\": {\n \"S\": transaction_item[\"TransactionId\"],\n },\n },\n TableName=self.transaction_table_purchase,\n )\n self.assertTrue(\n \"Item\" in response,\n f'Cannot find transaction record with id {transaction_item[\"TransactionId\"]}',\n )\n item = response[\"Item\"]\n self.assertDictEqual(item[\"Message\"], {\"S\": transaction_item[\"Message\"]})\n self.assertDictEqual(item[\"Timestamp\"], {\"S\": transaction_item[\"Timestamp\"]})\n self.assertDictEqual(item[\"Type\"], {\"S\": transaction_item[\"Type\"]})\n\n for transaction_item in refund_table_input:\n response = client.get_item(\n Key={\n \"TransactionId\": {\n \"S\": transaction_item[\"TransactionId\"],\n },\n },\n TableName=self.transaction_table_refund,\n )\n self.assertTrue(\n \"Item\" in response,\n f'Cannot find transaction record with id {transaction_item[\"TransactionId\"]}',\n )\n item = response[\"Item\"]\n self.assertDictEqual(item[\"Message\"], {\"S\": transaction_item[\"Message\"]})\n self.assertDictEqual(item[\"Timestamp\"], {\"S\": transaction_item[\"Timestamp\"]})\n self.assertDictEqual(item[\"Type\"], {\"S\": transaction_item[\"Type\"]})\n\n for transaction_item in error_table_input:\n response = client.get_item(\n Key={\n \"TransactionId\": {\n \"S\": transaction_item[\"TransactionId\"],\n },\n },\n TableName=self.transaction_table_error,\n )\n self.assertTrue(\n \"Item\" in response,\n f'Cannot find transaction record with id {transaction_item[\"TransactionId\"]}',\n )\n item = response[\"Item\"]\n self.assertDictEqual(item[\"Message\"], {\"S\": transaction_item[\"Message\"]})\n self.assertDictEqual(item[\"Timestamp\"], {\"S\": transaction_item[\"Timestamp\"]})\n self.assertDictEqual(item[\"Type\"], {\"S\": transaction_item[\"Type\"]})",
"def search_UI_transaction_bigger_before_day(account):\n\t_day = read_day()\n\t_amount = read_amount()\n\tfound = search_transaction_bigger_before_day(account, _day, _amount, print_transaction)\n\tif (not found):\n\t\tprint('Nu exista nici o tranzactie efectuata inainte de ziua', \\\n\t\t\t\t\"%d cu suma mai mare de %f\" % (_day, _amount))",
"def check_missed_job_completion_notifications(self):\n logger.info(\"Checking for missed job completion notifications\")\n #ten_min_ago = int((time.time() - 600) * 1e6)\n operating = self.instances.find({\n #'mtime': {'$lt': ten_min_ago},\n 'operation' : {'$exists': True, '$ne': None}\n })\n\n for fix_doc in operating:\n service = self.axops_client.get_service(fix_doc['operation']['id'])\n if ServiceStatus.completed(service['status']):\n # Keep this consistent with expectation in process_action_result() and axops/service/service.go\n payload = {\n \"id\": service['id'],\n \"name\": service['name'],\n \"status\": service['status'],\n \"annotations\": service.get('annotations', {}),\n \"user\": service['user']\n }\n try:\n logger.info(\"Detected missed job notification: %s\", payload)\n self.process_action_result(payload)\n except Exception:\n logger.exception(\"Failed to process completion event\")",
"def test_GET_receipt_by_id(self):\n\t\tself.POST_receipt()\n\t\t# verify receipt data matches list_data and that date set\n\t\tlist_data = self.GET_data('/api/list/search?_id=' + self.list_id + '&populate_rooms=true')[0]\n\t\treceipt_data = self.GET_data('/api/receipt/' + self.receipt_id)\n\n\t\tself.assertEqual(list_data['_id'], receipt_data['_list'])\n\t\tself.assertDataMatch(list_data, receipt_data, ['_cleaner', 'phonenumber', 'notes', 'price','location'])\n\n\t\tself.assertTrue('date' in receipt_data)\n\t\tself.assertTrue(dateutil.parser.parse(receipt_data['date']) > datetime.now())\n\n\t\t# for each room in list_data and receipt_data, assert they match\n\t\tself.assertEqual(len(list_data['rooms']), len(receipt_data['rooms']))\n\t\tnum_rooms = len(list_data['rooms'])\n\n\t\tfor r in range(num_rooms):\n\t\t\tself.assertEqual(list_data['rooms'][r]['name'], receipt_data['rooms'][r]['name'])\n\t\t\tself.assertEqual(len(list_data['rooms'][r]['tasks']), len(receipt_data['rooms'][r]['tasks']))\n\t\t\tfor t in range(len(list_data['rooms'][r]['tasks'])):\n\t\t\t\tself.assertEqual(list_data['rooms'][r]['tasks'][t], receipt_data['rooms'][r]['tasks'])\n\n\t\t# verify receipt.cleaner is filled in public cleaner\n\t\tcleaner_data = self.GET_data('/api/cleaner/' + receipt_data['_cleaner'])\n\t\tself.assertEqual(cleaner_data['name'], receipt_data['cleaner']['name'])\n\t\tself.assertEqual(cleaner_data['phonenumber'], receipt_data['cleaner']['phonenumber'])\n\t\tself.assertTrue('hashed_pwd' not in receipt_data['cleaner'])\n\n\t\t# delete receipt's parent list and assert receipt not deleted and receipt._list is null\n\t\tself.DELETE('/api/list/' + self.list_id)\n\t\treceipt_data = self.GET_data('/api/receipt/' + self.receipt_id)\n\t\tself.assertNotEqual(None, receipt_data)\n\t\tself.assertEqual(receipt_data['_list'], None)",
"def check_any_issue_needs_reminder(self, search_timedelta: datetime, records: List[EventRecord]) -> bool:\n fingerprints = [record.fingerprint for record in records]\n with self.session.begin() as session:\n timestamps: List[datetime] = (\n session.query(sqlalchemy.sql.expression.func.max(EventRecord.sent_at))\n .filter(EventRecord.fingerprint.in_(fingerprints) & EventRecord.sent_at.isnot(None))\n .group_by(EventRecord.fingerprint)\n .all()\n )\n if timestamps:\n return max(timestamps)[0] <= datetime.utcnow() - search_timedelta\n\n return False",
"def test_add_receipts(driver):\n print(\"-\"*80)\n print(\"Test: Adding a receipt\")\n print(\"-\"*80)\n\n driver = driver\n time.sleep(1)\n old_receipts = list(get_all_receipts(driver))\n m, a = add_receipts(driver)\n\n\n if DEBUG>=2:\n driver.refresh()\n time.sleep(1)\n new_receipts = list(get_all_receipts(driver))\n\n\n if len(old_receipts) + 1 != len(new_receipts):\n print(\"old_receipts={}\\n>> new_receipts={}\"\n .format(old_receipts, new_receipts))\n return -1\n found = False\n for rs in new_receipts:\n if str(rs['merchant']) == str(m) and str(rs['amount']) == str(a):\n found = True\n break\n elif DEBUG:\n print(\"Found (but not testing):\", rs)\n\n if not found:\n print(\n \"ERROR: I don't see the receipt I just inserted with \\n\"\n \"merchant={!r} and amount={!r}\".format(m, a)\n )\n return -1\n print(\"Success!!!\")\n print('<>'*40 + '\\n')\n return 0",
"def user_scans_get_receipt_status(self,\n user_id,\n receipt_id):\n # The base uri for api requests\n query_builder = Configuration.BASE_URI\n \n # Prepare query string for API call\n query_builder += \"/v1/users/{user_id}/receipt/{receipt_id}\"\n\n # Process optional template parameters\n query_builder = APIHelper.append_url_with_template_parameters(query_builder, { \n \"user_id\": user_id,\n \"receipt_id\": receipt_id\n })\n\n # Process optional query parameters\n query_parameters = {\n \"client_id\": self.__client_id,\n \"client_secret\": self.__client_secret\n }\n query_builder = APIHelper.append_url_with_query_parameters(query_builder, query_parameters)\n\n # Validate and preprocess url\n query_url = APIHelper.clean_url(query_builder)\n\n # Prepare headers\n headers = {\n \"user-agent\": \"IAMDATA V1\",\n \"accept\": \"application/json\"\n }\n\n # Prepare and invoke the API call request to fetch the response\n response = unirest.get(query_url, headers=headers)\n\n # Error handling using HTTP status codes\n if response.code == 400:\n raise APIException(\"Bad request\", 400, response.body)\n\n elif response.code == 401:\n raise APIException(\"Unauthorized\", 401, response.body)\n\n elif response.code == 500:\n raise APIException(\"Internal Server Error\", 500, response.body)\n\n elif response.code < 200 or response.code > 206: # 200 = HTTP OK\n raise APIException(\"HTTP Response Not OK\", response.code, response.body)\n \n # Try to cast response to desired type\n if isinstance(response.body, dict):\n # Response is already in a dictionary, return the object \n return UploadReceiptStatusWrapper(**response.body)\n \n # If we got here then an error occured while trying to parse the response\n raise APIException(\"Invalid JSON returned\", response.code, response.body)",
"def verify_receipt(receipt_data, user=None):\n #data = json.dumps({'receipt-data': '{' + receipt_data + '}'})\n data = '{{\\n \"receipt-data\" : \"{}\" \\n}}'.format(receipt_data)\n\n def verify(url):\n tries = 3\n for try_ in range(1, tries + 1):\n try:\n req = urllib2.Request(url, data)\n resp = urllib2.urlopen(req, timeout=18) # app timeout is supposed to be 60\n return json.loads(resp.read())\n except (urllib2.URLError, socket_error) as e:\n if try_ == tries:\n raise e\n\n cleaned_data = verify(settings.IAP_VERIFICATION_URL)\n\n # See: http://developer.apple.com/library/ios/#technotes/tn2259/_index.html\n if cleaned_data['status'] == 21007:\n cleaned_data = verify(settings.IAP_VERIFICATION_SANDBOX_URL)\n\n if cleaned_data['status'] != 0:\n extra = {'status': cleaned_data['status']}\n if user is not None and user.is_authenticated():\n extra['username'] = user.username\n extra['response_from_apple'] = json.dumps(cleaned_data)\n client.captureMessage('IAP receipt validation failed', extra=extra)\n raise ValidationError(\"Your purchase went through, but there was an error processing it. Please contact support: [email protected]\")\n\n return cleaned_data['receipt']",
"def validate_receipt_data(self, receipt_data):\n try:\n self._receipt_info = subscriptions.validate_apple_receipt(\n receipt_data\n )\n except subscriptions.ReceiptException as e:\n raise serializers.ValidationError(code=e.code, detail=e.msg)\n\n return receipt_data",
"def _timeout(self):\n if self._store_timeout > 0 and (not self._messages.empty()):\n \n # Update Timestamp\n timestamp = 0\n t = datetime.datetime.today()\n timestamp = t.microsecond/1000 + t.second*1000 + \\\n t.minute*60*1000 + t.hour*60*60*1000 + t.day*24*60*60*1000\n while timestamp > 4294967295: timestamp -= 4294967295\n \n # Remove Timeout Messages\n while (not self._messages.empty()):\n msg_time = self._messages.queue[0][0]\n if (timestamp - msg_time >= self._store_timeout) or\\\n (timestamp < msg_time and 4294967295 - \\\n msg_time + timestamp >= self._store_timeout):\n logger.warning(\"%s: message store timeout occurred.\" %\\\n (self.__class__.__name__))\n self._messages.get()\n else:\n break",
"def check(self, txid=None, amount=None, confirmation_height=None):\n\n txs = self.node.listtransactions(label=self.label, count=10000, include_watchonly=True)\n current_height = self.node.getblockcount()\n assert_equal(len(txs), self.expected_txs)\n\n addresses = self.node.listreceivedbyaddress(minconf=0, include_watchonly=True, address_filter=self.address['address'])\n\n if self.expected_txs:\n assert_equal(len(addresses[0][\"txids\"]), self.expected_txs)\n\n if txid is not None:\n tx, = [tx for tx in txs if tx[\"txid\"] == txid]\n assert_equal(tx[\"label\"], self.label)\n assert_equal(tx[\"address\"], self.address[\"address\"])\n assert_equal(tx[\"amount\"], amount)\n assert_equal(tx[\"category\"], \"receive\")\n assert_equal(tx[\"label\"], self.label)\n assert_equal(tx[\"txid\"], txid)\n\n # If no confirmation height is given, the tx is still in the\n # mempool.\n confirmations = (1 + current_height - confirmation_height) if confirmation_height else 0\n assert_equal(tx[\"confirmations\"], confirmations)\n if confirmations:\n assert \"trusted\" not in tx\n\n address, = [ad for ad in addresses if txid in ad[\"txids\"]]\n assert_equal(address[\"address\"], self.address[\"address\"])\n assert_equal(address[\"amount\"], self.expected_balance)\n assert_equal(address[\"confirmations\"], confirmations)\n # Verify the transaction is correctly marked watchonly depending on\n # whether the transaction pays to an imported public key or\n # imported private key. The test setup ensures that transaction\n # inputs will not be from watchonly keys (important because\n # involvesWatchonly will be true if either the transaction output\n # or inputs are watchonly).\n if self.data != Data.priv:\n assert_equal(address[\"involvesWatchonly\"], True)\n else:\n assert_equal(\"involvesWatchonly\" not in address, True)",
"def transaction_receipt(request, rp_id, payment_transaction_id, rp_guid=None,\n template_name=\"recurring_payments/transaction_receipt.html\"):\n if request.user.is_authenticated():\n rp = get_object_or_404(RecurringPayment, pk=rp_id)\n # only admin or user self can access this page\n if not request.user.profile.is_superuser and request.user.id != rp.user.id:\n raise Http403\n else:\n if not rp_guid: raise Http403\n rp = get_object_or_404(RecurringPayment, pk=rp_id, guid=rp_guid)\n\n payment_transaction = get_object_or_404(PaymentTransaction,\n pk=payment_transaction_id,\n status=True)\n if rp.platform == 'authorizenet':\n payment_profile = PaymentProfile.objects.filter(\n payment_profile_id=payment_transaction.payment_profile_id)[0]\n else:\n payment_profile = ''\n invoice = payment_transaction.payment.invoice\n\n return render_to_response(template_name, {\n 'rp': rp,\n 'invoice': invoice,\n 'payment_transaction': payment_transaction,\n 'payment_profile': payment_profile\n },\n context_instance=RequestContext(request))",
"def work_order_receipt_lookup(self, worker_service_id,\n worker_id,\n requester_id,\n receipt_status, id=None):\n pass",
"def check_entry_timeout(self, trade, timestamp, timeout):\n if trade.is_entry_timeout(timestamp, timeout):\n trader = self.strategy.trader()\n trade.cancel_open(trader)\n\n self.strategy.notify_order(trade.id, trade.dir, self.instrument.market_id, self.instrument.format_price(trade.entry_price),\n timestamp, trade.timeframe, 'cancel', None, self.instrument.format_price(trade.sl), self.instrument.format_price(trade.tp),\n comment='timeout')\n\n return True\n\n return False",
"def find_recovery_on_volume(volume):\n return _find(volume, RECOVERY_ON_VOLUME_TABLE)",
"def monitor_transactions(account):\n start_time = datetime.datetime.now()\n logger.info(\n format_log_message(\n 'Looking for new ripple transactions since last run'\n )\n )\n ledger_min_index = _get_min_ledger_index(account)\n marker = None\n has_results = True\n\n try:\n timeout = settings.RIPPLE_TIMEOUT\n except AttributeError:\n timeout = 5\n\n while has_results:\n try:\n response = account_tx(account,\n ledger_min_index,\n limit=PROCESS_TRANSACTIONS_LIMIT,\n marker=marker,\n timeout=timeout)\n except (RippleApiError, ConnectionError), e:\n logger.error(format_log_message(e))\n break\n\n transactions = response['transactions']\n marker = response.get('marker')\n has_results = bool(marker)\n\n for transaction in transactions:\n _store_transaction(account, transaction)\n\n transactions_timeout_reached = (\n datetime.datetime.now() - start_time >= datetime.timedelta(\n seconds=PROCESS_TRANSACTIONS_TIMEOUT\n )\n )\n\n if transactions_timeout_reached and has_results:\n has_results = False\n logger.error(\n 'Process_transactions command terminated because '\n '(%s seconds) timeout: %s',\n PROCESS_TRANSACTIONS_TIMEOUT, unicode(marker)\n )",
"def test_filter_transaction_by_receivers_failure(self):\n self._attempt_list_storage.gateway_transaction_exists.return_value = False\n self._map_storage.coin_address_exists.return_value = False\n transaction = Transaction(tx='723968', receivers=[self._not_gateway_managed_receiver])\n res = self._coin_transaction_consumer_impl.filter_transaction(transaction)\n self.assertFalse(res)\n self._map_storage.coin_address_exists.assert_called_once_with(self._not_gateway_managed_receiver.address)\n self._attempt_list_storage.find_by_trigger.assert_not_called()",
"def check_trade_timeout(self, trade, timestamp, profit_loss_rate=0.0):\n if trade.is_trade_timeout(timestamp) and trade.profit_loss > profit_loss_rate:\n trader = self.strategy.trader()\n trade.close(trader, self.instrument)\n\n self.strategy.notify_order(trade.id, trade.dir, self.instrument.market_id, self.instrument.format_price(trade.entry_price),\n timestamp, trade.timeframe, 'exit', None, self.instrument.format_price(trade.sl), self.instrument.format_price(trade.tp),\n comment='timeout')\n\n return True\n\n return False",
"def check(transaction):\n if not isinstance(transaction, Transaction):\n transaction = Transaction.objects.get(id=transaction)\n\n r = requests.post(\"https://www.blockonomics.co/api/searchhistory\",\n data=json.dumps({\"addr\": transaction.to_address}))\n try:\n history_data = json.loads(r.content.decode('utf-8'))['history'][0]\n except:\n return\n\n set_tx_details(history_data, transaction)",
"def work_order_receipt_lookup(self, worker_service_id,\n worker_id,\n requester_id,\n receipt_status, id=None):\n if worker_id is None or not is_hex(worker_id):\n logging.error(\"Worker id is empty or Invalid\")\n return create_jrpc_response(id, JsonRpcErrorCode.INVALID_PARAMETER,\n \"Worker id is empty or Invalid\")\n\n if worker_service_id is None or not is_hex(worker_service_id):\n logging.error(\"Worker service id is empty or Invalid\")\n return create_jrpc_response(id, JsonRpcErrorCode.INVALID_PARAMETER,\n \"Worker service id is empty or Invalid\")\n\n if requester_id is None or not is_hex(requester_id):\n logging.error(\"requester id is empty or Invalid\")\n return create_jrpc_response(id, JsonRpcErrorCode.INVALID_PARAMETER,\n \"requester id is empty or Invalid\")\n\n if not isinstance(receipt_status, ReceiptCreateStatus):\n logging.error(\"Invalid receipt status\")\n return create_jrpc_response(id, JsonRpcErrorCode.INVALID_PARAMETER,\n \"Invalid receipt status\")\n\n json_rpc_request = {\n \"jsonrpc\": \"2.0\",\n \"method\": \"WorkOrderReceiptLookUp\",\n \"id\": id,\n \"params\": {\n \"workerServiceId\": worker_service_id,\n \"workerId\": worker_id,\n \"requesterId\": requester_id,\n \"updateIndex\": receipt_status\n }\n }\n response = self.__uri_client._postmsg(json.dumps(json_rpc_request))\n return response",
"def _wait_for_confirmation(client, transaction_id, timeout):\n start_round = client.status()[\"last-round\"] + 1\n current_round = start_round\n\n while current_round < start_round + timeout:\n try:\n pending_txn = client.pending_transaction_info(transaction_id)\n except Exception:\n return\n if pending_txn.get(\"confirmed-round\", 0) > 0:\n return pending_txn\n elif pending_txn[\"pool-error\"]:\n raise Exception(\"pool error: {}\".format(pending_txn[\"pool-error\"]))\n client.status_after_block(current_round)\n current_round += 1\n raise Exception(\n \"pending tx not found in timeout rounds, timeout value = : {}\".format(timeout)\n )",
"def list_transactions_when_exists(\n self,\n account_ids: List[str] = None,\n payment_order_ids: List[str] = None,\n payee_ids: List[str] = None,\n direction: TransactionDirection = None,\n statuses: List[TransactionStatus] = None,\n value_timestamp_range: Dict[str, datetime] = None,\n booking_timestamp_range: Dict[str, datetime] = None,\n last_update_timestamp_range: Dict[str, datetime] = None,\n charge_amount_value_range: Dict[str, str] = None,\n order_by: List[TransactionOrderBy] = None,\n max_retry_seconds: int = DEFAULT_RETRY_SECONDS,\n retry_interval_seconds: int = DEFAULT_RETRY_INTERVAL\n ) -> TransactionsList:\n end_seconds = time.time() + max_retry_seconds\n while time.time() <= end_seconds:\n transactions = self.list_transactions(\n account_ids,\n payment_order_ids,\n payee_ids,\n direction,\n statuses,\n value_timestamp_range,\n booking_timestamp_range,\n last_update_timestamp_range,\n charge_amount_value_range,\n order_by,\n )\n if len(transactions) == 0:\n if time.time() + retry_interval_seconds <= end_seconds:\n break\n log.debug(\"Cannot find any transactions, retrying...\")\n time.sleep(retry_interval_seconds)\n continue\n return transactions\n log.debug(\"Failed to find any transactions after waiting\")\n raise TransactionsNotFoundError(\n \"Cannot find any transactions for the list criteria used\"\n )",
"def test_filter_transaction_exists(self):\n\n with patch.object(self._coin_transaction_consumer_impl, \"_filter_receivers\"):\n self._attempt_service.gateway_transaction_exists.return_value = True\n transaction = Transaction(tx='723968', receivers=[self._gateway_managed_receiver])\n res = self._coin_transaction_consumer_impl.filter_transaction(transaction)\n self.assertFalse(res)\n cast(MagicMock, self._coin_transaction_consumer_impl._filter_receivers).assert_not_called()",
"def work_order_receipt_retrieve(self, work_order_id, id=None):\n pass"
] | [
"0.6613937",
"0.6224734",
"0.5850289",
"0.56903857",
"0.5246986",
"0.52447987",
"0.51985824",
"0.51980007",
"0.5125841",
"0.5107872",
"0.50937873",
"0.50544405",
"0.4955215",
"0.49193367",
"0.49190685",
"0.48734447",
"0.48641413",
"0.48391086",
"0.48348594",
"0.483187",
"0.48214814",
"0.47970363",
"0.47957173",
"0.47753078",
"0.4768679",
"0.47103912",
"0.47082114",
"0.46963185",
"0.4691181",
"0.4680881"
] | 0.7078107 | 0 |
Test case for add_asset_share_feed | def test_add_asset_share_feed(self):
pass | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_add_assets_signal(self):\n story = create_story(title=\"Test Story\", summary=\"Test Summary\",\n byline=\"Test Byline\", status='published')\n asset = create_html_asset(type='text', title='Test Asset', \n body='Test content')\n self.assertEqual(story.assets.count(), 0)\n story.featured_assets.add(asset)\n story.save()\n self.assertEqual(story.assets.count(), 1)",
"def test_dashboards_v2_share(self):\n pass",
"def test_submit_asset_to_submission_service(self):\n pass",
"def test_set_share(self):\n self.app.post_json(url=\"/config/shares\",\n params=dict(\n source='gsiftp://source',\n destination='gsiftp://nowhere',\n vo='dteam',\n share=80\n ),\n status=200\n )",
"def test_already_added_asset(self):\n # Create a story\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline)\n # create a HtmlAsset\n asset = HtmlAsset()\n asset.save()\n translation = HtmlAssetTranslation(title='Test Asset', asset=asset)\n translation.save()\n # assign the asset to the story\n story.assets.add(asset)\n story.save()\n # confirm the asset is added to the story\n self.assertTrue(asset in story.assets.select_subclasses())\n # create a Section\n section = create_section(title=\"Test Section 1\", story=story)\n # Assign the asset to the section\n section_asset = SectionAsset(section=section, asset=asset, weight=0)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(asset in section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(asset in story.assets.select_subclasses())",
"def test_add_category_to_asset(self):\n pass",
"def test_edit_share(self, client):\n user = UserFactory(email='[email protected]')\n client = self.get_auth_client(user)\n\n i1 = InstitutionFactory.get_institution()\n item = ItemFactory.get_item(user=user, institution=i1)\n a1 = AccountFactory.get_account(item=item, type=Account.CREDIT)\n\n i2 = Institution.objects.filter(~Q(plaid_id=i1.plaid_id)).first()\n item = ItemFactory.get_item(user=user, institution=i2)\n a2 = AccountFactory.get_account(item=item, type=Account.CREDIT)\n\n url = '/v1/accounts/edit_share'\n\n # ids not match\n dic = {\n 'id100': 50,\n 'id200': 50\n }\n data = json.dumps(dic)\n response = client.put(url, data, content_type='application/json')\n assert response.status_code == 400\n\n return\n\n # The total sum not equal to 100\n dic = {\n 'id{}'.format(a1.id): 10,\n 'id{}'.format(a2.id): 20\n }\n data = json.dumps(dic)\n response = client.put(url, data, content_type='application/json')\n assert response.status_code == 400\n\n # success\n dic = {\n 'id{}'.format(a1.id): 50,\n 'id{}'.format(a2.id): 50\n }\n data = json.dumps(dic)\n response = client.put(url, data, content_type='application/json')\n assert response.status_code == 200\n\n a1.refresh_from_db()\n a2.refresh_from_db()\n\n assert a1.transfer_share == 50\n assert a1.transfer_share == 50",
"def test_dashboards_v2_list_shares(self):\n pass",
"def test_import_test_asset(self):\n pass",
"def test_update_test_asset_content(self):\n pass",
"def test_update_asset_content(self):\n pass",
"def test_get_test_asset(self):\n pass",
"def test_already_added_asset(self):\n # assign the asset to the story\n self.story.assets.add(self.asset)\n self.story.save()\n # confirm the asset is added to the story\n self.assertTrue(self.asset in self.story.assets.select_subclasses())\n # Assign the asset to the section\n container = Container.objects.get(name='left')\n section_asset = SectionAsset(section=self.section, asset=self.asset, container=container)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(self.asset in self.section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(self.asset in self.story.assets.select_subclasses())",
"def test_create_system_asset(self):\n pass",
"def test_add_new_asset(self):\n self.assertEqual(self.all_assets.count(), 1)\n new_asset = Asset(asset_code=\"IC002\",\n serial_number=\"SN0045\",\n model_number=self.test_assetmodel,\n assigned_to=self.user)\n new_asset.save()\n self.assertEqual(self.all_assets.count(), 2)",
"def test_auto_add_assets_to_story(self):\n # Confirm that the story has no assets\n self.assertEqual(self.story.assets.count(), 0)\n # Assign the asset to the section\n container = Container.objects.get(name='left')\n section_asset = SectionAsset(section=self.section, asset=self.asset, container=container)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(self.asset in self.section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(self.asset in self.story.assets.select_subclasses())",
"def test_update_test_asset(self):\n pass",
"def test_share_00(self, mocker):\n contributors = ['[email protected]:user:writer', '[email protected]:user:reader']\n g = GSheets(self.fake.file_path(depth=1, category=None, extension='json'), contributors)\n g.spreadsheet = Spreadsheet(None, None)\n g.spreadsheet.share = mocker.MagicMock()\n\n g.share()\n\n assert g.spreadsheet.share.call_count == len(contributors)",
"def publish_asset(\n self,\n *,\n asset_id: str,\n asset_manifest_path: str,\n asset_selector: str,\n asset_type: \"AssetType\",\n ) -> None:\n ...",
"def test_new_share(self):\n \n test_user_with_checkpoint = self.create_saved_test_user_with_checkpoint()\n another_test_user_to_share = self.create_saved_test_user()\n \n data = {\"user_id\": test_user_with_checkpoint.user_obj.id,\n \"to_user_id\": another_test_user_to_share.user_obj.id,\n \"signature\": gen_signature(\"put\",\n \"share\",\n gen_api_key(test_user_with_checkpoint.user_obj.access_token, \n test_user_with_checkpoint.user_obj.id)),\n \"user_checkpoint_id\": test_user_with_checkpoint.user_checkpoint_obj.id\n }\n \n resp = self.client.put(\"/share/\", data=data)\n assert \"ok\" in resp.data\n assert not get_share_w_attr(test_user_with_checkpoint.user_obj, \n another_test_user_to_share.user_obj, \n test_user_with_checkpoint.user_checkpoint_obj) is None",
"def test_update_asset(self):\n pass",
"def test_share_01(self, mocker):\n contributors = ['[email protected]']\n g = GSheets(self.fake.file_path(depth=1, category=None, extension='json'), contributors)\n g.spreadsheet = Spreadsheet(None, None)\n g.spreadsheet.share = mocker.MagicMock()\n\n g.share()\n\n assert not g.spreadsheet.share.called",
"def test_import_software_asset(self):\n pass",
"def test_add_asset_type_assignment_rule(self):\n pass",
"def test_create_nas_share_by_nas(self):\n pass",
"def test_wrong_config_shares0(self):\n self.app.post_json(url=\"/config/shares\",\n params=dict(\n source='gsiftp://source',\n destination='gsiftp://nowhere',\n vo='dteam',\n share='dfdf'\n ),\n status=400\n )",
"def test_auto_add_assets_to_story(self):\n # Create a story\n title = ('Transportation Challenges Limit Education Choices for '\n 'Denver Parents')\n summary = \"\"\"\n Many families in the Denver metro area use public\n transportation instead of a school bus because for them, a\n quality education is worth hours of daily commuting. Colorado's\n school choice program is meant to foster educational equity,\n but the families who benefit most are those who have time and\n money to travel. Low-income families are often left in a lurch.\n \"\"\"\n byline = \"Mile High Connects\"\n story = create_story(title=title, summary=summary, byline=byline)\n # Confirm that the story has no assets\n self.assertEqual(story.assets.count(), 0)\n # create a Section\n section = create_section(title=\"Test Section 1\", story=story)\n # create a HtmlAsset\n asset = HtmlAsset()\n asset.save()\n translation = HtmlAssetTranslation(title='Test Asset', asset=asset)\n translation.save()\n # Assign the asset to the section\n section_asset = SectionAsset(section=section, asset=asset, weight=0)\n section_asset.save()\n # Confirm the asset is in the section's list\n self.assertTrue(asset in section.assets.select_subclasses())\n # Confirm that the asset is in the story's list\n self.assertTrue(asset in story.assets.select_subclasses())",
"def test_import_system_asset(self):\n pass",
"async def test_endpoint_edit_share_correct(self):\n with self.patch_json_dump:\n await edit_share_handler(self.mock_request)\n self.json_mock.assert_called_once()",
"def ensure_share(self, context, share, share_server=None):\r\n LOG.debug(\"Ensure share.\")"
] | [
"0.65638447",
"0.6394304",
"0.63698304",
"0.6273951",
"0.613335",
"0.6046205",
"0.60451096",
"0.60319424",
"0.599211",
"0.5991656",
"0.5964404",
"0.59383875",
"0.5885912",
"0.58813864",
"0.5863835",
"0.57720274",
"0.57403314",
"0.5740057",
"0.57348704",
"0.5733369",
"0.5722773",
"0.5697476",
"0.568689",
"0.5678512",
"0.56459767",
"0.5636143",
"0.5621889",
"0.5572644",
"0.55492175",
"0.554391"
] | 0.9495953 | 0 |
Build redirect with redirect_state parameter. | def get_redirect_uri(self, state=None):
regex = re.compile(r"\:(80|443)\/")
uri = regex.sub("/", self.redirect_uri)
if self.REDIRECT_STATE and state:
uri = url_add_parameters(uri, {'redirect_state': state})
return uri | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_redirect_uri(self, state=None):\n if state is not None:\n uri = self.blank_redirect_uri\n if self.REDIRECT_STATE and state:\n uri = url_add_parameters(uri, {'redirect_state': state})\n else:\n uri = self.redirect_uri\n return uri",
"def createAuthRequestURL(self, state=None, **kwargs):\n # Fill arguments for preperation URL\n kwargs['response_type'] = 'code'\n kwargs['state'] = state or self.createState()\n kwargs['client_id'] = self.parameters['client_id']\n kwargs['redirect_uri'] = kwargs.get('redirect_uri') or self.parameters['redirect_uri']\n kwargs['scope'] = kwargs.get('scope') or self.parameters['scope'] or self.parameters['scopes_supported']\n if self.parameters['prompt']:\n kwargs['prompt'] = self.parameters['prompt']\n \n # Add IdP authorization endpoint\n self.log.info(kwargs['state'], 'session, generate URL for authetication.')\n url = (kwargs.get('authorization_endpoint') or self.parameters['authorization_endpoint']) + '?access_type=offline'\n if not url:\n return S_ERROR('No found authorization endpoint.')\n\n # Add arguments\n for key, value in kwargs.items():\n url += '&%s=%s' % (key, '+'.join(list(set(v.strip() for v in value))) if isinstance(value, list) else value)\n return S_OK({'URL': url, 'Session': kwargs['state']})",
"def kXR_redirect(self, streamid=None, status=None, dlen=None, port=None,\n host=None, opaque=None, token=None):\n response_struct = get_struct('ServerResponseHeader') + \\\n get_struct('ServerResponseBody_Redirect')\n if not host: host = ''\n else: host += (opaque if opaque else '') + (token if token else '')\n params = \\\n {'streamid': streamid if streamid else 0,\n 'status' : status if status else get_responseid('kXR_redirect'),\n 'dlen' : dlen if dlen else len(host) + 4,\n 'port' : port if port else 0,\n 'host' : host if host else r''}\n return self.mh.build_message(response_struct, params)",
"def create_redirect_url(self):\n return url_for(self.create_redirect_to_view)",
"def make_redirect_uri(self, application_url):\n log.debug(\"make_redirect_uri: application_url=%s base_url_path=%s \"\n \"redirect_uri=%s\",\n application_url, self.base_url_path, self.redirect_uri)\n return self._construct_url([application_url, self.base_url_path,\n self.redirect_uri])",
"def _make_combined_url(base_url, parameters, state):\n url = base_url.rstrip('?')\n url_parts = [url]\n sep_with_ampersand = ('?' in url)\n if parameters:\n query_string = urllib.urlencode(parameters)\n url_parts.extend([('&' if (sep_with_ampersand) else '?'), \n query_string])\n sep_with_ampersand = True\n\n if state:\n url_parts.extend([('&' if (sep_with_ampersand) else '?'), \n 'state=',\n state])\n\n return ''.join(url_parts)",
"def get_redirect_url(self, b36_int):\n return _redirect_implementation(request=self.request,\n model=self.model,\n b36_encoded_pk=b36_int)",
"def redirect(self) -> bytes:\n self.logger.debug(\"--- In SSO Redirect ---\")\n _info = self.unpack_redirect()\n self.logger.debug(\"Unpacked redirect :\\n{!s}\".format(pprint.pformat(_info)))\n\n ticket = _get_ticket(self.context, _info, BINDING_HTTP_REDIRECT)\n return self._redirect_or_post(ticket)",
"async def get_auth_url(self, state=None, redirect_uri=None):\n params = {\n 'client_id': self.settings.esia_client_id,\n 'client_secret': '',\n 'redirect_uri': redirect_uri or self.settings.redirect_uri,\n 'scope': self.settings.esia_scope,\n 'response_type': 'code',\n 'state': state or str(uuid.uuid4()),\n 'timestamp': get_timestamp(),\n 'access_type': 'offline'\n }\n params = await sign_params(params,\n certificate_file=self.settings.certificate_file,\n private_key_file=self.settings.private_key_file)\n\n params = urlencode(sorted(params.items())) # sorted needed to make uri deterministic for tests.\n\n return '{base_url}{auth_url}?{params}'.format(base_url=self.settings.esia_service_url,\n auth_url=self._AUTHORIZATION_URL,\n params=params)",
"def start(self, url_state=None):\n csrf_token = base64.urlsafe_b64encode(os.urandom(16))\n state = csrf_token\n if url_state is not None:\n state += \"|\" + url_state\n self.session[self.csrf_token_session_key] = csrf_token\n\n return self._get_authorize_url(self.redirect_uri, state)",
"def intermediate_redirect(cls, form_path):\r\n from r2.lib.template_helpers import add_sr\r\n dest = cls.format_output_url(request.fullpath)\r\n path = add_sr(form_path + query_string({\"dest\": dest}))\r\n return cls.redirect(path)",
"def redirect(cls, dest, code = 302):\r\n dest = cls.format_output_url(dest)\r\n c.response.headers['Location'] = dest\r\n c.response.status_code = code\r\n return c.response",
"def _redirect_login(request, fb, redirect_path, keep_state, required_permissions):\n redirect_uri = fb.url_for(_redirect_path(redirect_path, fb, request.path))\n\n if keep_state:\n if callable(keep_state):\n state = keep_state(request)\n else:\n state = request.get_full_path()\n # passing state directly to facebook oauth endpoint doesn't work\n redirect_uri += '?state=%s' % urlquote(state)\n\n url = fb.get_login_url(next=redirect_uri,\n required_permissions=required_permissions)\n\n return fb.redirect(url)",
"def authorization_url(self, redirect_uri=None, params=None, state=None,\n immediate=None, endpoint='authorize'):\n\n # prepare required args\n args = {\n 'type': 'web_server',\n 'client_id': self.client_id,\n }\n\n # prepare optional args\n redirect_uri = redirect_uri or self.redirect_uri\n if redirect_uri is not None:\n args['redirect_uri'] = redirect_uri\n if state is not None:\n args['state'] = state\n if immediate is not None:\n args['immediate'] = str(immediate).lower()\n\n args.update(params or {})\n\n return '%s?%s' % (urljoin(self.oauth_base_url, endpoint),\n urllib.urlencode(args))",
"def redirect(self, request, redirect_url):\n response_headers = [('Content-type', 'text/plain'),\n ('Location', redirect_url)]\n request['start']('302 REDIRECT', response_headers)\n return [\"Redirecting to %s\" % redirect_url]",
"def redirect(self, location):\n self.status=302\n headers=self.headers\n headers['status']='302 Moved Temporarily'\n headers['location']=location\n return location",
"def redirect(cls, location, status_code=302):\n if '\\x0d' in location or '\\x0a' in location:\n raise ValueError('invalid redirect URL')\n return cls(status_code=status_code, headers={'Location': location})",
"def redirect(target):\n return {\n 'status': '302',\n 'statusDescription': 'Found',\n 'headers': {\n 'location': [{\n 'key': 'Location',\n 'value': target\n }]\n }\n }",
"def gconnect():\n session['state'] = new_state()\n # Google will return the state as a query parameter\n authorization_url, state = FLOW.authorization_url(state=session['state'])\n return redirect(authorization_url)",
"def genauthurl(redirect=False, scope=False):\n if not scope:\n scope = c.oauth_scope\n\n return (c.sandbox_host if c.sandbox else c.production_host) \\\n + 'oauth/v2/authenticate?client_id=' \\\n + c.client_id \\\n + '&response_type=code&scope=' \\\n + scope \\\n + (('&redirect_uri=' + redirect) if redirect else '')",
"def redirect(self, location, status):\n url = ''\n # location string could contain either an abolute path or a relative one.\n # Also relative address could begin with /, i.e. from the root directory\n # on the same server, or be related to current path.\n # Therefore we split location for 3 parts:\n # 1) a host with a protocol http(s)://site.com\n # 2) the rest of the link (including first / if it presents)\n # 3) beginning / if it presents (as a flag)\n redirect_re = re.compile('^(https?://[^/]+)?((/)?(?:.*))$', re.I)\n matches = redirect_re.match(location)\n if matches.group(1): # if there is a host in the location\n url = location # the path is absolute, redirect there\n elif matches.group(3): # there is beginning /\n # the path is related to the root directory of the same server\n # add a path to the host\n url = '{}://{}{}'.format(self.url.protocol, self.url.host, matches.group(2))\n else: # the path is related to current directory on the server\n # get current path from the request\n path = self.url.request.rsplit('/', 1)[0] + '/'\n # add a new path to current path with the host\n url = '{}://{}{}'.format(self.url.protocol, self.url.host, path + matches.group(2))\n return TaskRedirect(self.url.host, status, URL(url))",
"def __init__(self, redirect_url, headers=None, content=None):\n super().__init__(redirect_url, headers=headers, content=content)\n self.headers.append((\"Location\", redirect_url))",
"def _build_state_value(django_request, user):\n relative_path = django_request.get_full_path().encode('utf-8')\n uri = django_request.build_absolute_uri(relative_path)\n token = xsrfutil.generate_token(xsrf_secret_key(), user.user_id(),\n action_id=str(uri))\n return uri + ':' + token",
"def _build_state_value(django_request, user):\n relative_path = django_request.get_full_path().encode('utf-8')\n uri = django_request.build_absolute_uri(relative_path)\n token = xsrfutil.generate_token(xsrf_secret_key(), user.user_id(),\n action_id=str(uri))\n return uri + ':' + token",
"def redirect(url):",
"def get_redirect_url(self, *args, **kwargs):\n redirect = kwargs['route']\n self.permanent = redirect.permanent\n return redirect.target.url",
"def redirect_to():\n\n args_dict = request.args.items()\n args = CaseInsensitiveDict(args_dict)\n\n # We need to build the response manually and convert to UTF-8 to prevent\n # werkzeug from \"fixing\" the URL. This endpoint should set the Location\n # header to the exact string supplied.\n response = app.make_response(\"\")\n response.status_code = 302\n if \"status_code\" in args:\n status_code = int(args[\"status_code\"])\n if status_code >= 300 and status_code < 400:\n response.status_code = status_code\n response.headers[\"Location\"] = args[\"url\"].encode(\"utf-8\")\n\n return response",
"def __init__(self, url, **kwargs):\n super(Redirect, self).__init__(**kwargs)\n self.value = url",
"def HttpResponseRedirectWithQuery(redirect_uri, query_params):\n nq = \"?\"\n for pname in query_params.keys():\n if query_params[pname]:\n redirect_uri += nq + pname + \"=\" + quote(query_params[pname])\n nq = \"&\"\n return HttpResponseRedirect(redirect_uri)",
"def get_url_with_redirect(url, redirect_url):\n if redirect_url:\n url = url + '?' + urlencode({settings.REDIRECT_FIELD_NAME: redirect_url})\n\n return url"
] | [
"0.6220852",
"0.58699924",
"0.5820168",
"0.5787547",
"0.568278",
"0.560986",
"0.5485404",
"0.54818624",
"0.54510164",
"0.5417507",
"0.54143995",
"0.54086787",
"0.5376258",
"0.53444564",
"0.53163904",
"0.52998877",
"0.5273541",
"0.525929",
"0.5201142",
"0.51966715",
"0.51813745",
"0.51802117",
"0.5168497",
"0.5168497",
"0.516831",
"0.51648825",
"0.5160331",
"0.51496714",
"0.51429754",
"0.5104942"
] | 0.5998109 | 1 |
Loads surface mesh using meshio. Not meant for mixed shape meshes. | def load_mesh(fname):
fname = abs_fname_(fname)
m = meshio.read(fname)
mesh = Mesh()
mesh.vertices = m.points
for i, c in enumerate(m.cells):
if i == 0:
faces = c.data
else:
faces = np.vstack((faces, c.data))
mesh.faces = faces
return mesh | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def load_mesh(self, script_to_apply=None): \n # convert to an obj file using meshlab\n if script_to_apply is None:\n meshlabserver_cmd = 'meshlabserver -i \\\"%s\\\" -o \\\"%s\\\"' %(self.filename, self.obj_filename)\n else:\n meshlabserver_cmd = 'meshlabserver -i \\\"%s\\\" -o \\\"%s\\\" -s \\\"%s\\\"' %(self.filename, self.obj_filename, script_to_apply) \n os.system(meshlabserver_cmd)\n logging.info('MeshlabServer Command: %s' %(meshlabserver_cmd))\n\n if not os.path.exists(self.obj_filename):\n raise ValueError('Meshlab conversion failed for %s' %(self.obj_filename))\n \n # read mesh from obj file\n of = obj_file.ObjFile(self.obj_filename)\n self.mesh_ = of.read()\n return self.mesh_",
"def load_mesh(self, name: str = None) -> dolfin.mesh:\n if self.mesh is None:\n self.mesh = df.Mesh()\n if name is None:\n mesh_name = self._casedir / Path(\"mesh.xdmf\")\n else:\n mesh_name = self._casedir / Path(f\"{name}.xdmf\")\n with df.XDMFFile(str(mesh_name)) as infile:\n infile.read(self.mesh)\n return self.mesh",
"def load_mesh(name):\n if name[-4:] == \".obj\":\n bpy.ops.import_scene.obj(filepath=name)\n mesh_name = (os.path.basename(name)).replace('.obj','')\n return mesh_name\n else:\n raise ValueError(\"{} not an obj file\".format(name))",
"def read_mesh(self, src):\n self.logger.debug(\"Reading mesh information from file %s\",\n src.file_name)\n\n self._title = src.title\n self._datetime = src.datetime\n\n self._ndim = src.ndim\n\n # copying mesh quantities\n self._npoin3 = src.npoin3\n self._nelem3 = src.nelem3\n self._ndp3 = src.ndp3\n self._nplan = src.nplan\n self.typ_elem = src.typ_elem\n\n # Copying mesh coordiantes\n self._meshx = src.meshx\n self._meshy = src.meshy\n self._meshz = src.meshz\n\n # Copying connectivity\n self._ikle3 = src.ikle3\n\n # Parallel interface information\n self._nptir = src.nptir\n self._knolg = src.knolg\n\n # Boundary information\n # nptfr and ipob3 are read reagrdless of presence of boundary file\n # As they are need in serafin format\n self._nptfr = src.nptfr\n self._ipob3 = src.ipob3\n if self.boundary_file is not None:\n self.typ_bnd_elem = src.typ_bnd_elem\n self._nelebd = src.nelebd\n self._bnd_info = src.bnd_info\n self._ikle_bnd = src.ikle_bnd\n self._nbor = src.nbor",
"def load_volume_mesh(fname):\n fname = abs_fname_(fname)\n\n m = meshio.read(fname)\n mesh = Mesh()\n mesh.vertices = m.points\n\n for i, c in enumerate(m.cells):\n if i == 0:\n elements = c.data\n else:\n elements = np.vstack((elements, c.data))\n\n mesh.elements = elements\n\n return mesh",
"def read(self, mesh_path: str) -> None:\n\n reader = VtuReader(mesh_path)\n self.set_mesh_data(mesh=reader.mesh, bc=reader.bc, mpc=reader.mpc)",
"def __init__(self, file_path):\n\n # Comments\n # mtllib mtl_name\n # o object_name\n # v x y z\n # vt u v\n # vn x y z\n # f v0/t0/n0 v1/t1/n1 v2/t2/n2\n\n print('loading mesh \"%s\"' % file_path)\n mesh_file = open(file_path, 'r')\n\n verts = []\n texs = []\n normals = []\n faces = []\n\n # For each line of the input file\n for line in mesh_file:\n line = line.rstrip(' \\r\\n')\n\n # Skip comments\n if line.startswith('#') or line == '':\n continue\n\n tokens = line.split(' ')\n tokens = map(lambda t: t.strip(' '), tokens)\n tokens = list(filter(lambda t: t != '', tokens))\n\n prefix = tokens[0]\n tokens = tokens[1:]\n\n if prefix == 'v':\n vert = list(map(lambda v: float(v), tokens))\n verts.append(vert)\n\n if prefix == 'vt':\n tc = list(map(lambda v: float(v), tokens))\n texs.append(tc)\n\n if prefix == 'vn':\n normal = list(map(lambda v: float(v), tokens))\n normals.append(normal)\n\n if prefix == 'f':\n assert len(tokens) == 3, \"only triangle faces are supported\"\n\n face = []\n for token in tokens:\n indices = list(map(lambda idx: int(idx), token.split('/')))\n face.append(indices)\n\n faces.append(face)\n\n mesh_file.close()\n\n self.num_faces = len(faces)\n\n print('num verts=%d' % len(verts))\n print('num_faces=%d' % self.num_faces)\n\n # Create numpy arrays to store the vertex data\n list_verts = np.zeros(shape=(3 * self.num_faces, 3), dtype=np.float32)\n list_texcs = np.zeros(shape=3 * 2 * self.num_faces, dtype=np.float32)\n list_norms = np.zeros(shape=3 * 3 * self.num_faces, dtype=np.float32)\n\n cur_vert_idx = 0\n\n # For each triangle\n for face in faces:\n # For each triplet of indices\n for triplet in face:\n v_idx, t_idx, n_idx = triplet\n\n # Note: OBJ uses 1-based indexing\n vert = verts[v_idx-1]\n texc = texs[t_idx-1]\n normal = normals[n_idx-1]\n\n list_verts[cur_vert_idx, :] = vert\n list_texcs[2*cur_vert_idx:2*(cur_vert_idx+1)] = texc\n list_norms[3*cur_vert_idx:3*cur_vert_idx+3] = normal\n\n cur_vert_idx += 1\n\n # Re-center the object so that y=0 is at the base,\n # and the object is centered in x and z\n x_coords = list_verts[:, 0]\n z_coords = list_verts[:, 2]\n min_y = list_verts[:, 1].min()\n mean_x = (x_coords.min() + x_coords.max()) / 2\n mean_z = (z_coords.min() + z_coords.max()) / 2\n list_verts[:, 1] -= min_y\n list_verts[:, 0] -= mean_x\n list_verts[:, 2] -= mean_z\n\n # Compute the object extents after centering\n x_coords = list_verts[:, 0]\n y_coords = list_verts[:, 1]\n z_coords = list_verts[:, 2]\n self.y_max = y_coords.max()\n\n # Create a vertex list to be used for rendering\n self.vlist = pyglet.graphics.vertex_list(\n 3 * self.num_faces,\n ('v3f', list_verts.reshape(-1)),\n ('t2f', list_texcs),\n ('n3f', list_norms)\n )\n\n # Load the texture associated with this mesh\n file_name = os.path.split(file_path)[-1]\n tex_name = file_name.split('.')[0]\n tex_path = get_file_path('textures', tex_name, 'png')\n self.texture = load_texture(tex_path)",
"def create_mesh_from_data(mesh_name, bsp_verts, bsp_faces, materials, scale_factor):\n\n\n def vertex_stream(vertices, stream_id):\n for vertex in vertices:\n yield vertex[stream_id]\n\n # Create mesh and object\n me = bpy.data.meshes.new(mesh_name+'Mesh')\n ob = bpy.data.objects.new(\"LEVEL\" + mesh_name, me)\n ob.show_name = True\n\n # Link object to scene\n bpy.context.scene.objects.link(ob)\n \n # Create the vertex data\n face_list = list(vertex_stream(bsp_faces, 1))\n mesh_verts = list(vertex_stream(bsp_verts, 0))\n\n me.from_pydata(mesh_verts, [], face_list)\n\n # Update mesh with new data\n me.update()\n apply_uvs(me, bsp_verts)\n\n # Add materials to mesh\n for cmaterial in materials:\n me.materials.append(cmaterial)\n\n # Apply material indexes to mesh faces\n face_materials = list(vertex_stream(bsp_faces, 0))\n\n for polygon_idx, current_polygon in enumerate(me.polygons):\n current_polygon.material_index = face_materials[polygon_idx]\n\n # Add additional properties to the new object\n ob['scale_factor'] = scale_factor\n\n return ob",
"def _load_serialized_mesh(filename):\n print 'Loading mesh data from NPZ file', filename\n npzfile = np.load(filename)\n\n k = npzfile['k'].item()\n initial_point = npzfile['initial_point']\n initial_face_index = npzfile['initial_face_index'].item()\n\n all_vertices = npzfile['all_vertices']\n triangles = npzfile['triangles']\n face_local_bases = npzfile['face_local_bases']\n neighbor_faces = npzfile['neighbor_faces']\n\n return [k, initial_point, initial_face_index,\n all_vertices, triangles, face_local_bases, neighbor_faces]",
"def from_file(filename=None, io='auto', prefix_dir=None,\n omit_facets=False, file_format=None):\n if isinstance(filename, Mesh):\n return filename\n\n if io == 'auto':\n if filename is None:\n output('filename or io must be specified!')\n raise ValueError\n else:\n io = MeshIO.any_from_filename(filename, prefix_dir=prefix_dir,\n file_format=file_format)\n\n output('reading mesh (%s)...' % io.filename)\n timer = Timer(start=True)\n\n trunk = io.get_filename_trunk()\n mesh = Mesh(trunk)\n mesh = io.read(mesh, omit_facets=omit_facets)\n\n output('...done in %.2f s' % timer.stop())\n\n mesh._set_shape_info()\n\n return mesh",
"def importMesh(self, name, file, mtype, material, **args):\n args = dictToTuple(**args)\n\n if not self.rank:\n logging.info('Importing mesh from {}'.format(file))\n\n self.lmp.command('fix {} all {} file {} type {} '.format(name, mtype, file, material) + ('{} ' * len(args)).format(*args))",
"def import_mesh(self, scenegroup):\n logger.debug((\"mesh\", scenegroup[\"asset\"]))\n if scenegroup[\"asset\"] in self._imported_assets:\n return self._imported_assets[scenegroup[\"asset\"]]\n asset = self.gridinfo.getAsset(scenegroup[\"asset\"])\n if not asset[\"type\"] == str(AssetType.OgreMesh):\n logger.debug(\"(\"+asset[\"type\"]+\")\")\n return\n materials = []\n if \"materials\" in scenegroup:\n materials = scenegroup[\"materials\"]\n mesh = self.create_mesh_frombinary(scenegroup[\"asset\"], asset[\"name\"], asset[\"data\"])\n return self.create_mesh_fromomesh(scenegroup[\"asset\"], asset[\"name\"],\n mesh, materials)",
"def from_mesh_data(mesh):\n if len(mesh.normals) > 0 and len(mesh.uvs) > 0:\n vformat = p3d.GeomVertexFormat.get_v3n3t2()\n vertices = np.column_stack((mesh.vertices, mesh.normals, mesh.uvs))\n elif len(mesh.normals) > 0:\n vformat = p3d.GeomVertexFormat.get_v3n3()\n vertices = np.column_stack((mesh.vertices, mesh.normals))\n elif len(mesh.uvs) > 0:\n vformat = p3d.GeomVertexFormat.get_v3t2()\n vertices = np.column_stack((mesh.vertices, mesh.uvs))\n else:\n vformat = p3d.GeomVertexFormat.get_v3()\n vertices = mesh.vertices\n return Mesh._make(vformat, vertices, mesh.faces)",
"def Read(self, filename=None, element_type=\"tri\", reader_type=None, reader_type_format=None,\n reader_type_version=None, order=0, read_surface_info=False, **kwargs):\n\n if reader_type != 'read_separate':\n if not isinstance(filename,str):\n raise ValueError(\"filename must be a string\")\n return\n\n if reader_type is None:\n if filename.split('.')[-1] == \"msh\":\n reader_type = \"gmsh\"\n elif filename.split('.')[-1] == \"obj\":\n reader_type = \"obj\"\n elif filename.split('.')[-1] == \"unv\":\n reader_type = \"unv\"\n elif filename.split('.')[-1] == \"fro\":\n reader_type = \"fro\"\n elif filename.split('.')[-1] == \"dat\":\n for key in kwargs.keys():\n inkey = insensitive(key)\n if \"connectivity\" in inkey and \"delimiter\" not in inkey:\n reader_type = \"read_separate\"\n break\n if reader_type is None:\n raise ValueError(\"Mesh file format was not undertood. Please specify it using reader_type keyword\")\n\n\n self.filename = filename\n self.reader_type = reader_type\n self.reader_type_format = reader_type_format\n self.reader_type_version = reader_type_version\n\n if self.reader_type == 'salome':\n self.ReadSalome(filename, element_type=element_type, read_surface_info=read_surface_info)\n elif reader_type == 'GID':\n self.ReadGIDMesh(filename, element_type, order)\n elif self.reader_type == 'gmsh':\n self.ReadGmsh(filename, element_type=element_type, read_surface_info=read_surface_info)\n elif self.reader_type == 'obj':\n self.ReadOBJ(filename, element_type=element_type, read_surface_info=read_surface_info)\n elif self.reader_type == 'fenics':\n self.ReadFenics(filename, element_type)\n elif self.reader_type == 'vtu':\n self.ReadVTK(filename)\n elif self.reader_type == 'unv':\n self.ReadUNV(filename, element_type)\n elif self.reader_type == 'fro':\n self.ReadFRO(filename, element_type)\n elif self.reader_type == 'read_separate':\n # READ MESH FROM SEPARATE FILES FOR CONNECTIVITY AND COORDINATES\n from Florence.Utils import insensitive\n # return insensitive(kwargs.keys())\n for key in kwargs.keys():\n inkey = insensitive(key)\n if \"connectivity\" in inkey and \"delimiter\" not in inkey:\n connectivity_file = kwargs.get(key)\n if \"coordinate\" in insensitive(key) and \"delimiter\" not in inkey:\n coordinates_file = kwargs.get(key)\n\n self.ReadSeparate(connectivity_file,coordinates_file,element_type,\n delimiter_connectivity=',',delimiter_coordinates=',')\n elif self.reader_type == 'ReadHDF5':\n self.ReadHDF5(filename)\n\n self.nnode = self.points.shape[0]\n # MAKE SURE MESH DATA IS CONTIGUOUS\n self.points = np.ascontiguousarray(self.points)\n self.elements = np.ascontiguousarray(self.elements)\n return",
"def create_mesh(self):\n print(\"create_mesh\")\n faces = self.get_faces()\n print(\"num faces: {}\".format(len(faces)))\n\n # TODO: perform face filtering to remove long edges in Z direction\n # filtered_faces = self.get_filtered_faces(faces)\n # print(\"num filtered faces: {}\".format(len(filtered_faces)))\n\n vertices = self.xyz_points.T\n\n # handle texture mappings\n vertex_index_to_texture = []\n for j in range(0, self.height):\n for i in range(0, self.width):\n # vertex_index = (j * self.width) + ij\n w = i / self.width\n h = (self.height - j - 1) / self.height\n vertex_index_to_texture.append(\n (w, h)\n )\n\n # Create material.\n # TODO: make the string/filename randomly generated and unique\n file0 = open(os.path.join(self.args.path, \"triangle_mesh.obj.mtl\"), \"w\") # write mode\n file0.write(\"newmtl material_0\\n\")\n # Save image here.\n cv2.imwrite(os.path.join(self.args.path, \"triangle_mesh.png\"), self.bgr)\n file0.write(\"map_Kd triangle_mesh.png\\n\")\n file0.close()\n\n # https://en.wikipedia.org/wiki/Wavefront_.obj_file\n # https://github.com/mmatl/pyrender/blob/master/examples/models/fuze.obj\n obj_path = os.path.join(self.args.path, \"triangle_mesh.obj\")\n file1 = open(obj_path, \"w\") # write mode\n file1.write(\"mtllib ./triangle_mesh.obj.mtl\\n\")\n for vertex in vertices:\n x, y, z = vertex\n file1.write(\"v {} {} {}\\n\".format(x, y, z))\n file1.write(\"usemtl material_0\\n\")\n for w, h in vertex_index_to_texture:\n file1.write(\"vt {} {}\\n\".format(w, h))\n for face in faces:\n a, b, c = face\n a += 1\n b += 1\n c += 1\n file1.write(\"f {}/{} {}/{} {}/{}\\n\".format(\n a, a, b, b, c, c\n )\n )\n file1.close()\n\n # Load the trimesh from OBJ file.\n trimesh_mesh = trimesh.load(obj_path)\n # trimesh_mesh.show()\n\n mesh = pyrender.Mesh.from_trimesh(trimesh_mesh, smooth=False)\n self.scene = pyrender.Scene(ambient_light=[3.0, 3.0, 3.0])\n\n camera = pyrender.IntrinsicsCamera(\n self.focal_length, self.focal_length, self.width / 2, self.height / 2\n )\n self.camera_pose = np.array([\n [1.0, 0.0, 0.0, 0.0],\n [0.0, 1.0, 0.0, 0.0],\n [0.0, 0.0, 1.0, 0.0],\n [0.0, 0.0, 0.0, 1.0],\n ])\n # https://pyrender.readthedocs.io/en/latest/examples/cameras.html#creating-cameras\n # https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.transform.Rotation.html\n r = R.from_rotvec(np.array([0, np.pi, 0]))\n r = R.from_rotvec(np.array([0.0, 0, np.pi])) * r\n matrix = r.as_matrix()\n self.camera_pose[:3, :3] = matrix\n\n light = pyrender.PointLight(\n color=[1.0, 1.0, 1.0],\n intensity=0.0\n )\n\n self.nm = pyrender.Node(mesh=mesh, matrix=np.eye(4))\n self.nl = pyrender.Node(light=light, matrix=np.eye(4))\n self.nc = pyrender.Node(camera=camera, matrix=np.eye(4))\n self.scene.add_node(self.nm)\n self.scene.add_node(self.nl)\n self.scene.add_node(self.nc)\n\n # Set the pose and show the image.\n temppose = self.extrinsics @ self.camera_pose\n self.scene.set_pose(self.nl, pose=temppose)\n self.scene.set_pose(self.nc, pose=temppose)\n pyrender.Viewer(self.scene, use_raymond_lighting=True,\n viewport_size=(self.width, self.height))",
"def CreateSurface2DMeshfrom3DMesh(self):\n\n self.__do_memebers_exist__()\n\n p = self.InferPolynomialDegree()\n mm = Mesh()\n if self.element_type == \"hex\":\n mm.element_type = \"quad\"\n elif self.element_type == \"tet\":\n mm.element_type = \"tri\"\n else:\n raise ValueError(\"Cannot make a 2D mesh from the 3D mesh of type {}\".format(self.element_type))\n\n unique_faces, inv_faces = np.unique(self.faces,return_inverse=True)\n mm.points = self.points[unique_faces,:]\n mm.nnode = mm.points.shape[0]\n aranger = np.arange(mm.nnode)\n mm.elements = aranger[inv_faces].reshape(self.faces.shape)\n mm.nelem = mm.elements.shape[0]\n mm.GetBoundaryEdges()\n\n return mm",
"def create_mesh(verts, faces, name):\n thisfunc = thisfile + '->create_mesh()'\n\n verts = np.array(verts)\n\n # Create mesh\n mesh_data = bpy.data.meshes.new(name)\n mesh_data.from_pydata(verts, [], faces)\n mesh_data.update()\n\n logging.info(\"%s: Mesh '%s' created\", thisfunc, name)\n\n return mesh_data",
"def get(self, mesh_name):\n\n # Assemble the absolute path to the mesh file\n file_path = get_file_path('meshes', mesh_name, 'obj')\n\n if file_path in self.cache:\n return self.cache[file_path]\n\n mesh = ObjMesh(file_path)\n self.cache[file_path] = mesh\n\n return mesh",
"def load_phong_mesh(file, shader,depth):\n global light_dir\n try:\n pp = assimpcy.aiPostProcessSteps\n flags = pp.aiProcess_Triangulate | pp.aiProcess_GenSmoothNormals\n scene = assimpcy.aiImportFile(file, flags)\n except assimpcy.all.AssimpError as exception:\n print('ERROR loading', file + ': ', exception.args[0].decode())\n return []\n\n # prepare mesh nodes\n meshes = []\n for mesh in scene.mMeshes:\n mat = scene.mMaterials[mesh.mMaterialIndex].properties\n mesh = PhongMesh(shader, [mesh.mVertices, mesh.mNormals], depth, mesh.mFaces,\n k_d=mat.get('COLOR_DIFFUSE', (1, 1, 1)),\n k_s=mat.get('COLOR_SPECULAR', (1, 1, 1)),\n k_a=mat.get('COLOR_AMBIENT', (0, 0, 0)),\n s=mat.get('SHININESS', 16.),\n light_dir=light_dir)\n meshes.append(mesh)\n\n size = sum((mesh.mNumFaces for mesh in scene.mMeshes))\n print('Loaded %s\\t(%d meshes, %d faces)' % (file, len(meshes), size))\n return meshes",
"def __init__(self, outprefix: str):\n paths = PhenoXPaths(outprefix)\n mesh_json_path = os.path.join(paths.data_dir, 'mesh.json')\n self.mesh = dict()\n\n if not os.path.exists(mesh_json_path):\n mesh_bin_file = glob.glob(os.path.join(paths.data_dir, '*.bin'))\n if mesh_bin_file:\n self._parse_mesh_bin(mesh_bin_file[0], mesh_json_path)\n\n self.mesh = json.load(open(mesh_json_path, 'r'))",
"def MeshMachine(main):\n\n # oDesign definition\n oDesign = main['ANSYS']['oDesign']\n\n # Data for the rotor mesh\n RotorName = main['ANSYS']['Rotor&Magnets']['Name'][0]\n RotorNumMaxElem = main['ANSYS']['Mesh']['Rotor']['NumMaxElem']\n RotorMaxLength = main['ANSYS']['Mesh']['Rotor']['MaxLength']\n\n # Data for the magnets mesh\n PMNames = main['ANSYS']['Rotor&Magnets']['PMNames']\n PMNumMaxElem = main['ANSYS']['Mesh']['Magnets']['NumMaxElem']\n PMMaxLength = main['ANSYS']['Mesh']['Magnets']['MaxLength']\n\n # Data for the Stator mesh\n StatorName = main['ANSYS']['Stator']['Name']\n StatorNormalDev = main['ANSYS']['Mesh']['Stator']['NormalDev']\n StatorAspectRatio = main['ANSYS']['Mesh']['Stator']['AspectRatio']\n\n # Data for the Stator mesh\n CoilNames = main['ANSYS']['Winding']['CoilNames']\n WindingNumMaxElem = main['ANSYS']['Mesh']['Winding']['NumMaxElem']\n WindingMaxLength = main['ANSYS']['Mesh']['Winding']['MaxLength']\n\n WindingName = []\n for phase in CoilNames:\n for direction in phase:\n WindingName += direction\n\n # Creating meshes\n oModule = oDesign.GetModule(\"MeshSetup\")\n\n # Rotor meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Rotor\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", [RotorName],\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(RotorNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(RotorMaxLength)+\"mm\"\n ]\n )\n # Magnet meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Magnets\",\n \"RefineInside:=\", True,\n \"Enabled:=\", True,\n \"Objects:=\", PMNames,\n \"RestrictElem:=\", False,\n \"NumMaxElem:=\", str(PMNumMaxElem),\n \"RestrictLength:=\", True,\n \"MaxLength:=\", str(PMMaxLength)+\"mm\"\n ]\n )\n # Stator meshes\n oModule.AssignTrueSurfOp(\n [\n \"NAME:Stator\",\n \"Objects:=\", [StatorName],\n \"CurvedSurfaceApproxChoice:=\", \"ManualSettings\",\n \"SurfDevChoice:=\", 0,\n \"NormalDevChoice:=\", 2,\n \"NormalDev:=\", str(StatorNormalDev) + \"deg\",\n \"AspectRatioChoice:=\", 2,\n \"AspectRatio:=\", str(StatorAspectRatio)\n ]\n )\n\n # Coil meshes\n oModule.AssignLengthOp(\n [\n \"NAME:Coils\",\n \"RefineInside:=\"\t, True,\n \"Enabled:=\"\t\t, True,\n \"Objects:=\"\t\t, WindingName,\n \"RestrictElem:=\"\t, False,\n \"NumMaxElem:=\"\t\t, str(WindingNumMaxElem),\n \"RestrictLength:=\"\t, True,\n \"MaxLength:=\"\t\t, str(WindingMaxLength) +\"mm\"\n ]\n )\n\n return main",
"def import_submesh(self, meshId, new_mesh, vertex, vbuffer, indices, materialName,\n matIdx):\n vertex_legend = get_vertex_legend(vertex)\n pos_offset = vertex_legend[VES_POSITION][1]\n no_offset = vertex_legend[VES_NORMAL][1]\n image = None\n if materialName in self._imported_ogre_materials:\n ogremat = self._imported_ogre_materials[materialName]\n if ogremat.btex and ogremat.btex.image:\n image = ogremat.btex.image\n if VES_TEXTURE_COORDINATES in vertex_legend:\n uvco_offset = vertex_legend[VES_TEXTURE_COORDINATES][1]\n vertmaps = {}\n indices_map = []\n # vertices\n for idx in range(max(indices)+1):\n coords = get_vcoords(vbuffer, idx, pos_offset)\n if coords:\n if not coords in vertmaps:\n new_mesh.verts.extend(*coords)\n vertmaps[coords] = len(new_mesh.verts)-1\n indices_map.append(vertmaps[coords])\n else:\n new_mesh.verts.extend(0.0,0.0,0.0)\n indices_map.append(len(new_mesh.verts)-1)\n if not len(new_mesh.verts):\n logger.debug(\"mesh with no vertex!!\")\n # faces\n for idx in range(len(indices)/3):\n idx = idx*3\n new_mesh.vertexUV = False\n face = [indices_map[indices[idx]],\n indices_map[indices[idx+1]],\n indices_map[indices[idx+2]]]\n new_mesh.faces.extend(face, ignoreDups=True)\n if len(new_mesh.faces) == 0:\n logger.debug(\"Degenerate face!\")\n continue\n face = new_mesh.faces[len(new_mesh.faces)-1]\n if image:\n face.image = image\n try:\n no1 = get_nor(indices[idx], vbuffer, no_offset)\n except:\n no1 = [0.0,0.0,0.0]\n try:\n no2 = get_nor(indices[idx+1], vbuffer, no_offset)\n except:\n no2 = [0.0,0.0,0.0]\n try:\n no3 = get_nor(indices[idx+2], vbuffer, no_offset)\n except:\n no3 = [0.0,0.0,0.0]\n if VES_TEXTURE_COORDINATES in vertex_legend:\n uv1 = get_uv(indices[idx], vbuffer, uvco_offset)\n uv2 = get_uv(indices[idx+1], vbuffer, uvco_offset)\n uv3 = get_uv(indices[idx+2], vbuffer, uvco_offset)\n face.uv = (mathutils.Vector(uv1),\n mathutils.Vector(uv2),\n mathutils.Vector(uv3))\n if not len(new_mesh.faces):\n logger.warning(\"mesh with no faces!!\")\n #sys.stderr.write(\"*\")\n #sys.stderr.flush()\n return new_mesh",
"def importMeshes(self, name=None):\n wall = False\n\n if 'mesh' in self.pargs:\n for mesh in self.pargs['mesh'].keys():\n\n if 'file' in self.pargs['mesh'][mesh]:\n if name:\n if mesh == name:\n self.pargs['mesh'][mesh]['import'] = True\n self.importMesh(mesh, self.pargs['mesh'][mesh]['file'], self.pargs['mesh'][mesh]['mtype'], self.pargs['mesh'][mesh]['id'], **self.pargs['mesh'][mesh]['args']) \n wall = True\n\n elif 'import' in self.pargs['mesh'][mesh]:\n if self.pargs['mesh'][mesh]['import']:\n self.importMesh(mesh, self.pargs['mesh'][mesh]['file'], self.pargs['mesh'][mesh]['mtype'], self.pargs['mesh'][mesh]['id'], **self.pargs['mesh'][mesh]['args']) \n wall = True\n \n if wall:\n self.setupWall(wtype='mesh')",
"def import_stl(cls, filename, scaling=1.0, mode=STL_AUTOMATIC, **kwargs):\n\n mode = mode.lower()\n if mode == STL_ASCII:\n vertices, triangles = cls._load_ascii(filename, scaling)\n elif mode == STL_BINARY:\n vertices, triangles = cls._load_binary(filename, scaling)\n elif mode == STL_AUTOMATIC:\n try:\n vertices, triangles = cls._load_ascii(filename, scaling)\n except ValueError:\n vertices, triangles = cls._load_binary(filename, scaling)\n else:\n modes = (STL_AUTOMATIC, STL_ASCII, STL_BINARY)\n raise ValueError('Unrecognised import mode, valid values are: {}'.format(modes))\n\n return Mesh(vertices, triangles, smoothing=False, **kwargs)",
"def mesh(self):\n self._ensure_mesh()\n return self._mesh",
"def __init__(self, mesh):\n self._mesh = mesh",
"def from_data(name, coors, ngroups, conns, mat_ids, descs,\n nodal_bcs=None):\n mesh = Mesh(name)\n mesh._set_io_data(coors=coors,\n ngroups=ngroups,\n conns=conns,\n mat_ids=mat_ids,\n descs=descs,\n nodal_bcs=nodal_bcs)\n mesh._set_shape_info()\n return mesh",
"def draw_stl_from_mesh(m):\n plt.ion()\n # Create a new plot\n figure = plt.figure()\n axes = mplot3d.Axes3D(figure)\n\n # Render the cube faces\n #for m in meshes:\n axes.add_collection3d(mplot3d.art3d.Poly3DCollection(m.vectors))\n\n # Auto scale to the mesh size\n scale = m.points.flatten(-1)\n axes.auto_scale_xyz(scale, scale, scale)",
"def DisplayMesh():\r\n \r\n # Load Surface Mesh Data and generate normals\r\n VTKString = OpenData('C:/Users/Tim/Documents/University/Year 4/Final Project/FinalYearProjectCode/TEH_Code/InputFiles','muscle_surface.vtk')\r\n header, Vertices, Triangles = CreateMatrixVTK(VTKString)\r\n \r\n fig = plt.figure()\r\n ax1 = fig.add_subplot(111,projection = '3d')\r\n ax1.plot_trisurf(Vertices[:,0],Vertices[:,1],Vertices[:,2],triangles= Triangles[:,1:])\r\n ax1.set_zlabel('z')\r\n ax1.set_ylabel('y')\r\n ax1.set_xlabel('x')\r\n plt.show()",
"def load_faces(file_data, headers, indices):\n\n\n def swap_winding(indices):\n return (indices[0], indices[2], indices[1])\n \n\n def indices_from_face(face_data):\n base_vertex = face_data[3]\n base_index = face_data[5]\n index_count = face_data[6]\n\n faces_indices = [base_vertex + indices[base_index + current_index] \n for current_index in range(index_count)]\n\n #Split into lists of 3 - ie triangles\n faces = []\n for current_face_idx in range(0, len(faces_indices), 3):\n faces.append(faces_indices[current_face_idx:current_face_idx+3])\n\n return faces\n\n\n def face_from_pack(face_data):\n \"\"\" \n Extract just the data we want from the full chunk\n \"\"\"\n triangle_list = indices_from_face(face_data)\n return [(face_data[0], triangles,) for triangles in triangle_list]\n\n face_offset, face_length = headers[13]\n face_chunk = Struct(\"iiiiiiii2i2i3f3f3f3f2i\") \n face_size = face_chunk.size\n face_count = int(face_length / face_size)\n\n faces = []\n\n for current_face_idx in range(face_count):\n face_file_position = face_offset + current_face_idx * face_size\n current_face = face_chunk.unpack(file_data[face_file_position : face_file_position+face_size])\n\n #Check we are a valid face (Could use a filter later)\n if current_face[2] != 1: continue #Only support meshes at the moment\n\n new_faces = face_from_pack(current_face)\n faces.extend(new_faces)\n\n return faces"
] | [
"0.7057171",
"0.70245314",
"0.67579937",
"0.67061126",
"0.6692487",
"0.6679398",
"0.6569063",
"0.6533282",
"0.6514972",
"0.6505622",
"0.6436628",
"0.6435029",
"0.6251096",
"0.62127954",
"0.62039167",
"0.6150478",
"0.61312926",
"0.611122",
"0.6106273",
"0.6066741",
"0.6060872",
"0.6057695",
"0.6019078",
"0.6003635",
"0.5999166",
"0.5988176",
"0.59692186",
"0.59391433",
"0.59272313",
"0.58827794"
] | 0.7224944 | 0 |