query (string, 9-9.05k chars) | document (string, 10-222k chars) | metadata (dict) | negatives (list of 30) | negative_scores (list of 30) | document_score (string, 4-10 chars) | document_rank (string, 2 classes)
---|---|---|---|---|---|---|
When kwargs['flat_fields'] = True, the expected fields are put back into remaining_fields. | def test_prep_country_fields_flat(self):
    original_flag = self.form.country_optional
    self.form.country_optional = True
    original_fields = self.form.fields
    original_removed = getattr(self.form, 'removed_fields', None)
    original_computed = getattr(self.form, 'computed_fields', None)
    self.form.fields = original_fields.copy()
    if original_removed is not None:
        self.form.removed_fields = original_removed.copy()
    if original_computed is not None:
        self.form.computed_fields = original_computed.copy()
    remaining = original_fields.copy()
    opts, field_rows = {'fake_opts': 'fake', 'fields': ['nope']}, [{'name': 'assigned_field'}]
    args = ['arbitrary', 'input', 'args']
    kwargs = {'test_1': 'data_1', 'test_2': 'data_2'}
    field_names = (self.form.country_field_name, 'country_flag', )
    if not any(remaining.get(name, None) for name in field_names):
        fix_fields = {name: self.get_missing_field(name) for name in field_names if name not in remaining}
        remaining.update(fix_fields)
    expected_add = {name: remaining[name] for name in field_names if name in remaining}
    expected_field_rows = field_rows.copy()
    expected_field_rows.append(expected_add)
    expected_remaining = {name: field for name, field in remaining.items() if name not in expected_add}
    expected_opts = deepcopy(opts)
    # expected_opts['fields'].append(field_names)
    kwargs['flat_fields'] = True
    expected_remaining.update(expected_add)
    sent = (opts, field_rows, remaining, *args)
    r_opts, r_rows, r_remaining, *r_args, r_kwargs = self.form.prep_country_fields(*sent, **kwargs)
    self.assertEqual(expected_opts, r_opts)
    self.assertEqual(expected_field_rows, r_rows)
    self.assertEqual(expected_remaining, r_remaining)
    self.assertEqual(args, r_args)
    self.assertEqual(kwargs, r_kwargs)
    self.form.country_optional = original_flag
    self.form.fields = original_fields
    if original_removed is not None:
        self.form.removed_fields = original_removed
    if original_computed is not None:
        self.form.computed_fields = original_computed
    pass | {
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} | [
"def full_dehydrate(self, bundle, for_list=False):\n\n # call the base class if qs param `fields` is not set\n try:\n if not len(self.specified_fields):\n self.specified_fields = []\n return super(SpecifiedFields, self).full_dehydrate( \\\n bundle, for_list)\n except:\n self.specified_fields = []\n return super(SpecifiedFields, self).full_dehydrate( \\\n bundle, for_list)\n\n\n # Dehydrate each field supplied in the `fields` parameter\n for field_name, field_object in self.fields.items():\n\n # A touch leaky but it makes URI resolution work.\n if getattr(field_object, 'dehydrated_type', None) == 'related':\n field_object.api_name = self._meta.api_name\n field_object.resource_name = self._meta.resource_name\n\n # Check for an optional method to do further dehydration.\n method = getattr(self, \"dehydrate_%s\" % field_name, None)\n\n if method:\n bundle.data[field_name] = method(bundle)\n\n bundle = self.dehydrate(bundle)\n return bundle",
"def test_prep_remaining(self):\n self.assertTrue(hasattr(self.form, 'prep_remaining'))\n original_fields = self.form.fields\n self.form.fields = original_fields.copy()\n remaining_fields = original_fields.copy()\n opts, field_rows = {'fake_opts': 'fake', 'fields': ['nope']}, [{'name': 'assigned_field'}]\n args = ['arbitrary', 'input', 'args']\n kwargs = {'test_1': 'data_1', 'test_2': 'data_2'}\n\n expected = (opts.copy(), field_rows.copy(), remaining_fields.copy(), *args, kwargs.copy())\n actual = self.form.prep_remaining(opts, field_rows, remaining_fields, *args, **kwargs)\n self.assertEqual(expected, actual)\n\n self.form.fields = original_fields",
"def test_get_flat_fields_setting(self):\n # FormOverrideMixIn.get_flat_fields_setting\n pass",
"def remove_all_fields(self):\n self.fields = None",
"def strip_unwanted_fields(self, data, many, **kwargs):\n unwanted_fields = [\"resource_type\"]\n for field in unwanted_fields:\n if field in data:\n data.pop(field)\n return data",
"def test_overall_make_fieldsets(self):\n original_fieldsets = self.form.fieldsets\n self.form.fieldsets = (\n ('Your Name', {\n 'position': 1,\n 'fields': [('first_name', 'last_name', )],\n }),\n (None, {\n 'classes': ('counting', ),\n 'position': 2,\n 'fields': [\n ('first', 'second', ),\n 'not_third',\n 'not_fourth',\n 'last',\n ],\n }),\n ('Non_Fields', {\n 'position': 3,\n 'fields': [\n 'non-field_name',\n 'not_a_field'\n ],\n }),\n (None, {\n 'position': None,\n # 'modifiers': ['password_display', ],\n 'fields': [\n # ('password1', 'password2', ),\n 'generic_field',\n 'bool_field',\n 'single_check'\n ]\n }),\n ('address', {\n 'classes': ('collapse', 'address', ),\n # 'modifiers': ['address', 'prep_country_fields', ],\n 'position': 'end',\n 'fields': [\n 'billing_address_1',\n 'billing_address_2',\n ('billing_city', 'billing_country_area', 'billing_postcode', ),\n ],\n }), )\n fieldsets = [(label, deepcopy(opts)) for label, opts in self.form.fieldsets if label != 'Non_Fields']\n remaining_fields = self.form.fields.copy()\n assigned_field_names = flatten([flatten(opts['fields']) for fieldset_label, opts in fieldsets])\n unassigned_field_names = [name for name in remaining_fields if name not in assigned_field_names]\n remaining_fields.pop('hide_field')\n address_fieldset = fieldsets.pop()\n opts = {'modifiers': 'prep_remaining', 'position': 'remaining', 'fields': unassigned_field_names}\n fieldsets.append((None, opts))\n fieldsets.append(address_fieldset)\n for fieldset_label, opts in fieldsets:\n opts['field_names'] = flatten(opts['fields'])\n rows, column_count = [], 0\n for names in opts['fields']:\n if isinstance(names, str):\n names = [names]\n columns = {name: self.form.fields[name] for name in names if name in remaining_fields}\n # TODO: Remove hidden or otherwise excluded fields.\n column_count = max(column_count, len(columns))\n if columns:\n rows.append(columns)\n opts['rows'] = rows\n opts['column_count'] = column_count\n self.form.make_fieldsets()\n actual_fieldsets = self.form._fieldsets\n self.assertEqual(len(fieldsets), 5)\n self.assertEqual(len(fieldsets), len(actual_fieldsets))\n count = 0\n for expect, got in zip(fieldsets, actual_fieldsets):\n labels = str(got[0]) if expect[0] == got[0] else ' & '.join(str(ea) for ea in (expect[0], got[0]))\n expect_row_names = flatten([list(ea.keys()) for ea in expect[1]['rows']])\n actual_row_names = flatten([list(ea.keys()) for ea in got[1]['rows']])\n row_names = str(expect_row_names) + '\\n' + str(actual_row_names)\n message = f\"Fieldset # {count} named {labels} expected then got: \\n{row_names}\"\n self.assertEqual(expect, got, message)\n count += 1\n self.assertEqual(fieldsets, actual_fieldsets)\n\n self.form.fieldsets = original_fieldsets",
"def test_make_fieldsets_outcome_order(self):\n original_fieldsets = self.form.fieldsets\n self.form.fieldsets = (\n (None, {\n 'classes': ('counting', ),\n 'position': 2,\n 'fields': [\n ('first', 'second', ),\n 'last',\n ],\n }),\n ('Non_Fields', {\n 'position': 3,\n 'fields': [\n 'non-field_name',\n 'not_a_field'\n ],\n }),\n ('Your Name', {\n 'position': 1,\n 'fields': [('first_name', 'last_name', )],\n }),\n (None, {\n 'position': None,\n 'fields': [\n 'generic_field',\n 'bool_field',\n 'single_check'\n ]\n }),\n ('address', {\n 'classes': ('collapse', 'address', ),\n # 'modifiers': ['address', 'prep_country_fields', ],\n 'position': 'end',\n 'fields': [\n 'billing_address_1',\n 'billing_address_2',\n ('billing_city', 'billing_country_area', 'billing_postcode', ),\n ],\n }), )\n fieldsets = [(label, deepcopy(opts)) for label, opts in self.form.fieldsets if label != 'Non_Fields']\n fieldsets[0], fieldsets[1] = fieldsets[1], fieldsets[0]\n remaining_fields = self.form.fields.copy()\n assigned_field_names = flatten([flatten(opts['fields']) for fieldset_label, opts in fieldsets])\n unassigned_field_names = [name for name in remaining_fields if name not in assigned_field_names]\n remaining_fields.pop('hide_field')\n address_fieldset = fieldsets.pop()\n opts = {'modifiers': 'prep_remaining', 'position': 'remaining', 'fields': unassigned_field_names}\n fieldsets.append((None, opts))\n fieldsets.append(address_fieldset)\n for fieldset_label, opts in fieldsets:\n opts['field_names'] = flatten(opts['fields'])\n rows, column_count = [], 0\n for names in opts['fields']:\n if isinstance(names, str):\n names = [names]\n columns = {name: self.form.fields[name] for name in names if name in remaining_fields}\n # TODO: Remove hidden or otherwise excluded fields.\n column_count = max(column_count, len(columns))\n if columns:\n rows.append(columns)\n opts['rows'] = rows\n opts['column_count'] = column_count\n self.form.make_fieldsets()\n actual_fieldsets = self.form._fieldsets\n self.assertEqual(len(fieldsets), 5)\n self.assertEqual(len(fieldsets), len(actual_fieldsets))\n count = 0\n for expect, got in zip(fieldsets, actual_fieldsets):\n labels = str(got[0]) if expect[0] == got[0] else ' & '.join(str(ea) for ea in (expect[0], got[0]))\n expect_row_names = flatten([list(ea.keys()) for ea in expect[1]['rows']])\n actual_row_names = flatten([list(ea.keys()) for ea in got[1]['rows']])\n row_names = str(expect_row_names) + '\\n' + str(actual_row_names)\n message = f\"Fieldset # {count} named {labels} expected then got: \\n{row_names}\"\n self.assertEqual(expect, got, message)\n count += 1\n self.assertEqual(fieldsets, actual_fieldsets)\n\n self.form.fieldsets = original_fieldsets",
"def set_fields(self, fields: FieldDict):\n super().set_fields(fields)\n nested_field: NestedField = self.fields[self.nested]\n if not isinstance(nested_field, NestedField):\n raise TypeError(\n f'The field \"{self.nested}\" must be a NestedField instance, not \"{nested_field}\".')\n if nested_field.many:\n raise ValueError(f'The field \"{self.nested}\" can not be set as \"many=True\".')\n self.nested_field = nested_field\n # create partial methods\n self._do_dump = partial(\n getattr(self, self.dump_method),\n target=nested_field.dump_target,\n method=nested_field.dump,\n )\n self._do_load = partial(\n getattr(self, self.load_method),\n target=nested_field.load_target,\n method=nested_field.load,\n )",
"def copy_fields(self, entity, all_fields=False):\n\n if all_fields:\n fields = self.get_all_fields()\n else:\n fields = self.get_non_pk_fields()\n\n for field in fields.keys():\n setattr(self, field, getattr(entity, field, None))",
"def test_make_fieldsets_uses_prep_fields(self):\n original_called_prep_fields = self.form.called_prep_fields = False\n full_fieldsets = self.form.make_fieldsets()\n\n self.assertFalse(original_called_prep_fields)\n self.assertIsInstance(full_fieldsets, (list, tuple))\n self.assertIsNotNone(getattr(self.form, '_fieldsets', None))\n self.assertTrue(self.form.called_prep_fields)\n\n self.form.called_prep_fields = original_called_prep_fields",
"def set_fields(self, upstream_obj, nonparam_fields=None):\n default_data = upstream_obj.default_data(start_year=self.start_year,\n metadata=True)\n\n if self.raw_input_fields is None:\n self.raw_input_fields = {}\n for field in self._meta.fields:\n if (getattr(self, field.attname, None) and\n field.name not in nonparam_fields):\n raw_val = getattr(self, field.attname)\n if field.name.endswith(\"cpi\") and isinstance(raw_val, bool):\n raw_val = str(raw_val)\n self.raw_input_fields[field.name] = raw_val\n\n input_fields, failed_lookups = param_formatters.parse_fields(\n self.raw_input_fields,\n default_data\n )\n\n if failed_lookups:\n # distinct elements\n potential_failed_lookups = set(failed_lookups)\n # only keep parameters that used to be in the upstream package\n set_failed_lookups = potential_failed_lookups - nonparam_fields\n if self.deprecated_fields is None:\n self.deprecated_fields = []\n # drop parameters that we already know are deprecated\n set_failed_lookups.difference_update(self.deprecated_fields)\n self.deprecated_fields += list(set_failed_lookups)\n\n self.input_fields = input_fields",
"def test_clean_moves_computed_fields_to_fields(self):\n name = 'test_field'\n if isinstance(self.form.computed_fields, (list, tuple)):\n self.form.computed_fields = self.form.get_computed_fields([name])\n computed_names = list(self.form.computed_fields.keys())\n field_names = list(self.form.fields.keys())\n field_data = {f_name: f\"input_{f_name}_{i}\" for i, f_name in enumerate(field_names)}\n field_data.update({name: f\"value_{f_name}_{i}\" for i, f_name in enumerate(computed_names)})\n original_data = self.form.data\n original_fields = self.form.fields\n original_computed_fields = self.form.computed_fields\n original_errors = getattr(self.form, '_errors', None)\n original_cleaned_data = getattr(self.form, 'cleaned_data', None)\n self.form.data = original_data.copy()\n self.form.fields = original_fields.copy()\n self.form.computed_fields = original_computed_fields.copy()\n self.form._errors = ErrorDict() if original_errors is None else original_errors.copy() # mimic full_clean\n populated_cleaned_data = deepcopy(original_cleaned_data or {})\n populated_cleaned_data.update(field_data)\n self.form.cleaned_data = populated_cleaned_data.copy() # ensure cleaned_data is present (mimic full_clean)\n final_cleaned_data = self.form.clean()\n\n self.assertIn(name, computed_names)\n self.assertNotIn(name, field_names)\n self.assertEqual(1, len(computed_names))\n self.assertIn(name, self.form.fields)\n self.assertNotEqual(original_cleaned_data, final_cleaned_data)\n\n self.form.data = original_data\n self.form.fields = original_fields\n self.form.computed_fields = original_computed_fields\n self.form._errors = original_errors\n self.form.cleaned_data = original_cleaned_data\n if original_errors is None:\n del self.form._errors\n if original_cleaned_data is None:\n del self.form.cleaned_data",
"def test_prep_field_properties(self):\n original_data = self.form.data\n test_data = original_data.copy()\n # modify values in data\n test_data._mutable = False\n self.form.data = test_data\n original_fields = self.form.fields\n test_fields = original_fields.copy()\n # modify fields\n self.form.fields = test_fields\n test_fields_info = {name: field.__dict__.copy() for name, field in test_fields.items()}\n original_get_overrides = self.form.get_overrides\n def skip_overrides(): return {}\n self.form.get_overrides = skip_overrides\n original_alt_field_info = getattr(self.form, 'alt_field_info', None)\n self.form.alt_field_info = self.alt_field_info\n self.form.test_condition_response = True\n expected_fields_info = test_fields_info.copy()\n result_fields = self.form.prep_fields()\n result_fields_info = {name: field.__dict__.copy() for name, field in result_fields.items()}\n modified_info = self.alt_field_info['alt_test_feature']\n first_label = modified_info['first']['label']\n first_initial = modified_info['first']['initial']\n last_initial = modified_info['last']['initial']\n for name, opts in modified_info.items():\n expected_fields_info[name].update(opts)\n\n self.assertEqual(first_label, result_fields['first'].label)\n self.assertEqual(first_initial, result_fields['first'].initial)\n self.assertEqual(last_initial, result_fields['last'].initial)\n for key, val in expected_fields_info.items():\n self.assertEqual(val, result_fields_info[key])\n self.assertDictEqual(expected_fields_info, result_fields_info)\n\n self.form.test_condition_response = False\n self.form.alt_field_info = original_alt_field_info\n if original_alt_field_info is None:\n del self.form.alt_field_info\n self.form.fields = original_fields\n self.form.data = original_data\n self.form.get_overrides = original_get_overrides",
"def update_fields(state, **kwargs):\n return state._replace(**kwargs)",
"def restore_object(self, attrs, instance=None):\n model_attrs = dict(**attrs)\n if instance is not None:\n for attr, value in attrs.items():\n if attr in self.opts.postonly_fields:\n model_attrs.pop(attr)\n return super(HyperlinkedModelSerializer, self).restore_object(model_attrs, instance)",
"def intialize_from_fields(self):\n raise NotImplementedError",
"def clean_fields(self, exclude=None):\n obj = self._obj\n if obj is None:\n return None\n\n self.event = self.clean_event(self.event)\n self.resource_name = self.clean_resource_name(obj.__class__.__name__)\n self.resource_id = obj.id\n self.site = self.clean_site(obj)\n\n serializer_class = self.get_serializer_for_resource(self.resource_name)\n serializer = serializer_class(obj)\n self._resource = serializer.data",
"def test_pass_through_prep_country_fields(self):\n original_flag = self.form.country_optional\n self.form.country_optional = False # True\n original_fields = self.form.fields\n self.form.fields = original_fields.copy()\n remaining_fields = original_fields.copy()\n opts, field_rows = {'fake_opts': 'fake'}, [{'name': 'assigned_field'}]\n args = ['arbitrary', 'input', 'args']\n kwargs = {'test_1': 'data_1', 'test_2': 'data_2'}\n\n expected = (opts.copy(), field_rows.copy(), remaining_fields.copy(), *args, kwargs.copy())\n actual = self.form.prep_country_fields(opts, field_rows, remaining_fields, *args, **kwargs)\n self.assertEqual(expected, actual)\n\n self.form.country_optional = original_flag\n self.form.fields = original_fields",
"def test_prep_country_fields(self):\n original_flag = self.form.country_optional\n self.form.country_optional = True\n original_fields = self.form.fields\n original_removed = getattr(self.form, 'removed_fields', None)\n original_computed = getattr(self.form, 'computed_fields', None)\n self.form.fields = original_fields.copy()\n if original_removed is not None:\n self.form.removed_fields = original_removed.copy()\n if original_computed is not None:\n self.form.computed_fields = original_computed.copy()\n remaining = original_fields.copy()\n opts, field_rows = {'fake_opts': 'fake', 'fields': ['nope']}, [{'name': 'assigned_field'}]\n args = ['arbitrary', 'input', 'args']\n kwargs = {'test_1': 'data_1', 'test_2': 'data_2'}\n field_names = (self.form.country_field_name, 'country_flag', )\n if not any(remaining.get(name, None) for name in field_names):\n fix_fields = {name: self.get_missing_field(name) for name in field_names if name not in remaining}\n remaining.update(fix_fields)\n expected_add = {name: remaining[name] for name in field_names if name in remaining}\n expected_field_rows = field_rows.copy()\n expected_field_rows.append(expected_add)\n expected_remaining = {name: field for name, field in remaining.items() if name not in expected_add}\n expected_opts = deepcopy(opts)\n expected_opts['fields'].append(field_names)\n\n sent = (opts, field_rows, remaining, *args)\n r_opts, r_rows, r_remaining, *r_args, r_kwargs = self.form.prep_country_fields(*sent, **kwargs)\n self.assertEqual(expected_opts, r_opts)\n self.assertEqual(expected_field_rows, r_rows)\n self.assertEqual(expected_remaining, r_remaining)\n self.assertEqual(args, r_args)\n self.assertEqual(kwargs, r_kwargs)\n\n self.form.country_optional = original_flag\n self.form.fields = original_fields\n if original_removed is not None:\n self.form.removed_fields = original_removed\n if original_computed is not None:\n self.form.computed_fields = original_computed",
"def test_entities__Entity__getRawFields__1(entity_with_field, field):\n entity = entity_with_field\n entity.setFieldOrder([field.__name__, 'dummy'])\n assert ([field.__name__, 'dummy', 'dummy2'] ==\n [x[0] for x in entity.getRawFields()])",
"def test_no_empty_sets_in_computed_fieldsets(self):\n original_fieldsets = self.form.fieldsets\n self.form.fieldsets = (\n ('Your Name', {\n 'position': 1,\n 'fields': [('first_name', 'last_name', )],\n }),\n ('Non_Fields', {\n 'position': 2,\n 'fields': [\n 'non-field_name',\n 'not_a_field'\n ],\n }), )\n fieldsets = [(label, deepcopy(opts)) for label, opts in self.form.fieldsets if label != 'Non_Fields']\n remaining_fields = self.form.fields.copy()\n assigned_field_names = flatten([flatten(opts['fields']) for fieldset_label, opts in fieldsets])\n unassigned_field_names = [name for name in remaining_fields if name not in assigned_field_names]\n remaining_fields.pop('hide_field')\n opts = {'modifiers': 'prep_remaining', 'position': 'remaining', 'fields': unassigned_field_names}\n fieldsets.append((None, opts))\n for fieldset_label, opts in fieldsets:\n opts['field_names'] = flatten(opts['fields'])\n rows, column_count = [], 0\n for names in opts['fields']:\n if isinstance(names, str):\n names = [names]\n columns = {name: self.form.fields[name] for name in names if name in remaining_fields}\n # TODO: Remove hidden or otherwise excluded fields.\n column_count = max(column_count, len(columns))\n if columns:\n rows.append(columns)\n opts['rows'] = rows\n opts['column_count'] = column_count\n self.form.make_fieldsets()\n actual_fieldsets = self.form._fieldsets\n self.assertEqual(len(fieldsets), 2)\n self.assertEqual(len(fieldsets), len(actual_fieldsets))\n count = 0\n for expect, got in zip(fieldsets, actual_fieldsets):\n labels = str(got[0]) if expect[0] == got[0] else ' & '.join(str(ea) for ea in (expect[0], got[0]))\n expect_row_names = flatten([list(ea.keys()) for ea in expect[1]['rows']])\n actual_row_names = flatten([list(ea.keys()) for ea in got[1]['rows']])\n row_names = str(expect_row_names) + '\\n' + str(actual_row_names)\n message = f\"Fieldset # {count} named {labels} expected then got: \\n{row_names}\"\n self.assertEqual(expect, got, message)\n count += 1\n self.assertEqual(fieldsets, actual_fieldsets)\n\n self.form.fieldsets = original_fieldsets",
"def test_computed_fieldsets_structure(self):\n original_fieldsets = self.form.fieldsets\n self.form.fieldsets = (\n ('Your Name', {\n 'position': 1,\n 'fields': [('first_name', 'last_name', )],\n }),\n (None, {\n 'classes': ('counting', ),\n 'position': 2,\n 'fields': [\n ('first', 'second', ),\n 'not_third',\n 'not_fourth',\n 'last',\n ],\n }),\n ('Non_Fields', {\n 'position': 3,\n 'fields': [\n 'non-field_name',\n 'not_a_field'\n ],\n }),\n (None, {\n 'position': None,\n # 'modifiers': ['password_display', ],\n 'fields': [\n # ('password1', 'password2', ),\n 'generic_field',\n 'bool_field',\n 'single_check'\n ]\n }),\n ('address', {\n 'classes': ('collapse', 'address', ),\n # 'modifiers': ['address', 'prep_country_fields', ],\n 'position': 'end',\n 'fields': [\n 'billing_address_1',\n 'billing_address_2',\n ('billing_city', 'billing_country_area', 'billing_postcode', ),\n ],\n }), )\n self.form.make_fieldsets()\n fieldsets = self.form._fieldsets\n each_are_tuples = (isinstance(ea, tuple) for ea in fieldsets)\n correct_fieldset_labels = (isinstance(label, (str, type(None))) for label, opts in fieldsets)\n opts_are_dictionaries = (isinstance(opts, dict) for label, opts in fieldsets)\n required_keys = {'position', 'fields', 'field_names', 'rows', 'column_count', }\n optional_keys = {'classes', 'modifiers', 'row_data', }\n allowed_keys = required_keys | optional_keys\n opt_keys = set(flatten([list(opts.keys()) for lbl, opts, in fieldsets]))\n unaccepted_keys = [key for key in opt_keys if key not in allowed_keys]\n has_required = (all(key in opts for key in required_keys) for lbl, opts in fieldsets)\n self.assertIsInstance(fieldsets, list)\n self.assertTrue(all(each_are_tuples))\n self.assertTrue(all(correct_fieldset_labels))\n self.assertTrue(all(opts_are_dictionaries))\n self.assertEqual(len(unaccepted_keys), 0)\n self.assertFalse(unaccepted_keys)\n self.assertTrue(all(has_required))\n\n self.form.fieldsets = original_fieldsets",
"def only(self, *args):\n try:\n only_fields = tuple(self._get_field_path(arg) for arg in args)\n except ValueError as e:\n raise ValueError(f\"{e.args[0]} in only()\")\n new_qs = self._copy_self()\n new_qs.only_fields = only_fields\n return new_qs",
"def copy_fields(self, model):\n fields = super(HistoricalRecords, self).copy_fields(model)\n for name, field in self.additional_fields.items():\n assert name not in fields\n assert hasattr(self, 'get_%s_value' % name)\n fields[name] = field\n return fields",
"def clean_fields(self, instance, exclude=None):\n errors = {}\n exclude = exclude or []\n for name, f in self.properties.items():\n raw_value = getattr(instance, name, None)\n is_blank = not bool(raw_value)\n is_nullable = f.null\n is_defaulted = f.column.default or f.column.server_default\n is_required = f.required\n\n is_skippable = is_blank and (is_nullable or is_defaulted or not is_required)\n\n if name in exclude or is_skippable:\n continue\n try:\n setattr(instance, name, f.clean(raw_value, instance))\n except ValidationError as e:\n errors[name] = e.error_list\n if errors:\n raise NestedValidationError(errors)",
"def update_fields(self):\n if hasattr(self.day, \"body_composition\"):\n for f in self.get_fields():\n name = f.get_field().name\n value = getattr(self.day.body_composition, name, None)\n if value is not None:\n f.set_field(value)\n else:\n f.set_field(\"\")",
"def _fetch_fields(self, old_or_new, target_fields):\r\n \r\n returned_fields = []\r\n for target_field in target_fields:\r\n if target_field in [\",\", \"\\t\"]:\r\n returned_fields.append(target_field)\r\n else:\r\n try:\r\n returned_fields.append(str(getattr(old_or_new, target_field)))\r\n except:\r\n returned_fields.append(\"-\") \r\n return returned_fields",
"def test_prep_fields(self):\n pass",
"def pull_fields(self, org):\n pass",
"def test_defining_only_or_defer_on_nonexistant_fields_fails(self):"
] | [
"0.6243996",
"0.62252426",
"0.6159768",
"0.6112266",
"0.5876747",
"0.58750236",
"0.5867535",
"0.58615494",
"0.5753491",
"0.57264113",
"0.5716666",
"0.5704671",
"0.560665",
"0.56063133",
"0.555894",
"0.5558664",
"0.5553255",
"0.5545755",
"0.55451787",
"0.5522963",
"0.55055803",
"0.55039364",
"0.5502084",
"0.54798037",
"0.5475571",
"0.547263",
"0.5462082",
"0.54531246",
"0.54428023",
"0.54403156"
] | 0.68460476 | 0 |
If submitted form requested foreign display, but was showing local, set_alt_data is called as expected. | def test_on_post_display_local_to_foreign(self):
    # data.get('country_flag', None)
    # address_display_version = 'foreign' if country_flag else 'local'
    # form.set_alt_data(name='country_display', field=self.fields['country_display'], value=address_display_version)
    pass | {
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} | [
"def test_on_post_display_foreign_to_local(self):\n # data.get('country_flag', None)\n # address_display_version = 'foreign' if country_flag else 'local'\n # form.set_alt_data(name='country_display', field=self.fields['country_display'], value=address_display_version)\n pass",
"def test_on_post_display_foreign_to_foreign(self):\n # data.get('country_flag', None)\n # address_display_version = 'foreign' if country_flag else 'local'\n # form.set_alt_data(name='country_display', field=self.fields['country_display'], value=address_display_version)\n pass",
"def test_raises_set_alt_data(self):\n name, value = 'generic_field', 'alt_data_value'\n field = self.form.fields.get(name, None)\n self.assertIsNotNone(field, \"Unable to find the expected field in current fields. \")\n data = {name: (field, value)}\n\n with self.assertRaises(ImproperlyConfigured):\n self.form.set_alt_data(data=data, name=name, field=field, value=value)",
"def test_set_alt_data_single(self):\n name, value = 'generic_field', 'alt_data_value'\n field = self.form.fields.get(name, None)\n self.assertIsNotNone(field, \"Unable to find the expected field in current fields. \")\n original_form_data = self.form.data\n test_data = self.test_data.copy()\n test_data.update({name: self.test_initial[name]})\n test_data._mutable = False\n self.form.data = test_data\n initial_data = test_data.copy()\n expected_data = test_data.copy()\n expected_data.update({name: value})\n initial_val = self.form.get_initial_for_field(field, name)\n html_name = self.form.add_prefix(name)\n data_val = field.widget.value_from_datadict(self.form.data, self.form.files, html_name)\n use_alt_value = not field.has_changed(initial_val, data_val)\n expected_value = value if use_alt_value else initial_data.get(name)\n expected_result = {name: value} if use_alt_value else {}\n result = self.form.set_alt_data(data=None, name=name, field=field, value=value)\n\n self.assertEqual(self.test_initial[name], initial_val)\n self.assertEqual(initial_data[name], data_val)\n self.assertEqual(expected_value, self.form.data[html_name])\n self.assertEqual(expected_value, field.initial)\n self.assertDictEqual(expected_result, result)\n for key in initial_data:\n self.assertEqual(expected_data[key], self.form.data[key])\n self.assertEqual(len(expected_data), len(self.form.data))\n self.assertTrue(use_alt_value)\n\n self.form.data = original_form_data",
"def test_set_alt_data_collection(self):\n names = list(self.test_data.keys())[1:-1]\n alt_values = {name: f\"alt_value_{name}\" for name in self.test_initial} # some, but not all, will be used.\n original_form_data = self.form.data\n test_data = self.test_data.copy()\n test_data.update({k: v for k, v in self.test_initial.items() if get_html_name(self.form, k) not in names})\n test_data._mutable = False\n self.form.data = test_data\n initial_data = test_data.copy()\n expected_result = {k: v for k, v in alt_values.items() if get_html_name(self.form, k) not in names}\n expected_data = test_data.copy()\n expected_data.update(expected_result)\n expect_updates = any(self.data_is_initial(name) for name in initial_data)\n test_input = {name: (self.form.fields[name], val) for name, val in alt_values.items()}\n result = self.form.set_alt_data(test_input)\n\n self.assertDictEqual(expected_result, result)\n self.assertDictEqual(expected_data, self.form.data)\n self.assertNotEqual(initial_data, self.form.data)\n self.assertTrue(expect_updates)\n self.assertIsNot(test_data, self.form.data)\n\n self.form.data = original_form_data",
"def get_initial_display(self):\r\n return {self.answer_id: self.initial_display}",
"def get_initial_display(self):\r\n return {self.answer_id: self.initial_display}",
"def test_set_alt_data_unchanged(self):\n original_form_data = self.form.data\n test_data = self.test_data.copy()\n test_data._mutable = False\n self.form.data = test_data\n initial_data = test_data.copy()\n alt_values = {name: f\"alt_value_{name}\" for name in self.test_initial}\n test_input = {name: (self.form.fields[name], val) for name, val in alt_values.items()}\n expect_updates = any(self.data_is_initial(name) for name in initial_data)\n result = self.form.set_alt_data(test_input)\n had_updates = any(self.form.data[name] != value for name, value in initial_data.items())\n\n self.assertFalse(expect_updates)\n self.assertFalse(had_updates)\n self.assertDictEqual({}, result)\n self.assertDictEqual(initial_data, self.form.data)\n self.assertIs(test_data, self.form.data)\n\n self.form.data = original_form_data",
"def _onchange_no_direct_fp(self):\n if self.no_direct_fp:\n self.fp_supplier_id = False\n self.fp_location_id = False",
"def set_show_in_display_form(self, flag):\n qry = ServiceOperationQuery(self, \"setShowInDisplayForm\", [flag])\n self.context.add_query(qry)\n return self",
"def setContentData(self, content):\n original = content\n if IVersionedObject.providedBy(original):\n content = original.get_editable()\n if content is None:\n self.widgetFactoryFactory = SMIDisplayWidgetFactory\n content = original.get_previewable()\n\n super(SMIEditForm, self).setContentData(content)",
"def process_show_formset(self, request, step, formset):\n pass",
"def setContentData(self, content):\n original = content\n if IVersionedObject.providedBy(original):\n content = original.get_editable()\n if content is None:\n self.widgetFactoryFactory = SMIDisplayWidgetFactory\n content = original.get_previewable()\n\n super(SMISubEditForm, self).setContentData(content)",
"def process_show_form(self, request, step, form):\n pass",
"def _revert(self):\n if self.kwargs.get(\"collect\"):\n if self.kwargs[\"collect\"].ona_scan_form_pk:\n enable_form(self.kwargs[\"collect\"].ona_scan_form_pk)",
"def opt_display(self, display):\n key = get_enum_key(display, DISPLAYS)\n if key is not None:\n self.conf[\"display\"] = key\n self.display = DISPLAYS[key]\n print(\"Set display %r\" % key)\n else:\n print(\"Unknown display %r\" % display)",
"def _load_regional(self):\n# global approved, conflicts, suggestions, unknown, cldr, current\n start = self.lblFallback['text'].find('=>') + 2\n if self.lblFallback['text'][start:]:\n self.preferred.set(self.lblFallback['text'][start:])\n pass",
"def test_with_localsite_in_data_and_instance(self):\n config = IntegrationConfig.objects.create(\n integration_id=self.integration.integration_id)\n\n form = MyConfigForm(\n integration=self.integration,\n request=self.request,\n data={\n 'name': 'Test',\n 'my_conditions_last_id': '0',\n 'my_conditions_mode': 'all',\n 'my_conditions_choice[0]': 'review-groups',\n 'my_conditions_operator[0]': 'contains-any',\n 'my_conditions_value[0]': [self.local_site_1_group.pk],\n 'group': self.local_site_1_group.pk,\n 'local_site': self.local_site_1.pk,\n },\n instance=config)\n self.assertTrue(form.is_valid())\n\n new_config = form.save()\n self.assertEqual(config.pk, new_config.pk)\n self.assertEqual(new_config.local_site, self.local_site_1)",
"def test_set_alt_data_mutable(self):\n original_test_initial = self.test_initial\n original_form_data = self.form.data\n initial = self.test_initial\n test_data = self.test_data.copy()\n test_data.update({name: initial[name] for name in list(initial.keys())[1:-1]}) # two fields for alt_values\n test_data._mutable = False\n self.form.data = test_data\n initial_data = test_data.copy()\n alt_values = {name: f\"alt_value_{name}\" for name in initial} # some, but not all, will be used.\n unchanged_fields = {name: val for name, val in test_data.items() if val == initial[name]}\n expected_result = {name: alt_values[name] for name in unchanged_fields}\n expected_data = test_data.copy()\n expected_data.update(expected_result)\n expect_updates = any(self.data_is_initial(name) for name in initial_data)\n test_input = {name: (self.form.fields[name], val) for name, val in alt_values.items()}\n result = self.form.set_alt_data(test_input)\n had_updates = any(value != self.form.data[name] for name, value in initial_data.items())\n\n for name, val in expected_data.items():\n self.assertEqual(val, self.form.data[name])\n self.assertTrue(expect_updates)\n self.assertTrue(had_updates)\n self.assertFalse(getattr(self.form.data, '_mutable', True))\n self.assertDictEqual(expected_result, result)\n self.assertDictEqual(expected_data, self.form.data)\n\n self.form.data = original_form_data\n self.test_initial = original_test_initial",
"def save(self, *args, **kwargs):\n if DocumentSetFormField.objects.get(pk=self.field_id).autocomplete:\n self.canonical_label = self.get_canonical_value()\n\n super(DocumentSetFieldEntry, self).save(*args, **kwargs)",
"def on_pre_enter(self):\r\n store = get_store()\r\n self.ids.Capillary.text = str(store.get('Capillary')[\"value\"])\r\n self.ids.CapillaryUnit.text = store.get('Capillary')[\"unit\"]\r\n self.ids.Towindow.text = str(store.get('Towindow')[\"value\"])\r\n self.ids.TowindowUnit.text = store.get('Towindow')[\"unit\"]\r\n self.ids.Idiameter.text = str(store.get('Idiameter')[\"value\"])\r\n self.ids.IdiameterUnit.text = unicode(store.get('Idiameter')[\"unit\"])\r\n self.ids.Pressure.text = str(store.get('Pressure')[\"value\"])\r\n self.ids.PressureUnit.text = store.get('Pressure')[\"unit\"]\r\n self.ids.Detectiontime.text = str(store.get('Detectiontime')[\"value\"])\r\n self.ids.DetectiontimeUnit.text = store.get('Detectiontime')[\"unit\"]",
"def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == 'region':\n if request.user.profile.region is not None:\n kwargs['initial'] = request.user.profile.region.pk\n return db_field.formfield(**kwargs)\n\n return super(RegionalObjectiveAdmin, self).formfield_for_foreignkey(\n db_field, request, **kwargs)",
"def test_with_localsite_in_data(self):\n # Make sure the initial state and querysets are what we expect on init.\n form = MyConfigForm(integration=self.integration,\n request=self.request)\n\n self.assertIsNone(form.limited_to_local_site)\n self.assertIn('local_site', form.fields)\n self.assertEqual(list(form.fields['group'].queryset),\n [self.local_site_1_group,\n self.local_site_2_group,\n self.global_site_group])\n self.assertNotIn('local_site',\n form.fields['my_conditions'].choice_kwargs)\n\n # Now test what happens when it's been fed data and validated.\n form = MyConfigForm(\n integration=self.integration,\n request=self.request,\n data={\n 'name': 'Test',\n 'my_conditions_last_id': '0',\n 'my_conditions_mode': 'all',\n 'my_conditions_choice[0]': 'review-groups',\n 'my_conditions_operator[0]': 'contains-any',\n 'my_conditions_value[0]': [self.local_site_1_group.pk],\n 'group': self.local_site_1_group.pk,\n 'local_site': self.local_site_1.pk,\n })\n\n self.assertIsNone(form.limited_to_local_site)\n self.assertIn('local_site', form.fields)\n self.assertEqual(list(form.fields['group'].queryset),\n [self.local_site_1_group,\n self.local_site_2_group,\n self.global_site_group])\n self.assertNotIn('local_site',\n form.fields['my_conditions'].choice_kwargs)\n\n self.assertTrue(form.is_valid())\n\n # Make sure any overridden querysets have been restored, so users can\n # still change entries.\n self.assertEqual(list(form.fields['group'].queryset),\n [self.local_site_1_group,\n self.local_site_2_group,\n self.global_site_group])\n self.assertNotIn('local_site',\n form.fields['my_conditions'].choice_kwargs)\n\n new_config = form.save()\n self.assertEqual(new_config.local_site, self.local_site_1)\n self.assertEqual(new_config.settings['group'], 'local-site-1-group')\n\n condition_set = new_config.settings['my_conditions']\n self.assertEqual(list(condition_set.conditions[0].value),\n [self.local_site_1_group])",
"def get_initial(self):\n\t\n\t#Getting the initial data and setting it\n initial = super(UpdateView, self).get_initial()\n\timage_ref = default_value.get_setting('compute', 'image_ref') \n flavor_ref = default_value.get_setting('compute', 'flavor_ref')\n initial.update({'test_id': self.kwargs['test_id'], 'image_ref': image_ref, 'flavor_ref': flavor_ref})\n return initial",
"def test_col_data_field_help_hidden_initial_manual(self):\n help_tag = 'span'\n help_text_br = False\n label_attrs = {}\n names = ('first', 'billing_address_1')\n targets = ('help_text', 'field')\n expected = {nam: {fd: '' for fd in targets} for nam in names}\n test_text = 'This is the test help text'\n name = names[0]\n self.form.fields[name].help_text = test_text\n expected[name]['help_text'] = '<span id=\"id_{}-help\" class=\"help-text\">{}</span>'.format(name, test_text)\n field_attrs = {'aria-describedby': 'id_{}-help'.format(name)}\n bf = self.form[name]\n display = bf.as_widget(attrs=field_attrs)\n display += bf.as_hidden(only_initial=True)\n expected[name]['field'] = display\n expected[names[1]]['field'] = self.form[names[1]]\n original_field = {name: self.form.fields[name]}\n self.form.fields.update({name: deepcopy(original_field[name])})\n self.form.fields[name].show_hidden_initial = True\n actual = {}\n for name in names:\n field = self.form.fields[name]\n response = self.form.collect_col_data(name, field, help_tag, help_text_br, label_attrs)\n actual[name] = {target: response.get(target, 'NOT FOUND') for target in targets}\n\n self.assertDictEqual(expected, actual)\n\n self.form.fields.update(original_field)",
"def on_pre_enter(self):\r\n store = get_store()\r\n self.ids.Capillary.text = str(store.get('Capillary')[\"value\"])\r\n self.ids.CapillaryUnit.text = store.get('Capillary')[\"unit\"]\r\n self.ids.Towindow.text = str(store.get('Towindow')[\"value\"])\r\n self.ids.TowindowUnit.text = store.get('Towindow')[\"unit\"]\r\n self.ids.Idiameter.text = str(store.get('Idiameter')[\"value\"])\r\n self.ids.IdiameterUnit.text = store.get('Idiameter')[\"unit\"]\r\n self.ids.Voltage.text = str(store.get('Voltage')[\"value\"])\r\n self.ids.VoltageUnit.text = store.get('Voltage')[\"unit\"]\r\n self.ids.Electriccurrent.text = str(store.get('Electriccurrent')[\"value\"])\r\n self.ids.ElectriccurrentUnit.text = store.get('Electriccurrent')[\"unit\"]",
"def on_pre_enter(self):\r\n store = get_store()\r\n self.ids.Capillary.text = str(store.get('Capillary')[\"value\"])\r\n self.ids.CapillaryUnit.text = store.get('Capillary')[\"unit\"]\r\n self.ids.Towindow.text = str(store.get('Towindow')[\"value\"])\r\n self.ids.TowindowUnit.text = store.get('Towindow')[\"unit\"]\r\n self.ids.Idiameter.text = str(store.get('Idiameter')[\"value\"])\r\n self.ids.IdiameterUnit.text = store.get('Idiameter')[\"unit\"]\r\n self.ids.Voltage.text = str(store.get('Voltage')[\"value\"])\r\n self.ids.VoltageUnit.text = store.get('Voltage')[\"unit\"]\r\n self.ids.Electroosmosis.text = str(store.get('Electroosmosis')[\"value\"])\r\n self.ids.ElectroosmosisUnit.text = store.get('Electroosmosis')[\"unit\"]",
"def data_source_display_name(self, data_source_display_name):\n\n self._data_source_display_name = data_source_display_name",
"def get_context_data(self, **kwargs):\n if 'form' not in kwargs:\n kwargs['form'] = self.get_form(self.form_class)\n return super(OrganizerDataSetUpdate, self).get_context_data(**kwargs)",
"def upd_preview(self):\n\n if self.data_type != \"layer\":\n self.dlg.uLabelImgPreview.clear()\n self.dlg.uLabelImgPreview.setText(\"No preview available\")\n return\n\n if self.get_preview(\"300x200\", 0.5):\n return\n if self.get_preview(\"150x100\", 5):\n return\n\n self.dlg.uLabelImgPreview.clear()\n self.dlg.uLabelImgPreview.setText(\"No preview available\")"
] | [
"0.77660143",
"0.74513006",
"0.5475795",
"0.5383431",
"0.5315087",
"0.52522415",
"0.52522415",
"0.52449316",
"0.5115948",
"0.5079759",
"0.5019265",
"0.4988016",
"0.49873954",
"0.4916853",
"0.4899723",
"0.48932767",
"0.4892237",
"0.48896125",
"0.4884321",
"0.48838308",
"0.48831412",
"0.48813424",
"0.48651266",
"0.48605624",
"0.48510775",
"0.48491296",
"0.48265335",
"0.48032463",
"0.4798786",
"0.4765438"
] | 0.7731352 | 1 |
If submitted form requested foreign display, and was showing foreign, shows correctly. | def test_on_post_display_foreign_to_foreign(self):
    # data.get('country_flag', None)
    # address_display_version = 'foreign' if country_flag else 'local'
    # form.set_alt_data(name='country_display', field=self.fields['country_display'], value=address_display_version)
    pass | {
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} | [
"def test_on_post_display_foreign_to_local(self):\n # data.get('country_flag', None)\n # address_display_version = 'foreign' if country_flag else 'local'\n # form.set_alt_data(name='country_display', field=self.fields['country_display'], value=address_display_version)\n pass",
"def test_on_post_display_local_to_foreign(self):\n # data.get('country_flag', None)\n # address_display_version = 'foreign' if country_flag else 'local'\n # form.set_alt_data(name='country_display', field=self.fields['country_display'], value=address_display_version)\n pass",
"def process_show_form(self, request, step, form):\n pass",
"def ikfkDisplayMethod(*args, display: Union[AnyStr, bool]=\"\", q=True, query=True,\n **kwargs)->Union[None, Any]:\n pass",
"def set_show_in_display_form(self, flag):\n qry = ServiceOperationQuery(self, \"setShowInDisplayForm\", [flag])\n self.context.add_query(qry)\n return self",
"def form_valid(self, form):\n obj = form.save()\n base_url = self.request.build_absolute_uri()\n return self.render_to_response(\n self.get_context_data(\n result=f'{base_url}{obj.shortened_url}'))",
"def fl_form_is_visible(ptr_flform):\n _fl_form_is_visible = library.cfuncproto(\n library.load_so_libforms(), \"fl_form_is_visible\", \\\n cty.c_int, [cty.POINTER(xfdata.FL_FORM)], \\\n \"\"\"int fl_form_is_visible(FL_FORM * form) \"\"\")\n library.check_if_flinitialized()\n library.verify_flformptr_type(ptr_flform)\n library.keep_elem_refs(ptr_flform)\n retval = _fl_form_is_visible(ptr_flform)\n return retval",
"def display_required():\n return display_required()",
"def form_valid(self, form, contacto_linea_form, direccion_linea_form):\n self.object = form.save()\n contacto_linea_form.instance = self.object\n contacto_linea_form.save()\n direccion_linea_form.instance = self.object\n direccion_linea_form.save()\n\n if self.request.POST.get('_popup', 0):\n nombre = self.object.nombre.upper()\n id = self.object.id\n return HttpResponse(\n '<script type=\"text/javascript\">opener.dismissAddAnotherPopup( window, \\'%s\\', \\'%s\\' );</script>'\n % (id, nombre))\n else:\n return HttpResponseRedirect(self.get_success_url())",
"def visible(self, show):",
"def save_form(self, request, form, change):\n OwnableAdmin.save_form(self, request, form, change)\n return DisplayableAdmin.save_form(self, request, form, change)",
"def save_form(self, request, form, change):\n OwnableAdmin.save_form(self, request, form, change)\n return DisplayableAdmin.save_form(self, request, form, change)",
"def save_form(self, request, form, change):\n OwnableAdmin.save_form(self, request, form, change)\n return DisplayableAdmin.save_form(self, request, form, change)",
"def process_show_formset(self, request, step, formset):\n pass",
"def form_valid(self, form):\n form.instance.human = self.request.user\n return super(CreateBookmark, self).form_valid(form)",
"def form_valid(self, form):\n return super().form_valid(form)",
"def formfield_for_dbfield(self, db_field, **kwargs):\n if (isinstance(db_field, ForeignKey) and \n db_field.name in self.related_search_fields):\n kwargs['widget'] = fk_lookup.FkLookup(db_field.rel.to) \n return super(AutocompleteMixin, self).formfield_for_dbfield(db_field, **kwargs)",
"def form_valid(self, form, factura_form, ot_linea_form):\n self.object = form.save()\n factura_form.instance = self.object\n factura_form.save()\n ot_linea_form.instance = self.object\n ot_linea_form.save()\n return HttpResponseRedirect(self.get_success_url())",
"def show_box(self):\n self.permanent_show = not self.permanent_show",
"def render_form():",
"def is_shown(self, request):\n return True",
"def update_field_value_display(self, new_value):\n self.field_update_method(new_value)\n self._set_field_fg(new_value)\n self.link_missing = self.field_links and not any(link.name for link in self.field_links)\n self.build_field_context_menu()",
"def displayFiducial(self):\r\n # obsolete?\r\n profbox()\r\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\r\n for modelNode in modelNodes.values():\r\n displayNode = modelNode.GetDisplayNode()\r\n if modelNode.GetAttribute(\"segmented\") == \"1\" and modelNode.GetAttribute(\"nth\") != None:\r\n if 1:\r\n i = int(modelNode.GetAttribute(\"nth\"))\r\n if self.fiducialnode[i] == 0:\r\n polyData = modelNode.GetPolyData()\r\n nb = int(polyData.GetNumberOfPoints() - 1)\r\n coord = [0, 0, 0]\r\n if nb > 10:\r\n self.fiducialnode[i] = slicer.vtkMRMLAnnotationFiducialNode()\r\n polyData.GetPoint(nb, coord)\r\n self.fiducialnode[i].SetName(self.option[i])\r\n self.fiducialnode[i].SetFiducialCoordinates(coord)\r\n self.fiducialnode[i].Initialize(slicer.mrmlScene)\r\n self.fiducialnode[i].SetLocked(1)\r\n self.fiducialnode[i].SetSelectable(0)\r\n fidDN = self.fiducialnode[i].GetDisplayNode()\r\n fidDN.SetColor(modelNode.GetDisplayNode().GetColor())\r\n fidDN.SetGlyphScale(0)\r\n fidTN = self.fiducialnode[i].GetAnnotationTextDisplayNode()\r\n fidTN.SetTextScale(3)\r\n fidTN.SetColor(modelNode.GetDisplayNode().GetColor())\r\n\r\n self.fiducialnode[i].SetDisplayVisibility(modelNode.GetDisplayNode().GetVisibility())\r\n else:\r\n if modelNode.GetDisplayNode().GetVisibility():\r\n self.fiducialnode[i].SetDisplayVisibility(abs(self.fiducialnode[i].GetDisplayVisibility() - 1))\r\n if self.fiducialnode[i].GetDisplayVisibility() == 1:\r\n self.displayFiducialButton.text = \"Hide Labels on Needles\"\r\n else:\r\n self.displayFiducialButton.text = \"Display Labels on Needles\"",
"def should_show():",
"def displayFiducial(self):\n #obsolete?\n profbox()\n modelNodes = slicer.util.getNodes('vtkMRMLModelNode*')\n for modelNode in modelNodes.values():\n displayNode = modelNode.GetDisplayNode()\n if modelNode.GetAttribute(\"segmented\") == \"1\" and modelNode.GetAttribute(\"nth\")!=None:\n if 1:\n i = int(modelNode.GetAttribute(\"nth\"))\n if self.fiducialnode[i] == 0: \n polyData = modelNode.GetPolyData()\n nb = int(polyData.GetNumberOfPoints()-1)\n coord = [0,0,0]\n if nb>10:\n self.fiducialnode[i] = slicer.vtkMRMLAnnotationFiducialNode()\n polyData.GetPoint(nb,coord) \n self.fiducialnode[i].SetName(self.option[i])\n self.fiducialnode[i].SetFiducialCoordinates(coord) \n self.fiducialnode[i].Initialize(slicer.mrmlScene)\n self.fiducialnode[i].SetLocked(1)\n self.fiducialnode[i].SetSelectable(0)\n fidDN = self.fiducialnode[i].GetDisplayNode()\n fidDN.SetColor(modelNode.GetDisplayNode().GetColor())\n fidDN.SetGlyphScale(0)\n fidTN = self.fiducialnode[i].GetAnnotationTextDisplayNode()\n fidTN.SetTextScale(3)\n fidTN.SetColor(modelNode.GetDisplayNode().GetColor())\n \n self.fiducialnode[i].SetDisplayVisibility(modelNode.GetDisplayNode().GetVisibility())\n else: \n if modelNode.GetDisplayNode().GetVisibility():\n self.fiducialnode[i].SetDisplayVisibility(abs(self.fiducialnode[i].GetDisplayVisibility()-1))\n if self.fiducialnode[i].GetDisplayVisibility()==1:\n self.displayFiducialButton.text = \"Hide Labels on Needles\"\n else:\n self.displayFiducialButton.text = \"Display Labels on Needles\"",
"def get_submission_display_form(self):\n return self.organization.get_display_form()",
"def load_fishfry():\n logging.info(\"\\nedit ----------\")\n ffid = request.args.get(\"ffid\")\n if ffid:\n logging.info(ffid)\n # Prepare the form\n ff = FishFryForm()\n # get data for the one fish fry\n onefry = get_one_fishfry(ffid)\n # shortcut to the returned fish fry's properties\n if 'properties' in onefry.keys():\n p = onefry['properties']\n # map the fish fry data to the form fields\n ff.ffid.data = ffid\n ff.alcohol.data = prebool(p['alcohol'])\n ff.email.data = p['email']\n ff.etc.data = p['etc']\n ff.handicap.data = prebool(p['handicap'])\n ff.homemade_pierogies.data = prebool(p['homemade_pierogies'])\n ff.lunch.data = prebool(p['lunch'])\n ff.menu_txt.data = p['menu']['text']\n ff.menu_url.data = p['menu']['url']\n ff.phone.data = p['phone']\n ff.publish.data = p['publish']\n ff.take_out.data = prebool(p['take_out'])\n ff.validated.data = p['validated']\n ff.venue_address.data = p['venue_address']\n ff.venue_name.data = p['venue_name']\n ff.venue_notes.data = p['venue_notes']\n ff.venue_type.data = p['venue_type']\n ff.website.data = p['website']\n try:\n ff.lng.data = onefry['geometry']['coordinates'][0]\n ff.lat.data = onefry['geometry']['coordinates'][1]\n except:\n logging.warning(\"bad geom for {0}\".format(ffid))\n ff.lng.data = None\n ff.lat.data = None\n\n if p['events']:\n events = sort_records(p['events'], 'dt_start')\n for event in events:\n event_form = EventForm()\n event_form.dt_start = parse(event['dt_start'])\n event_form.dt_end = parse(event['dt_end'])\n ff.events.append_entry(event_form)\n # logging.info(ff.alcohol.data)\n # logging.info(ff.validated.data)\n return render_template(\n 'pages/fishfryform.html',\n form=ff\n )\n else:\n msg = \"Requested fish fry ({0}) not found.\".format(ffid)\n logging.warning(msg)\n flash(msg, \"warning\")\n return redirect(url_for('new_fishfry'))\n else:\n msg = \"Fish Fry ID not provided, so editing not possible. Record a new fish fry.\"\n logging.info(msg)\n flash(msg, \"info\")\n return redirect(url_for('new_fishfry'))",
"def formfield_for_foreignkey(self, db_field, request, **kwargs):\n profile = request.user.get_profile()\n if db_field.name == 'region' and profile.region is not None:\n kwargs['initial'] = profile.region.pk\n return db_field.formfield(**kwargs)\n\n if db_field.name == 'district' and profile.district is not None:\n kwargs['initial'] = profile.district.pk\n return db_field.formfield(**kwargs)\n\n if db_field.name == 'prescribing_officer':\n kwargs['initial'] = request.user.pk\n field = db_field.formfield(**kwargs)\n return field\n\n return super(PrescriptionAdmin, self).formfield_for_foreignkey(\n db_field, request, **kwargs)",
"def _onchange_no_direct_fp(self):\n if self.no_direct_fp:\n self.fp_supplier_id = False\n self.fp_location_id = False",
"def form_valid(self, form):\n\t self.object = form.save()\n\t registro = self.object\n\t cliente = registro.cliente\n\t cpf = cliente.cpf\n\t self.validar_registros(cpf)\n\t premios_cliente = Premios.objects.filter(cliente__cpf = cpf)\n\t return render(self.request, 'ctfidelidade/lista_premios.html', {'premios': premios_cliente})"
] | [
"0.7168204",
"0.7138766",
"0.57188034",
"0.55657893",
"0.54062665",
"0.5394192",
"0.526207",
"0.52578133",
"0.52548194",
"0.5253556",
"0.5211166",
"0.5211166",
"0.5211166",
"0.52019155",
"0.5168009",
"0.51628506",
"0.5142888",
"0.5126371",
"0.5120038",
"0.51047885",
"0.5086085",
"0.5080952",
"0.5068316",
"0.5060796",
"0.5053587",
"0.50533056",
"0.5052625",
"0.5020622",
"0.5009269",
"0.4997483"
] | 0.7608519 | 0 |
If submitted form requested local display, but was showing foreign, set_alt_data corrects to local. | def test_on_post_display_foreign_to_local(self):
    # data.get('country_flag', None)
    # address_display_version = 'foreign' if country_flag else 'local'
    # form.set_alt_data(name='country_display', field=self.fields['country_display'], value=address_display_version)
    pass | {
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
} | [
"def test_on_post_display_local_to_foreign(self):\n # data.get('country_flag', None)\n # address_display_version = 'foreign' if country_flag else 'local'\n # form.set_alt_data(name='country_display', field=self.fields['country_display'], value=address_display_version)\n pass",
"def test_on_post_display_foreign_to_foreign(self):\n # data.get('country_flag', None)\n # address_display_version = 'foreign' if country_flag else 'local'\n # form.set_alt_data(name='country_display', field=self.fields['country_display'], value=address_display_version)\n pass",
"def _onchange_no_direct_fp(self):\n if self.no_direct_fp:\n self.fp_supplier_id = False\n self.fp_location_id = False",
"def test_set_alt_data_unchanged(self):\n original_form_data = self.form.data\n test_data = self.test_data.copy()\n test_data._mutable = False\n self.form.data = test_data\n initial_data = test_data.copy()\n alt_values = {name: f\"alt_value_{name}\" for name in self.test_initial}\n test_input = {name: (self.form.fields[name], val) for name, val in alt_values.items()}\n expect_updates = any(self.data_is_initial(name) for name in initial_data)\n result = self.form.set_alt_data(test_input)\n had_updates = any(self.form.data[name] != value for name, value in initial_data.items())\n\n self.assertFalse(expect_updates)\n self.assertFalse(had_updates)\n self.assertDictEqual({}, result)\n self.assertDictEqual(initial_data, self.form.data)\n self.assertIs(test_data, self.form.data)\n\n self.form.data = original_form_data",
"def test_raises_set_alt_data(self):\n name, value = 'generic_field', 'alt_data_value'\n field = self.form.fields.get(name, None)\n self.assertIsNotNone(field, \"Unable to find the expected field in current fields. \")\n data = {name: (field, value)}\n\n with self.assertRaises(ImproperlyConfigured):\n self.form.set_alt_data(data=data, name=name, field=field, value=value)",
"def test_set_alt_data_single(self):\n name, value = 'generic_field', 'alt_data_value'\n field = self.form.fields.get(name, None)\n self.assertIsNotNone(field, \"Unable to find the expected field in current fields. \")\n original_form_data = self.form.data\n test_data = self.test_data.copy()\n test_data.update({name: self.test_initial[name]})\n test_data._mutable = False\n self.form.data = test_data\n initial_data = test_data.copy()\n expected_data = test_data.copy()\n expected_data.update({name: value})\n initial_val = self.form.get_initial_for_field(field, name)\n html_name = self.form.add_prefix(name)\n data_val = field.widget.value_from_datadict(self.form.data, self.form.files, html_name)\n use_alt_value = not field.has_changed(initial_val, data_val)\n expected_value = value if use_alt_value else initial_data.get(name)\n expected_result = {name: value} if use_alt_value else {}\n result = self.form.set_alt_data(data=None, name=name, field=field, value=value)\n\n self.assertEqual(self.test_initial[name], initial_val)\n self.assertEqual(initial_data[name], data_val)\n self.assertEqual(expected_value, self.form.data[html_name])\n self.assertEqual(expected_value, field.initial)\n self.assertDictEqual(expected_result, result)\n for key in initial_data:\n self.assertEqual(expected_data[key], self.form.data[key])\n self.assertEqual(len(expected_data), len(self.form.data))\n self.assertTrue(use_alt_value)\n\n self.form.data = original_form_data",
"def test_set_alt_data_collection(self):\n names = list(self.test_data.keys())[1:-1]\n alt_values = {name: f\"alt_value_{name}\" for name in self.test_initial} # some, but not all, will be used.\n original_form_data = self.form.data\n test_data = self.test_data.copy()\n test_data.update({k: v for k, v in self.test_initial.items() if get_html_name(self.form, k) not in names})\n test_data._mutable = False\n self.form.data = test_data\n initial_data = test_data.copy()\n expected_result = {k: v for k, v in alt_values.items() if get_html_name(self.form, k) not in names}\n expected_data = test_data.copy()\n expected_data.update(expected_result)\n expect_updates = any(self.data_is_initial(name) for name in initial_data)\n test_input = {name: (self.form.fields[name], val) for name, val in alt_values.items()}\n result = self.form.set_alt_data(test_input)\n\n self.assertDictEqual(expected_result, result)\n self.assertDictEqual(expected_data, self.form.data)\n self.assertNotEqual(initial_data, self.form.data)\n self.assertTrue(expect_updates)\n self.assertIsNot(test_data, self.form.data)\n\n self.form.data = original_form_data",
"def get_initial_display(self):\r\n return {self.answer_id: self.initial_display}",
"def get_initial_display(self):\r\n return {self.answer_id: self.initial_display}",
"def save(self, *args, **kwargs):\n if DocumentSetFormField.objects.get(pk=self.field_id).autocomplete:\n self.canonical_label = self.get_canonical_value()\n\n super(DocumentSetFieldEntry, self).save(*args, **kwargs)",
"def test_with_localsite_in_data_and_instance(self):\n config = IntegrationConfig.objects.create(\n integration_id=self.integration.integration_id)\n\n form = MyConfigForm(\n integration=self.integration,\n request=self.request,\n data={\n 'name': 'Test',\n 'my_conditions_last_id': '0',\n 'my_conditions_mode': 'all',\n 'my_conditions_choice[0]': 'review-groups',\n 'my_conditions_operator[0]': 'contains-any',\n 'my_conditions_value[0]': [self.local_site_1_group.pk],\n 'group': self.local_site_1_group.pk,\n 'local_site': self.local_site_1.pk,\n },\n instance=config)\n self.assertTrue(form.is_valid())\n\n new_config = form.save()\n self.assertEqual(config.pk, new_config.pk)\n self.assertEqual(new_config.local_site, self.local_site_1)",
"def test_col_data_field_help_hidden_initial_manual(self):\n help_tag = 'span'\n help_text_br = False\n label_attrs = {}\n names = ('first', 'billing_address_1')\n targets = ('help_text', 'field')\n expected = {nam: {fd: '' for fd in targets} for nam in names}\n test_text = 'This is the test help text'\n name = names[0]\n self.form.fields[name].help_text = test_text\n expected[name]['help_text'] = '<span id=\"id_{}-help\" class=\"help-text\">{}</span>'.format(name, test_text)\n field_attrs = {'aria-describedby': 'id_{}-help'.format(name)}\n bf = self.form[name]\n display = bf.as_widget(attrs=field_attrs)\n display += bf.as_hidden(only_initial=True)\n expected[name]['field'] = display\n expected[names[1]]['field'] = self.form[names[1]]\n original_field = {name: self.form.fields[name]}\n self.form.fields.update({name: deepcopy(original_field[name])})\n self.form.fields[name].show_hidden_initial = True\n actual = {}\n for name in names:\n field = self.form.fields[name]\n response = self.form.collect_col_data(name, field, help_tag, help_text_br, label_attrs)\n actual[name] = {target: response.get(target, 'NOT FOUND') for target in targets}\n\n self.assertDictEqual(expected, actual)\n\n self.form.fields.update(original_field)",
"def test_with_localsite_in_data(self):\n # Make sure the initial state and querysets are what we expect on init.\n form = MyConfigForm(integration=self.integration,\n request=self.request)\n\n self.assertIsNone(form.limited_to_local_site)\n self.assertIn('local_site', form.fields)\n self.assertEqual(list(form.fields['group'].queryset),\n [self.local_site_1_group,\n self.local_site_2_group,\n self.global_site_group])\n self.assertNotIn('local_site',\n form.fields['my_conditions'].choice_kwargs)\n\n # Now test what happens when it's been fed data and validated.\n form = MyConfigForm(\n integration=self.integration,\n request=self.request,\n data={\n 'name': 'Test',\n 'my_conditions_last_id': '0',\n 'my_conditions_mode': 'all',\n 'my_conditions_choice[0]': 'review-groups',\n 'my_conditions_operator[0]': 'contains-any',\n 'my_conditions_value[0]': [self.local_site_1_group.pk],\n 'group': self.local_site_1_group.pk,\n 'local_site': self.local_site_1.pk,\n })\n\n self.assertIsNone(form.limited_to_local_site)\n self.assertIn('local_site', form.fields)\n self.assertEqual(list(form.fields['group'].queryset),\n [self.local_site_1_group,\n self.local_site_2_group,\n self.global_site_group])\n self.assertNotIn('local_site',\n form.fields['my_conditions'].choice_kwargs)\n\n self.assertTrue(form.is_valid())\n\n # Make sure any overridden querysets have been restored, so users can\n # still change entries.\n self.assertEqual(list(form.fields['group'].queryset),\n [self.local_site_1_group,\n self.local_site_2_group,\n self.global_site_group])\n self.assertNotIn('local_site',\n form.fields['my_conditions'].choice_kwargs)\n\n new_config = form.save()\n self.assertEqual(new_config.local_site, self.local_site_1)\n self.assertEqual(new_config.settings['group'], 'local-site-1-group')\n\n condition_set = new_config.settings['my_conditions']\n self.assertEqual(list(condition_set.conditions[0].value),\n [self.local_site_1_group])",
"def _load_regional(self):\n# global approved, conflicts, suggestions, unknown, cldr, current\n start = self.lblFallback['text'].find('=>') + 2\n if self.lblFallback['text'][start:]:\n self.preferred.set(self.lblFallback['text'][start:])\n pass",
"def _revert(self):\n if self.kwargs.get(\"collect\"):\n if self.kwargs[\"collect\"].ona_scan_form_pk:\n enable_form(self.kwargs[\"collect\"].ona_scan_form_pk)",
"def formfield_for_foreignkey(self, db_field, request, **kwargs):\n if db_field.name == 'region':\n if request.user.profile.region is not None:\n kwargs['initial'] = request.user.profile.region.pk\n return db_field.formfield(**kwargs)\n\n return super(RegionalObjectiveAdmin, self).formfield_for_foreignkey(\n db_field, request, **kwargs)",
"def test_set_alt_data_mutable(self):\n original_test_initial = self.test_initial\n original_form_data = self.form.data\n initial = self.test_initial\n test_data = self.test_data.copy()\n test_data.update({name: initial[name] for name in list(initial.keys())[1:-1]}) # two fields for alt_values\n test_data._mutable = False\n self.form.data = test_data\n initial_data = test_data.copy()\n alt_values = {name: f\"alt_value_{name}\" for name in initial} # some, but not all, will be used.\n unchanged_fields = {name: val for name, val in test_data.items() if val == initial[name]}\n expected_result = {name: alt_values[name] for name in unchanged_fields}\n expected_data = test_data.copy()\n expected_data.update(expected_result)\n expect_updates = any(self.data_is_initial(name) for name in initial_data)\n test_input = {name: (self.form.fields[name], val) for name, val in alt_values.items()}\n result = self.form.set_alt_data(test_input)\n had_updates = any(value != self.form.data[name] for name, value in initial_data.items())\n\n for name, val in expected_data.items():\n self.assertEqual(val, self.form.data[name])\n self.assertTrue(expect_updates)\n self.assertTrue(had_updates)\n self.assertFalse(getattr(self.form.data, '_mutable', True))\n self.assertDictEqual(expected_result, result)\n self.assertDictEqual(expected_data, self.form.data)\n\n self.form.data = original_form_data\n self.test_initial = original_test_initial",
"def test_clean_dependent_fields(self):\n data = self.report_data.copy()\n new_primary_complaint = 'housing'\n data.update({'primary_complaint': new_primary_complaint})\n form = ReportEditForm(data, instance=self.report)\n\n self.assertTrue(form.is_valid())\n self.assertTrue('public_or_private_employer' in form.changed_data)\n self.assertTrue('employer_size' not in form.changed_data)\n for field in Report.PRIMARY_COMPLAINT_DEPENDENT_FIELDS['workplace']:\n self.assertTrue(form.cleaned_data[field] == \"\")",
"def formfield_for_foreignkey(self, db_field, request, **kwargs):\n profile = request.user.get_profile()\n if db_field.name == 'region' and profile.region is not None:\n kwargs['initial'] = profile.region.pk\n return db_field.formfield(**kwargs)\n\n if db_field.name == 'district' and profile.district is not None:\n kwargs['initial'] = profile.district.pk\n return db_field.formfield(**kwargs)\n\n if db_field.name == 'prescribing_officer':\n kwargs['initial'] = request.user.pk\n field = db_field.formfield(**kwargs)\n return field\n\n return super(PrescriptionAdmin, self).formfield_for_foreignkey(\n db_field, request, **kwargs)",
"def save_form(self, request, form, change):\n OwnableAdmin.save_form(self, request, form, change)\n return DisplayableAdmin.save_form(self, request, form, change)",
"def save_form(self, request, form, change):\n OwnableAdmin.save_form(self, request, form, change)\n return DisplayableAdmin.save_form(self, request, form, change)",
"def save_form(self, request, form, change):\n OwnableAdmin.save_form(self, request, form, change)\n return DisplayableAdmin.save_form(self, request, form, change)",
"def test_without_localsite_and_instance(self):\n config = IntegrationConfig.objects.create(\n integration_id=self.integration.integration_id)\n\n form = MyConfigForm(\n integration=self.integration,\n request=self.request,\n instance=config,\n data={\n 'name': 'Test',\n 'my_conditions_last_id': '0',\n 'my_conditions_mode': 'all',\n 'my_conditions_choice[0]': 'review-groups',\n 'my_conditions_operator[0]': 'contains-any',\n 'my_conditions_value[0]': [self.global_site_group.pk],\n 'group': self.global_site_group.pk,\n })\n\n self.assertTrue(form.is_valid())\n\n new_config = form.save()\n self.assertEqual(config.pk, new_config.pk)\n self.assertIsNone(new_config.local_site)",
"def check_display_option(display):\n display_options = get_display_options(verbose=False)\n if display not in display_options:\n err_str = \"The display value (%s) does not correspond to a possible \\\n display value in ENA\" % (display)\n raise ValueError(err_str)",
"def test_prep_field_properties(self):\n original_data = self.form.data\n test_data = original_data.copy()\n # modify values in data\n test_data._mutable = False\n self.form.data = test_data\n original_fields = self.form.fields\n test_fields = original_fields.copy()\n # modify fields\n self.form.fields = test_fields\n test_fields_info = {name: field.__dict__.copy() for name, field in test_fields.items()}\n original_get_overrides = self.form.get_overrides\n def skip_overrides(): return {}\n self.form.get_overrides = skip_overrides\n original_alt_field_info = getattr(self.form, 'alt_field_info', None)\n self.form.alt_field_info = self.alt_field_info\n self.form.test_condition_response = True\n expected_fields_info = test_fields_info.copy()\n result_fields = self.form.prep_fields()\n result_fields_info = {name: field.__dict__.copy() for name, field in result_fields.items()}\n modified_info = self.alt_field_info['alt_test_feature']\n first_label = modified_info['first']['label']\n first_initial = modified_info['first']['initial']\n last_initial = modified_info['last']['initial']\n for name, opts in modified_info.items():\n expected_fields_info[name].update(opts)\n\n self.assertEqual(first_label, result_fields['first'].label)\n self.assertEqual(first_initial, result_fields['first'].initial)\n self.assertEqual(last_initial, result_fields['last'].initial)\n for key, val in expected_fields_info.items():\n self.assertEqual(val, result_fields_info[key])\n self.assertDictEqual(expected_fields_info, result_fields_info)\n\n self.form.test_condition_response = False\n self.form.alt_field_info = original_alt_field_info\n if original_alt_field_info is None:\n del self.form.alt_field_info\n self.form.fields = original_fields\n self.form.data = original_data\n self.form.get_overrides = original_get_overrides",
"def test_condition_alt_country(self):\n original_flag = self.form.country_optional\n self.form.country_optional = True\n original_data = getattr(self.form, 'data', None)\n test_data = original_data.copy()\n test_data['country_flag'] = True\n self.form.data = test_data\n first_expect = True\n first_actual = self.form.condition_alt_country()\n self.form.data['country_flag'] = False\n second_expect = False\n second_actual = self.form.condition_alt_country()\n self.form.data['country_flag'] = True\n self.form.country_optional = False\n third_expect = False\n third_actual = self.form.condition_alt_country()\n\n self.assertEqual(first_expect, first_actual)\n self.assertEqual(second_expect, second_actual)\n self.assertEqual(third_expect, third_actual)\n\n self.form.country_optional = original_flag\n self.form.data = original_data\n if original_data is None:\n del self.form.data",
"def form_valid(self, form):\n form.instance.auth_user = self.request.user\n form.instance.group = self.get_local_group()\n\n form.instance.locality = form.cleaned_data['locality']\n form.instance.status = 'submitted'\n\n super(CreateInitiativeView, self).form_valid(form)\n\n return redirect(self.success_url + '?id=' + str(self.object.pk))",
"def set_show_in_display_form(self, flag):\n qry = ServiceOperationQuery(self, \"setShowInDisplayForm\", [flag])\n self.context.add_query(qry)\n return self",
"def _annotate_local(self) -> None:\n if self._has_annotation(self.primaryjoin, \"local\"):\n return\n\n if self._local_remote_pairs:\n local_side = util.column_set(\n [l for (l, r) in self._local_remote_pairs]\n )\n else:\n local_side = util.column_set(self.parent_persist_selectable.c)\n\n def locals_(element: _CE, **kw: Any) -> Optional[_CE]:\n if \"remote\" not in element._annotations and element in local_side:\n return element._annotate({\"local\": True})\n return None\n\n self.primaryjoin = visitors.replacement_traverse(\n self.primaryjoin, {}, locals_\n )",
"def test_without_localsite(self):\n # Make sure the initial state and querysets are what we expect on init.\n form = MyConfigForm(integration=self.integration,\n request=self.request)\n\n self.assertIsNone(form.limited_to_local_site)\n self.assertIn('local_site', form.fields)\n self.assertEqual(list(form.fields['group'].queryset),\n [self.local_site_1_group,\n self.local_site_2_group,\n self.global_site_group])\n self.assertNotIn('local_site',\n form.fields['my_conditions'].choice_kwargs)\n\n # Now test what happens when it's been fed data and validated.\n form = MyConfigForm(\n integration=self.integration,\n request=self.request,\n data={\n 'name': 'Test',\n 'my_conditions_last_id': '0',\n 'my_conditions_mode': 'all',\n 'my_conditions_choice[0]': 'review-groups',\n 'my_conditions_operator[0]': 'contains-any',\n 'my_conditions_value[0]': [self.global_site_group.pk],\n 'group': self.global_site_group.pk,\n })\n\n self.assertIsNone(form.limited_to_local_site)\n self.assertIn('local_site', form.fields)\n self.assertEqual(list(form.fields['group'].queryset),\n [self.local_site_1_group,\n self.local_site_2_group,\n self.global_site_group])\n self.assertNotIn('local_site',\n form.fields['my_conditions'].choice_kwargs)\n\n self.assertTrue(form.is_valid())\n\n # Make sure any overridden querysets have been restored, so users can\n # still change entries.\n self.assertEqual(list(form.fields['group'].queryset),\n [self.local_site_1_group,\n self.local_site_2_group,\n self.global_site_group])\n\n config = form.save()\n self.assertIsNone(config.local_site)\n self.assertEqual(config.settings['group'], 'global-site-group')\n\n condition_set = config.settings['my_conditions']\n self.assertEqual(list(condition_set.conditions[0].value),\n [self.global_site_group])"
] | [
"0.79282147",
"0.76382875",
"0.5292249",
"0.5248025",
"0.52416384",
"0.5228665",
"0.52174956",
"0.51686186",
"0.51686186",
"0.5072312",
"0.5057547",
"0.50152373",
"0.49532503",
"0.49409863",
"0.49361557",
"0.49005464",
"0.48473862",
"0.47833675",
"0.47592726",
"0.47376817",
"0.47376817",
"0.47376817",
"0.4732629",
"0.4701605",
"0.46793795",
"0.46482417",
"0.464056",
"0.46165588",
"0.46003506",
"0.45586404"
] | 0.80248636 | 0 |
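A possible way to flesh out the commented display-toggle stub that closes the record above — a sketch only, assuming the form under test exposes 'country_flag' and 'country_display' and that set_alt_data accepts the name/field/value keywords the neighbouring tests use; the test name is hypothetical:

def test_on_post_display_toggle_sketch(self):
    # Hypothetical sketch, not part of the suite: follows the commented stub above.
    original_data = self.form.data
    test_data = original_data.copy()
    test_data['country_flag'] = True
    self.form.data = test_data
    country_flag = self.form.data.get('country_flag', None)
    address_display_version = 'foreign' if country_flag else 'local'
    # 'country_display' may live in fields or in computed_fields, depending on the form.
    field = self.form.fields.get('country_display', None) or getattr(self.form, 'computed_fields', {}).get('country_display', None)
    self.assertIsNotNone(field)
    result = self.form.set_alt_data(name='country_display', field=field, value=address_display_version)
    self.assertEqual('foreign', address_display_version)
    self.assertIsInstance(result, dict)
    self.form.data = original_data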
get_critical_field called if form.country_optional, country_field, and needed_names. | def test_init_get_critical_for_needed(self):
# needed_names = [nf for nf in ('country_display', 'country_flag') if nf not in self.form.base_fields]
# for name in needed_names: name, field = self.get_critical_field(name, name)
# original_get_critical_field = self.form.get_critical_field
# self.form.get_critical_field = self.get_critical_field_signal
print("================ TEST INIT GET CRITICAL FOR NEEDED ==================")
print(self.form.get_critical_field.__name__)
# print(getattr(self, 'get_critical_call', 'NOT FOUND'))
# print(getattr(self.form, 'get_critical_call', 'NOT FOUND'))
name = 'country_display'
expected = {'names': name, 'alt_name': name}
    field = self.form.fields.get(name, None) or self.form.computed_fields.get(name, None)
response = self.form.get_critical_field(name, name)
actual = getattr(self, 'get_critical_call', 'NOT FOUND')
print("----------------------------------------")
print(response)
print(expected)
print(actual)
# self.assertDictEqual(expected, actual)
self.assertEqual((name, field), response)
# self.get_critical_field = original_get_critical_field | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_get_critical_from_existing_fields(self):\n name = 'generic_field'\n opts = {'names': (name, ), 'alt_field': '', 'computed': False}\n expected_field = self.form.fields.get(name, None)\n actual_name, actual_field = self.form.get_critical_field(opts['names'])\n self.assertEqual(name, actual_name)\n self.assertEqual(expected_field, actual_field)",
"def test_callable_name_get_critical_field(self):\n special = self.get_generic_name\n name, field = self.form.get_critical_field(special)\n expected_name = special()\n expected_field = self.form.fields[expected_name]\n self.assertEqual(expected_name, name)\n self.assertEqual(expected_field, field)",
"def test_raise_on_missing_critical(self):\n name_for_field = 'absent_field'\n field_opts = {'names': (name_for_field, 'absent'), 'alt_field': '', 'computed': False}\n critical_fields = {'absent_field': field_opts}\n with self.assertRaises(ImproperlyConfigured):\n self.form.fields_for_critical(critical_fields)",
"def getFieldInfo(fieldnum):\n try:\n info = _getCampaignDict()[\"c{0}\".format(fieldnum)]\n # Print warning messages if necessary\n if \"preliminary\" in info and info[\"preliminary\"] == \"True\":\n logger.warning(\"Warning: the position of field {0} is preliminary. \"\n \"Do not use this position for your final \"\n \"target selection!\".format(fieldnum))\n return info\n except KeyError:\n raise ValueError(\"Field {0} not set in this version \"\n \"of the code\".format(fieldnum))",
"def get_missing_field(self, name):\n source = getattr(self.form, 'removed_fields', {})\n if issubclass(self.form.__class__, ComputedFieldsMixIn):\n source = self.form.computed_fields\n field = source.pop(name, None)\n return field",
"def getfield(form, fieldname):\n try:\n return form[fieldname]\n except KeyError:\n return None",
"def get_field(entry, field):\n\n if field.name in entry.field_dict:\n if field.choices:\n return getattr(entry.object, \"get_%s_display\" % field.name)()\n return entry.field_dict[field.name]\n else:\n return settings.TEMPLATE_STRING_IF_INVALID",
"def test_init_update_computed_field_names(self):\n original_request = self.request\n original_form = self.form\n computed = getattr(self.form, 'computed_fields', None)\n get_form = self.make_form_request()\n computed_fields = getattr(get_form, 'computed_fields', None)\n\n self.assertIsNotNone(computed)\n self.assertIsNotNone(computed_fields)\n self.assertIsNotNone(self.form.country_field_name)\n self.assertIn(self.form.country_field_name, computed_fields)\n\n self.request = original_request\n self.form = original_form",
"def try_get_field(self, field_name: str) -> Optional[fields.Field]:\n field = getattr(self, field_name, None)\n if isinstance(field, fields.Field):\n return field\n return None",
"def get_field(self, field):\n return self.extra_fields[field]",
"def test_clean_country_flag(self):\n # country_flag = self.cleaned_data.get('country_flag', None)\n # field = self.fields.get(self.country_field_name, None)\n # if not field and hasattr(self, 'computed_fields'):\n # field = self.computed_fields.get(self.country_field_name, None)\n # if field.initial == self.cleaned_data.get(self.country_field_name, None)\n pass",
"def on_get_field(self, ins, const, obj):\n pass",
"def get_field_by_name(form, field_name):\n if field_name in form.fields:\n return form[field_name]\n return None",
"def get_field(self, field_name):\n for f in self.fields:\n if f.name.lower() == field_name.lower():\n return f\n return None",
"def _get_field(self, field_name: str):\n backcompat_prefix = \"extra__grpc__\"\n if field_name.startswith(\"extra__\"):\n raise ValueError(\n f\"Got prefixed name {field_name}; please remove the '{backcompat_prefix}' prefix \"\n \"when using this method.\"\n )\n if field_name in self.extras:\n return self.extras[field_name]\n prefixed_name = f\"{backcompat_prefix}{field_name}\"\n if prefixed_name in self.extras:\n return self.extras[prefixed_name]\n raise KeyError(f\"Param {field_name} not found in extra dict\")",
"def field(self) -> Optional[str]:\n return pulumi.get(self, \"field\")",
"def temp_validator(cls, value, field):\n if value == \"U\":\n LOGGER.warning(\"{field.name} value is 'U'. Setting to None.\")\n return None\n return value",
"def get_field(self, field):\n return self._dict.get(field)",
"def get_field_value(self, field_name):\n if field_name in self.fields.keys():\n return self.fields[field_name]\n else:\n return \"No such field\"",
"def get_field(self, field):\n try:\n values = getattr(self, field)\n return values\n except AttributeError:\n return {\n \"message\": \"Ensure the field passed is valid.\",\n \"help\": \"The field should be an attribute of the object.\"\n }",
"def get_value(self, context, obj, field_name):\r\n try:\r\n language = get_language()\r\n value = self.get_translated_value(obj, field_name, language)\r\n if value:\r\n return value\r\n if self.FALLBACK:\r\n for lang, lang_name in settings.LANGUAGES:\r\n if lang == language:\r\n # already tried this one...\r\n continue\r\n value = self.get_translated_value(obj, field_name, lang)\r\n if value:\r\n return value\r\n untranslated = getattr(obj, field_name)\r\n if self._is_truthy(untranslated):\r\n return untranslated\r\n else:\r\n return self.EMPTY_VALUE\r\n except Exception:\r\n if settings.TEMPLATE_DEBUG:\r\n raise\r\n return self.EMPTY_VALUE",
"def formfield_for_foreignkey(self, db_field, request, **kwargs):\n profile = request.user.get_profile()\n if db_field.name == 'region' and profile.region is not None:\n kwargs['initial'] = profile.region.pk\n return db_field.formfield(**kwargs)\n\n if db_field.name == 'district' and profile.district is not None:\n kwargs['initial'] = profile.district.pk\n return db_field.formfield(**kwargs)\n\n if db_field.name == 'prescribing_officer':\n kwargs['initial'] = request.user.pk\n field = db_field.formfield(**kwargs)\n return field\n\n return super(PrescriptionAdmin, self).formfield_for_foreignkey(\n db_field, request, **kwargs)",
"def critical(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"critical\")",
"def get_field(self, name):\n for field_name, field in self._all_fields.iteritems():\n if name == self._sanitize_field_name(field_name):\n return field",
"def test_use_existing_computed_field_dict(self):\n if isinstance(self.form.computed_fields, list):\n self.form.computed_fields = self.form.get_computed_fields(self.form.computed_fields)\n self.form.fields.update(self.form.computed_fields) # only names in fields included in get_computed_field_names.\n result_names = self.form.get_computed_field_names([], self.form.fields)\n\n self.assertIsInstance(self.form.computed_fields, dict)\n self.assertIn('test_field', result_names)",
"def test_prep_country_fields(self):\n original_flag = self.form.country_optional\n self.form.country_optional = True\n original_fields = self.form.fields\n original_removed = getattr(self.form, 'removed_fields', None)\n original_computed = getattr(self.form, 'computed_fields', None)\n self.form.fields = original_fields.copy()\n if original_removed is not None:\n self.form.removed_fields = original_removed.copy()\n if original_computed is not None:\n self.form.computed_fields = original_computed.copy()\n remaining = original_fields.copy()\n opts, field_rows = {'fake_opts': 'fake', 'fields': ['nope']}, [{'name': 'assigned_field'}]\n args = ['arbitrary', 'input', 'args']\n kwargs = {'test_1': 'data_1', 'test_2': 'data_2'}\n field_names = (self.form.country_field_name, 'country_flag', )\n if not any(remaining.get(name, None) for name in field_names):\n fix_fields = {name: self.get_missing_field(name) for name in field_names if name not in remaining}\n remaining.update(fix_fields)\n expected_add = {name: remaining[name] for name in field_names if name in remaining}\n expected_field_rows = field_rows.copy()\n expected_field_rows.append(expected_add)\n expected_remaining = {name: field for name, field in remaining.items() if name not in expected_add}\n expected_opts = deepcopy(opts)\n expected_opts['fields'].append(field_names)\n\n sent = (opts, field_rows, remaining, *args)\n r_opts, r_rows, r_remaining, *r_args, r_kwargs = self.form.prep_country_fields(*sent, **kwargs)\n self.assertEqual(expected_opts, r_opts)\n self.assertEqual(expected_field_rows, r_rows)\n self.assertEqual(expected_remaining, r_remaining)\n self.assertEqual(args, r_args)\n self.assertEqual(kwargs, r_kwargs)\n\n self.form.country_optional = original_flag\n self.form.fields = original_fields\n if original_removed is not None:\n self.form.removed_fields = original_removed\n if original_computed is not None:\n self.form.computed_fields = original_computed",
"def is_known_field(self, name):\n return (name in self.fields) or (name in self.collections) or (name == self.id_field_name) or (name == 'cid')",
"def getfield(value, arg):\n #import pdb; pdb.set_trace()\n if hasattr(value, \"fields\"):\n fields = getattr(value, \"fields\")\n if str(arg) in fields:\n return str(fields[str(arg)])",
"def _get(self, field):\n return super(_Classifier, self)._get(field)",
"def get_context_data(self, project_id, category_id, field_id,\n *args, **kwargs):\n field = Field.objects.as_admin(\n self.request.user,\n project_id,\n category_id,\n field_id\n )\n return super(FieldMixin, self).get_context_data(\n project=field.category.project,\n category=field.category,\n field=field,\n is_display_field=(field == field.category.display_field),\n is_expiry_field=(field == field.category.expiry_field),\n *args,\n **kwargs\n )"
] | [
"0.7488688",
"0.6860929",
"0.64093065",
"0.62498826",
"0.59814197",
"0.5829226",
"0.56075716",
"0.54849654",
"0.54585516",
"0.54107404",
"0.54096913",
"0.54086405",
"0.5377918",
"0.5366352",
"0.51903194",
"0.5174083",
"0.5126931",
"0.5125253",
"0.51179713",
"0.5104857",
"0.5104714",
"0.5095346",
"0.50841385",
"0.50731564",
"0.5059292",
"0.50353426",
"0.50153494",
"0.5006027",
"0.5004106",
"0.49920797"
] | 0.7308993 | 1 |
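The record above leaves its verification scaffolding commented out and relies on debug prints. Below is a minimal sketch of the check it appears to be building toward, reusing only helpers the surrounding tests already call (make_form_request, and get_critical_field returning a (name, field) pair); the test name and the getattr fallback for computed_fields are assumptions:

def test_get_critical_resolves_needed_names_sketch(self):
    # Hypothetical sketch: each name the form may need at __init__ should resolve
    # through get_critical_field to a field the form actually holds.
    form = self.make_form_request()
    needed_names = [nf for nf in ('country_display', 'country_flag') if nf not in form.base_fields]
    for name in needed_names:
        computed = getattr(form, 'computed_fields', {})
        expected = form.fields.get(name, None) or computed.get(name, None)
        actual_name, actual_field = form.get_critical_field(name, name)
        self.assertEqual(name, actual_name)
        self.assertEqual(expected, actual_field)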
The clean_country_flag method will look for country field in computed_fields if not in fields. | def test_clean_uses_computed(self):
original_request = self.request
original_form = self.form
original_cleaned = getattr(self.form, 'cleaned_data', None)
self.form = self.make_form_request()
name = self.form.country_field_name
initial = self.form.base_fields[name].initial
cleaned = {'country_flag': True, name: initial}
self.form.cleaned_data = cleaned
self.assertNotIn(name, self.form.fields)
self.assertIn(name, self.form.computed_fields)
with self.assertRaisesMessage(ValidationError, "You can input your address. "):
clean_flag = self.form.clean_country_flag()
self.form.cleaned_data[name] = ''
clean_flag = self.form.clean_country_flag()
self.assertEqual(True, clean_flag)
self.request = original_request
self.form = original_form
self.form.cleaned_data = original_cleaned
if original_cleaned is None:
del self.form.cleaned_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_clean_country_flag(self):\n # country_flag = self.cleaned_data.get('country_flag', None)\n # field = self.fields.get(self.country_field_name, None)\n # if not field and hasattr(self, 'computed_fields'):\n # field = self.computed_fields.get(self.country_field_name, None)\n # if field.initial == self.cleaned_data.get(self.country_field_name, None)\n pass",
"def test_prep_country_fields(self):\n original_flag = self.form.country_optional\n self.form.country_optional = True\n original_fields = self.form.fields\n original_removed = getattr(self.form, 'removed_fields', None)\n original_computed = getattr(self.form, 'computed_fields', None)\n self.form.fields = original_fields.copy()\n if original_removed is not None:\n self.form.removed_fields = original_removed.copy()\n if original_computed is not None:\n self.form.computed_fields = original_computed.copy()\n remaining = original_fields.copy()\n opts, field_rows = {'fake_opts': 'fake', 'fields': ['nope']}, [{'name': 'assigned_field'}]\n args = ['arbitrary', 'input', 'args']\n kwargs = {'test_1': 'data_1', 'test_2': 'data_2'}\n field_names = (self.form.country_field_name, 'country_flag', )\n if not any(remaining.get(name, None) for name in field_names):\n fix_fields = {name: self.get_missing_field(name) for name in field_names if name not in remaining}\n remaining.update(fix_fields)\n expected_add = {name: remaining[name] for name in field_names if name in remaining}\n expected_field_rows = field_rows.copy()\n expected_field_rows.append(expected_add)\n expected_remaining = {name: field for name, field in remaining.items() if name not in expected_add}\n expected_opts = deepcopy(opts)\n expected_opts['fields'].append(field_names)\n\n sent = (opts, field_rows, remaining, *args)\n r_opts, r_rows, r_remaining, *r_args, r_kwargs = self.form.prep_country_fields(*sent, **kwargs)\n self.assertEqual(expected_opts, r_opts)\n self.assertEqual(expected_field_rows, r_rows)\n self.assertEqual(expected_remaining, r_remaining)\n self.assertEqual(args, r_args)\n self.assertEqual(kwargs, r_kwargs)\n\n self.form.country_optional = original_flag\n self.form.fields = original_fields\n if original_removed is not None:\n self.form.removed_fields = original_removed\n if original_computed is not None:\n self.form.computed_fields = original_computed",
"def test_prep_country_fields_flat(self):\n original_flag = self.form.country_optional\n self.form.country_optional = True\n original_fields = self.form.fields\n original_removed = getattr(self.form, 'removed_fields', None)\n original_computed = getattr(self.form, 'computed_fields', None)\n self.form.fields = original_fields.copy()\n if original_removed is not None:\n self.form.removed_fields = original_removed.copy()\n if original_computed is not None:\n self.form.computed_fields = original_computed.copy()\n remaining = original_fields.copy()\n opts, field_rows = {'fake_opts': 'fake', 'fields': ['nope']}, [{'name': 'assigned_field'}]\n args = ['arbitrary', 'input', 'args']\n kwargs = {'test_1': 'data_1', 'test_2': 'data_2'}\n field_names = (self.form.country_field_name, 'country_flag', )\n if not any(remaining.get(name, None) for name in field_names):\n fix_fields = {name: self.get_missing_field(name) for name in field_names if name not in remaining}\n remaining.update(fix_fields)\n expected_add = {name: remaining[name] for name in field_names if name in remaining}\n expected_field_rows = field_rows.copy()\n expected_field_rows.append(expected_add)\n expected_remaining = {name: field for name, field in remaining.items() if name not in expected_add}\n expected_opts = deepcopy(opts)\n # expected_opts['fields'].append(field_names)\n kwargs['flat_fields'] = True\n expected_remaining.update(expected_add)\n\n sent = (opts, field_rows, remaining, *args)\n r_opts, r_rows, r_remaining, *r_args, r_kwargs = self.form.prep_country_fields(*sent, **kwargs)\n self.assertEqual(expected_opts, r_opts)\n self.assertEqual(expected_field_rows, r_rows)\n self.assertEqual(expected_remaining, r_remaining)\n self.assertEqual(args, r_args)\n self.assertEqual(kwargs, r_kwargs)\n\n self.form.country_optional = original_flag\n self.form.fields = original_fields\n if original_removed is not None:\n self.form.removed_fields = original_removed\n if original_computed is not None:\n self.form.computed_fields = original_computed\n pass",
"def test_pass_through_prep_country_fields(self):\n original_flag = self.form.country_optional\n self.form.country_optional = False # True\n original_fields = self.form.fields\n self.form.fields = original_fields.copy()\n remaining_fields = original_fields.copy()\n opts, field_rows = {'fake_opts': 'fake'}, [{'name': 'assigned_field'}]\n args = ['arbitrary', 'input', 'args']\n kwargs = {'test_1': 'data_1', 'test_2': 'data_2'}\n\n expected = (opts.copy(), field_rows.copy(), remaining_fields.copy(), *args, kwargs.copy())\n actual = self.form.prep_country_fields(opts, field_rows, remaining_fields, *args, **kwargs)\n self.assertEqual(expected, actual)\n\n self.form.country_optional = original_flag\n self.form.fields = original_fields",
"def checkPostalCode(self, code, country):\n if country == 'US':\n USZipCodeField().clean(code)",
"def country_flag(country):\n\tif not country:\n\t\treturn u''\n\tresult = Country.objects.filter(name__icontains=country)\n\tif result:\n\t\tc = result[0]\n\t\tiso = c.iso\n\t\tflag_location = iso_flag(iso)\n\t\treturn flag_location\n\treturn u''",
"def _feature_country_process(self):\n if 'Country' not in self._df_invoice_line.columns:\n return\n\n list_countries_keep = ['United Kingdom']\n rows_before = self._df_invoice_line.shape[0]\n \n df_invoice_line_new = pd.DataFrame()\n for country in list_countries_keep : \n df_invoice_line_new = df_invoice_line_new.append(\\\n self._df_invoice_line[self._df_invoice_line['Country']==country]\\\n , ignore_index=True)\n\n self.df_invoice_line = df_invoice_line_new\n del(df_invoice_line_new)\n \n rows_after = self._df_invoice_line.shape[0] \n _print_stat_rows(\"Countries filtering : \",rows_before, rows_after)\n\n \n #-------------------------------------------------------------------------\n # Due to the fact only one country is used, then this feature is dropped\n #-------------------------------------------------------------------------\n list_col_to_keep = [col for col in self._df_invoice_line.columns \\\n if col not in 'Country']\n \n self._df_invoice_line = self._df_invoice_line[list_col_to_keep] \n\n return",
"def clean(self):\n cleaned_data = super(AddressForm, self).clean()\n state = cleaned_data.get('state')\n country = cleaned_data.get('country') # A Country instance\n postal_code = cleaned_data.get('postal_code')\n\n # The state must be valid for the country\n if state and country and not self.checkState(state, country):\n self.add_error('state', forms.ValidationError('Invalid state for {0}.'.format(country)))\n\n # The postal code must be valid for the country\n if postal_code and country:\n try:\n self.checkPostalCode(postal_code, country)\n except forms.ValidationError as e:\n self.add_error('postal_code', e)\n return cleaned_data",
"def clean_country(raw_country):\n #\n if raw_country[0:2]==\"l'\":\n raw_country = raw_country[2:]\n country = ''.join((c for c in unicodedata.normalize('NFD', raw_country) if unicodedata.category(c) != 'Mn'))\n country = re.sub(r\"(\\s|')\", \"-\", country) # replace space and quotes with dash\n return country",
"def clean_countries(event_db):\n event_db[\"country_edb\"] = event_db[\"country_edb\"].apply(_clean_country_str)\n event_db = my_utils.split_strings_at_comma_and_distribute_to_new_rows(event_db, 'country_edb')\n return event_db",
"def clean_embargoed_countries(self):\r\n embargoed_countries = self.cleaned_data[\"embargoed_countries\"]\r\n if not embargoed_countries:\r\n return ''\r\n\r\n error_countries = []\r\n\r\n for country in embargoed_countries.split(','):\r\n country = country.strip().upper()\r\n if not self._is_valid_code(country):\r\n error_countries.append(country)\r\n\r\n if error_countries:\r\n msg = 'COULD NOT PARSE COUNTRY CODE(S) FOR: {0}'.format(error_countries)\r\n msg += ' Please check the list of country codes and verify your entries.'\r\n raise forms.ValidationError(msg)\r\n\r\n return embargoed_countries",
"def test_condition_alt_country(self):\n original_flag = self.form.country_optional\n self.form.country_optional = True\n original_data = getattr(self.form, 'data', None)\n test_data = original_data.copy()\n test_data['country_flag'] = True\n self.form.data = test_data\n first_expect = True\n first_actual = self.form.condition_alt_country()\n self.form.data['country_flag'] = False\n second_expect = False\n second_actual = self.form.condition_alt_country()\n self.form.data['country_flag'] = True\n self.form.country_optional = False\n third_expect = False\n third_actual = self.form.condition_alt_country()\n\n self.assertEqual(first_expect, first_actual)\n self.assertEqual(second_expect, second_actual)\n self.assertEqual(third_expect, third_actual)\n\n self.form.country_optional = original_flag\n self.form.data = original_data\n if original_data is None:\n del self.form.data",
"def test_init_update_computed_field_names(self):\n original_request = self.request\n original_form = self.form\n computed = getattr(self.form, 'computed_fields', None)\n get_form = self.make_form_request()\n computed_fields = getattr(get_form, 'computed_fields', None)\n\n self.assertIsNotNone(computed)\n self.assertIsNotNone(computed_fields)\n self.assertIsNotNone(self.form.country_field_name)\n self.assertIn(self.form.country_field_name, computed_fields)\n\n self.request = original_request\n self.form = original_form",
"def test_field_clean_method_called_in_clean_computed_fields(self):\n name = 'test_field'\n expected = 'clean_confirmed'\n original_func = deepcopy(self.form.test_func)\n def replace_value(value): return expected\n self.form.test_func = replace_value\n if isinstance(self.form.computed_fields, (list, tuple)):\n self.form.computed_fields = self.form.get_computed_fields([name])\n field = self.form.computed_fields.get(name) # getattr(self.form, name)\n # initial_value = self.get_initial_for_field(field, name)\n value = getattr(self.form, 'compute_%s' % name)()\n value = field.clean(value)\n original_errors = deepcopy(self.form._errors)\n if self.form._errors is None:\n self.form._errors = ErrorDict() # mimic full_clean: _error is an ErrorDict\n cleaned_data = getattr(self.form, 'cleaned_data', {})\n cleaned_data.update({name: value}) # make sure the original cleaned_data for the field is set.\n self.form.cleaned_data = cleaned_data # ensure cleaned_data is present (mimic full_clean)\n compute_errors = self.form._clean_computed_fields()\n actual = self.form.cleaned_data.get(name, None)\n\n self.assertFalse(compute_errors)\n self.assertEqual(expected, actual)\n self.assertNotEqual(expected, value)\n self.assertNotEqual(expected, self.form.test_value)\n\n self.form.test_func = original_func\n self.form._errors = original_errors",
"def country_hint(self, value):\n return None",
"def test_on_post_display_foreign_to_local(self):\n # data.get('country_flag', None)\n # address_display_version = 'foreign' if country_flag else 'local'\n # form.set_alt_data(name='country_display', field=self.fields['country_display'], value=address_display_version)\n pass",
"def findCountryCode(self):\n RecordsWithCountry = []\n for state in pycountry.subdivisions:\n #print(state.name)\n for record in self.Records: \n if state.name == record.state:\n #print(state.country, record.state)\n r = RecordCountry(date=record.date,\n country=state.country.alpha_3,\n impressions=record.impressions,\n CTR=record.CTR)\n self.Records.remove(record)\n RecordsWithCountry.append(r)\n for record in self.Records: \n r = RecordCountry(date=record.date,\n country=\"XXX\",\n impressions=record.impressions,\n CTR=record.CTR)\n RecordsWithCountry.append(r)\n self.Records = RecordsWithCountry",
"def test_clean_calls_handle_flag_field(self):\n original_data = self.form.data\n original_fields = self.form.fields\n original_computed_fields = self.form.computed_fields\n original_errors = getattr(self.form, '_errors', None)\n original_cleaned_data = getattr(self.form, 'cleaned_data', None)\n self.form.data = original_data.copy()\n self.form.fields = original_fields.copy()\n self.form.computed_fields = original_computed_fields.copy()\n self.form._errors = ErrorDict() if original_errors is None else original_errors.copy()\n new_cleaned_data = {self.form.name_for_user: 'test_value', self.form.name_for_email: 'test_value'}\n self.form.cleaned_data = new_cleaned_data.copy()\n # expected_error = {self.form.name_for_email: \"test email error\", self.form.name_for_user: \"test user error\"}\n expected_error = \"The replace_handle_flag_field test return value. \"\n def replace_handle_flag_field(email, user): return expected_error\n self.form.handle_flag_field = replace_handle_flag_field\n with self.assertRaisesMessage(ValidationError, expected_error):\n self.form.clean()\n\n self.form.data = original_data\n self.form.fields = original_fields\n self.form.computed_fields = original_computed_fields\n self.form._errors = original_errors\n self.form.cleaned_data = original_cleaned_data\n if original_errors is None:\n del self.form._errors\n if original_cleaned_data is None:\n del self.form.cleaned_data",
"def _validate_country(country):\n if country == '' or country == '--': # lint-amnesty, pylint: disable=consider-using-in\n raise errors.AccountCountryInvalid(accounts.REQUIRED_FIELD_COUNTRY_MSG)",
"def test_on_post_display_local_to_foreign(self):\n # data.get('country_flag', None)\n # address_display_version = 'foreign' if country_flag else 'local'\n # form.set_alt_data(name='country_display', field=self.fields['country_display'], value=address_display_version)\n pass",
"def iso_flag(country_id, flag_path=u''):\n if country_id == '999':\n #Added for internal call - ie flag/phone.png\n return util_iso_flag('telephone', flag_path)\n try:\n obj_country = Country.objects.get(id=country_id)\n except:\n return u''\n return util_iso_flag(obj_country.iso2, flag_path)",
"def with_preset_issuing_country(self, country):\n self.__preset_issuing_country = country\n return self",
"def get_cloudflare_country(self, request):\n try:\n return request.META['HTTP_CF_IPCOUNTRY'].lower()\n except KeyError:\n pass",
"def test_addr_country_good_values(self):\n for input_val, output_val in self.known_values:\n self.line._parse_addr_country(input_val)\n self.assertEqual(output_val, self.line.addr_country)",
"def clean_fields(self, *args, **kwargs):\n if self.ipi_name:\n self.ipi_name = self.ipi_name.zfill(11)\n if self.ipi_base:\n self.ipi_base = self.ipi_base.replace(\".\", \"\").upper()\n self.ipi_base = re.sub(\n r\"(I).?(\\d{9}).?(\\d)\", r\"\\1-\\2-\\3\", self.ipi_base\n )\n return super().clean_fields(*args, **kwargs)",
"def test_cleaned_data_modified_by_clean_computed_fields(self):\n name = 'test_field'\n field = self.form.computed_fields.get(name) # getattr(self.form, name) for BoundField instance for Field.\n value = self.form.compute_test_field()\n value = field.clean(value)\n expected = self.form.test_func(value)\n if isinstance(self.form.computed_fields, (list, tuple)):\n self.form.computed_fields = self.form.get_computed_fields([name])\n original_errors = deepcopy(self.form._errors)\n if self.form._errors is None:\n self.form._errors = ErrorDict() # mimic full_clean: _error is an ErrorDict\n self.form.cleaned_data = getattr(self.form, 'cleaned_data', {}) # mimic full_clean: cleaned_data is present\n original = self.form.cleaned_data.get(name, None)\n compute_errors = self.form._clean_computed_fields()\n actual = self.form.cleaned_data.get(name, '')\n\n self.assertFalse(compute_errors)\n self.assertNotEqual(original, actual)\n self.assertNotEqual(original, expected)\n self.assertEqual(expected, actual)\n\n self.form._errors = original_errors",
"def test_on_post_display_foreign_to_foreign(self):\n # data.get('country_flag', None)\n # address_display_version = 'foreign' if country_flag else 'local'\n # form.set_alt_data(name='country_display', field=self.fields['country_display'], value=address_display_version)\n pass",
"def mask_foreign_country(column):\n codes = misc_utils.load_country_code()\n # Remove New Zealand from foreign country list\n codes = codes.drop(codes[codes['Alpha-2'] == 'nz'].index)\n # Remove texts in brackets: belgian franc (convertible) -> belgian franc\n codes['Country'] = codes['Country'].replace({r'\\(.*\\)': ''}, regex=True).str.strip()\n regex = list()\n regex.append('|'.join(r'\\s' + codes['Country'] + r'\\b'))\n # Don't use Alpha-2 and Alpha-3 since there are lots of misreplacement\n # regex.append('|'.join(r'\\s' + codes['Alpha-2'] + r'\\b'))\n # regex.append('|'.join(r'\\s' + codes['Alpha-3'] + r'\\b'))\n regex_str = '|'.join(regex)\n column = column.replace(regex_str, ' $FOREIGN_COUNTRY ', regex=True)\n return column",
"def test_field_compute_method_called_in_clean_computed_fields(self):\n name = 'test_field'\n expected = 'compute_confirmed'\n self.form.test_value = expected\n modified = self.form.test_func(expected)\n original_func = deepcopy(self.form.test_func)\n def pass_through(value): return value\n self.form.test_func = pass_through\n if isinstance(self.form.computed_fields, (list, tuple)):\n self.form.computed_fields = self.form.get_computed_fields([name])\n original_errors = deepcopy(self.form._errors)\n if self.form._errors is None:\n self.form._errors = ErrorDict() # mimic full_clean: _error is an ErrorDict\n self.form.cleaned_data = getattr(self.form, 'cleaned_data', {}) # mimic full_clean: cleaned_data is present\n compute_errors = self.form._clean_computed_fields()\n actual = self.form.cleaned_data.get(name, None)\n\n self.assertFalse(compute_errors)\n self.assertEqual(expected, actual)\n\n self.form.test_func = original_func\n restored = self.form.test_func(expected)\n self.assertEqual(modified, restored)\n self.form._errors = original_errors",
"def _clean_address(self, field):\n data = self.cleaned_data[field]\n if data != \"\" and not is_valid_address(data):\n raise ValidationError(\"Provided value is not a valid Algorand address!\")\n return data"
] | [
"0.7905743",
"0.65489554",
"0.634155",
"0.6327185",
"0.63062096",
"0.61223215",
"0.57785374",
"0.57144135",
"0.5432035",
"0.53450346",
"0.5309917",
"0.52375585",
"0.5214699",
"0.52075917",
"0.519703",
"0.5196857",
"0.5196554",
"0.51909405",
"0.5186007",
"0.51769084",
"0.51723886",
"0.51530457",
"0.5127077",
"0.50669295",
"0.503997",
"0.5039929",
"0.5029597",
"0.5020661",
"0.49985743",
"0.4988464"
] | 0.66551816 | 1 |
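For context on the record that ends above: a sketch of one clean_country_flag implementation that would satisfy test_clean_uses_computed. This is an inference from the test alone, not the project's actual method; the gate on country_flag and the exact error string are assumptions, and the method would live on the form class:

from django.core.exceptions import ValidationError

def clean_country_flag(self):
    # Hypothetical sketch of a form method consistent with test_clean_uses_computed.
    # Assumes self.country_field_name and, optionally, a computed_fields dict.
    country_flag = self.cleaned_data.get('country_flag', None)
    if country_flag:
        field = self.fields.get(self.country_field_name, None)
        if not field and hasattr(self, 'computed_fields'):
            field = self.computed_fields.get(self.country_field_name, None)
        if field and field.initial == self.cleaned_data.get(self.country_field_name, None):
            raise ValidationError("You can input your address. ")
    return country_flag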
Also set `AVSC_JAR` environment variable appropriately. | def build_avsc_jar():
jar_path = osp.join(DPATH, 'deps', 'avro', 'target', 'avsc.jar')
if not osp.exists(jar_path):
_logger.info('building avsc jar')
pom_path = osp.join(DPATH, 'deps', 'avro', 'pom.xml')
code = call(['mvn', '-f', pom_path, 'clean', 'compile', 'assembly:single'])
if code:
_logger.error('unable to build avsc jar')
sys.exit(1)
os.environ['AVSC_JAR'] = jar_path | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def configure(ctx):\n mxmlc = ctx.options.mxmlc\n\n if not mxmlc:\n mxmlc = ctx.find_program('mxmlc')\n\n ctx.env.MXMLC = os.path.abspath(os.path.expanduser(mxmlc))\n\n ctx.env.JAVA = ctx.find_program('java')\n\n if not ctx.env.SIKULI_HOME:\n ctx.env.SIKULI_HOME = get_sikuli_home(ctx)\n ctx.msg('Setting SIKULI_HOME', ctx.env.SIKULI_HOME)\n\n if not os.path.exists(ctx.env.SIKULI_HOME):\n ctx.fatal('Unable to find Sikuli at %r' % (ctx.env.SIKULI_HOME,))\n\n ctx.env.FLASH_PLAYER = ctx.options.flash_player\n\n if not ctx.env.FLASH_PLAYER:\n ctx.fatal('Standalone Flash player required, supply --flash_player')\n\n ctx.msg('Using Flash Standalone Player', ctx.env.FLASH_PLAYER)",
"def _setup_environment_vars(self, opts):\n # Check that these directories actually exist\n assert os.path.isdir(opts.movie_advisor_home)\n\n #if not 'install-bento' in self.actions: assert os.path.isdir(opts.bento_home)\n\n self.movie_advisor_home = opts.movie_advisor_home\n self.bento_home = opts.bento_home\n self.bento_tgz = opts.bento_tgz\n self.kiji_uri = \"kiji://.env/tutorial\"\n\n # \"express job\" takes a jar file as an argument\n assert os.path.isfile(os.path.join(self.movie_advisor_home, self.express_jar))\n\n # Set the classpath for all of the commands that we'll run\n jarsFullPaths = [os.path.join(self.movie_advisor_home, j) for j in self.jars]\n for jar in jarsFullPaths: assert os.path.isfile(jar)\n\n classpath = \":\".join(jarsFullPaths)\n os.environ['KIJI_CLASSPATH'] = classpath\n\n if opts.show_classpath:\n print(\"export KIJI_CLASSPATH=%s\" % classpath)\n sys.exit(0)",
"def create_spark_session():\n try:\n spark = (\n SparkSession.builder\n .config(\"spark.jars.packages\", os.environ['SAS_JAR'])\n # .config(\"spark.hadoop.fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # .config(\"spark.hadoop.fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n .enableHiveSupport()\n .getOrCreate()\n )\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY'])\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.impl\",\"org.apache.hadoop.fs.s3a.S3AFileSystem\")\n # spark._jsc.hadoopConfiguration().set(\"com.amazonaws.services.s3.enableV4\", \"true\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.aws.credentials.provider\",\"org.apache.hadoop.fs.s3a.BasicAWSCredentialsProvider\")\n # spark._jsc.hadoopConfiguration().set(\"fs.s3a.endpoint\", \"s3.amazonaws.com\")\n except Exception as e:\n logger.error('Pyspark session failed to be created...')\n raise\n return spark",
"def vsav():\n if not sys.argv[1:]:\n sys.stderr.write(vsav.__doc__.strip() + '\\n')\n else:\n for src in sys.argv[1:]:\n version_util.save(src)",
"def spark_setup(self):\n # Update the global variables for config details\n globals()[\"spark_token\"] = self.spark_bot_token\n globals()[\"bot_email\"] = self.spark_bot_email\n\n sys.stderr.write(\"Spark Bot Email: \" + self.spark_bot_email + \"\\n\")\n sys.stderr.write(\"Spark Token: REDACTED\\n\")\n\n # Setup the Spark Connection\n globals()[\"spark\"] = CiscoSparkAPI(access_token=self.spark_bot_token)\n globals()[\"webhook\"] = self.setup_webhook(self.spark_bot_name,\n self.spark_bot_url)\n sys.stderr.write(\"Configuring Webhook. \\n\")\n sys.stderr.write(\"Webhook ID: \" + globals()[\"webhook\"].id + \"\\n\")",
"def SetBootloaderEnv(script, name, val):\n script.AppendExtra('set_bootloader_env(\"%s\", \"%s\");' % (name, val))",
"def create_spark_session(self):\n\n spark_jar_path = os.getenv(\"SPARK_JARS_PATH\")\n spark_jars = [os.path.join(spark_jar_path, jars) for jars in os.listdir(spark_jar_path)] \n\n self.spark = SparkSession\\\n .builder\\\n .config(\"spark.jars\", \",\".join(spark_jars))\\\n .appName(appname)\\\n .getOrCreate()",
"def initFromEnv(self):\n #self.command = 'scram' # SB I think this line is not needed\n self[\"SCRAM_ARCH\"] = None\n\n if 'SCRAM_ARCH' in os.environ:\n self[\"SCRAM_ARCH\"] = os.environ[\"SCRAM_ARCH\"]\n else:\n stdout, _, _ = execute_command(command='scram arch')\n self[\"SCRAM_ARCH\"] = stdout\n\n try:\n self[\"CMSSW_BASE\"] = os.environ[\"CMSSW_BASE\"]\n self[\"CMSSW_VERSION\"] = os.environ[\"CMSSW_VERSION\"]\n# Commenting these two out. I don't think they are really needed\n# self.cmsswReleaseBase = os.environ[\"CMSSW_RELEASE_BASE\"]\n# self.localRT = os.environ[\"LOCALRT\"]\n except KeyError as ke:\n self[\"CMSSW_BASE\"] = None\n self[\"CMSSW_VERSION\"] = None\n# self.cmsswReleaseBase = None\n# self.localRT = None\n msg = \"Please make sure you have setup the CMS enviroment (cmsenv). Cannot find %s in your env\" % str(ke)\n msg += \"\\nPlease refer to https://twiki.cern.ch/twiki/bin/view/CMSPublic/WorkBookCRAB3Tutorial#CMS_environment for how to setup the CMS enviroment.\"\n raise EnvironmentException(msg)",
"def spark_config_set(is_spark_submit):\n if is_spark_submit:\n global sc, sqlContext\n sc = SparkContext()\n sqlContext = HiveContext(sc)",
"def setup_environment(self, spack_env, run_env):\n run_env.prepend_path('PICARD',\n join_path(self.prefix, 'bin', 'picard.jar'))",
"def setup_env(app_dir, app_id, version, module_id, remote_api=False):\n # GCS library behaves differently when running under remote_api. It uses\n # SERVER_SOFTWARE to figure this out. See cloudstorage/common.py, local_run().\n if remote_api:\n os.environ['SERVER_SOFTWARE'] = 'remote_api'\n else:\n os.environ['SERVER_SOFTWARE'] = 'Development yo dawg/1.0'\n if app_dir:\n app_id = app_id or Application(app_dir).app_id\n version = version or 'default-version'\n if app_id:\n os.environ['APPLICATION_ID'] = app_id\n if version:\n os.environ['CURRENT_VERSION_ID'] = '%s.%d' % (\n version, int(time.time()) << 28)\n if module_id:\n os.environ['CURRENT_MODULE_ID'] = module_id",
"def spark_home(self, sparkHome):\n self.sparkProperties[SparkProperties.SPARK_MESOS_EXECUTOR_HOME] = sparkHome\n return self",
"def set(self):\n \n ffmpeg_installed = misc.askquestion(DialogTitle='FFMPEG Check',\n Question='Is FFMPEG installed?')\n \n if ffmpeg_installed:\n ffmpeg_dir = misc.get_dir(DialogTitle='Please select the directory where FFMPEG (binary) is installed:')\n \n if sys.platform=='win32':\n self.ffmpeg = os.path.join(ffmpeg_dir, 'ffmpeg.exe')\n self.ffplay = os.path.join(ffmpeg_dir, 'ffplay.exe')\n else:\n self.ffmpeg = os.path.join(ffmpeg_dir, 'ffmpeg')\n self.ffplay = os.path.join(ffmpeg_dir, 'ffplay')\n \n if not os.path.exists(self.ffmpeg):\n print('Sorry, {0} does not exist!'.format(self.ffmpeg))\n return\n \n if not os.path.exists(self.ffplay):\n print('Sorry, {0} does not exist!'.format(self.ffplay))\n return\n \n else:\n self.ffmpeg = None\n self.ffplay = None\n \n # Save them to the default config file\n info = {'ffmpeg':self.ffmpeg, 'ffplay': self.ffplay}\n try:\n with open(self.config_file, 'w') as outFile:\n json.dump(info, outFile)\n print('Config information written to {0}'.format(os.path.abspath(self.config_file)))\n except PermissionError as e:\n curDir = os.path.abspath(os.curdir)\n print('Current directory: {0}'.format(curDir))\n print('Error: {0}'.format(e))\n \n return",
"def get_jarfile(self):",
"def __init__(self):\n \n app_name = 'FFMPEG_info'\n app_author = 'sksound'\n \n # The package \"appdirs\" allows an OS-independent implementation\n user_data_dir = appdirs.user_data_dir(app_name, app_author)\n if not os.path.exists(user_data_dir):\n os.makedirs(user_data_dir)\n self.config_file = os.path.join(user_data_dir, 'ffmpeg.json')\n \n if not os.path.exists(self.config_file):\n \n # Check if it is in the system path\n try:\n completed_process = subprocess.run('ffmpeg')\n completed_process = subprocess.run('ffplay')\n self.ffmpeg = 'ffmpeg'\n self.ffplay = 'ffplay'\n except FileNotFoundError:\n self.set()\n else:\n with open(self.config_file, 'r') as in_file:\n info = json.load(in_file)\n self.ffmpeg = info['ffmpeg']\n self.ffplay = info['ffplay']",
"def Install(vm):\n vm.Install('openjdk')\n # TODO(user): replace with Python 3 when supported.\n # https://github.com/brianfrankcooper/YCSB/issues/1459\n vm.Install('python')\n vm.InstallPackages('curl')\n ycsb_url = (\n _ycsb_tar_url\n or FLAGS.ycsb_tar_url\n or YCSB_URL_TEMPLATE.format(FLAGS.ycsb_version)\n )\n install_cmd = (\n 'mkdir -p {0} && curl -L {1} | '\n 'tar -C {0} --strip-components=1 -xzf - '\n # Log4j 2 < 2.16 is vulnerable to\n # https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2021-44228.\n # YCSB currently ships with a number of vulnerable jars. None are used by\n # PKB, so simply exclude them.\n # After https://github.com/brianfrankcooper/YCSB/pull/1583 is merged and\n # released, this will not be necessary.\n # TODO(user): Update minimum YCSB version and remove.\n \"--exclude='**/log4j-core-2*.jar' \"\n )\n vm.RemoteCommand(install_cmd.format(YCSB_DIR, ycsb_url))\n if _GetVersion(FLAGS.ycsb_version) >= 11:\n vm.Install('maven')\n vm.RemoteCommand(install_cmd.format(HDRHISTOGRAM_DIR, HDRHISTOGRAM_TAR_URL))\n # _JAVA_OPTIONS needed to work around this issue:\n # https://stackoverflow.com/questions/53010200/maven-surefire-could-not-find-forkedbooter-class\n # https://stackoverflow.com/questions/34170811/maven-connection-reset-error\n vm.RemoteCommand(\n 'cd {hist_dir} && _JAVA_OPTIONS=-Djdk.net.URLClassPath.'\n 'disableClassPathURLCheck=true,https.protocols=TLSv1.2 '\n '{mvn_cmd}'.format(\n hist_dir=HDRHISTOGRAM_DIR, mvn_cmd=maven.GetRunCommand('install')\n )\n )",
"def start_bioformats():\n javabridge.start_vm(class_path=bioformats.JARS,run_headless=True)\n print(\"Bioformats JVM started!\")",
"def main():\n print(\"This is a library for reading video sequences into python via ffmpeg. \")\n print(\"Provides the 'Video_Reader' iterator class. \")\n print(\"Requires ffmpeg be installed. \")",
"def main():\n spark_it_up()",
"def _environment(self):\n\n self.spark_home = self._config_default(\"spark-home\",\n self._context(SparkSubmit.SPARK_HOME, default = os.environ.get(SparkSubmit.SPARK_HOME,None)))\n assert self.spark_home, \"unable to detect SPARK_HOME. set SPARK_HOME as directed in the task documentation\"\n assert os.path.exists(self.spark_home), \"provided SPARK_HOME doesn't exists\"\n\n spark_config = {'cluster-config': {}, 'other-config': {}}\n if 'config-file' in self._config_keys():\n spark_config.update(yaml.load(open(self._config('config-file')))['spark-config'])\n\n self.app_config = []\n\n spark_app = self._config('app-config')\n self.app_config.append(spark_app['application'])\n app_params = SparkSubmit._flat_node_to_cmd_line_args(spark_app['params']) if 'params' in spark_app else []\n self.app_config.extend(app_params)\n if 'resources' in spark_app:\n resources = [ ['--%s' % item] + (spark_app['resources'][item]) for item in spark_app['resources'].keys() ]\n self.resources = list(itertools.chain(*resources))\n else:\n self.resources = []\n\n\n cluster_config = self._config_default('cluster-config', {})\n cluster_config.update(spark_config['cluster-config'])\n self.cluster_options = list(itertools.chain(*[ ['--%s' % item, str(cluster_config[item]) ] for item in cluster_config.keys() ]))\n\n\n ##other options\n ## cluster options\n other_options = self._config_default('other-config',{})\n cluster_config.update(spark_config['other-config'])\n self.other_options = list(itertools.chain(*[ ['--%s' % item, str(other_options[item]) ] for item in other_options.keys() ]))",
"def __init__(self):\n\n self.config = load_config()\n self.set_env_var()",
"def deploy_static_media(env=None, asset_version='', quick=False, haus_vars={}):\n print green('Deploying static media {}'.format('__quick__' if quick else ''))\n collectstatic(no_input=True, skip_admin=quick)",
"def __init__(__self__, *,\n type: pulumi.Input[str],\n jvm_options: Optional[pulumi.Input[str]] = None,\n relative_path: Optional[pulumi.Input[str]] = None,\n runtime_version: Optional[pulumi.Input[str]] = None,\n version: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"type\", 'Jar')\n if jvm_options is not None:\n pulumi.set(__self__, \"jvm_options\", jvm_options)\n if relative_path is not None:\n pulumi.set(__self__, \"relative_path\", relative_path)\n if runtime_version is not None:\n pulumi.set(__self__, \"runtime_version\", runtime_version)\n if version is not None:\n pulumi.set(__self__, \"version\", version)",
"def create_spark_session():\n spark = SparkSession \\\n .builder \\\n .config(\"spark.jars.packages\", \"org.apache.hadoop:hadoop-aws:2.7.0\") \\\n .config(\"spark.hadoop.fs.s3a.awsAccessKeyId\", os.environ['AWS_ACCESS_KEY_ID']) \\\n .config(\"spark.hadoop.fs.s3a.awsSecretAccessKey\", os.environ['AWS_SECRET_ACCESS_KEY']) \\\n .enableHiveSupport().getOrCreate()\n \n return spark",
"def av(self, av):\n\n self._av = av",
"def __init__(self, ffmpeg_path=None, ffprobe_path=None):\n\n def which(name):\n path = os.environ.get_parser('PATH', os.defpath)\n for d in path.split(':'):\n fpath = os.path.join(d, name)\n if os.path.exists(fpath) and os.access(fpath, os.X_OK):\n return fpath\n return None\n\n if ffmpeg_path is None:\n ffmpeg_path = 'ffmpeg'\n\n if ffprobe_path is None:\n ffprobe_path = 'ffprobe'\n\n if '/' not in ffmpeg_path:\n ffmpeg_path = which(ffmpeg_path) or ffmpeg_path\n if '/' not in ffprobe_path:\n ffprobe_path = which(ffprobe_path) or ffprobe_path\n\n self.ffmpeg_path = ffmpeg_path\n self.ffprobe_path = ffprobe_path\n\n if not os.path.exists(self.ffmpeg_path):\n raise FFMpegError(\"ffmpeg binary not found: \" + self.ffmpeg_path)\n\n if not os.path.exists(self.ffprobe_path):\n raise FFMpegError(\"ffprobe binary not found: \" + self.ffprobe_path)\n\n self.hwaccels = []\n\n self.encoders = []\n self.decoders = []\n\n self._getcapabilities()",
"def setUp(self) -> None:\n self.s3 = boto3.client('s3')\n\n try:\n self.prod_env = os.environ['TEST_ENV'] == \"prod\"\n except KeyError:\n self.prod_env = True",
"def set_dev(session):\n set_environment_variables(PYBAMM_ENV, session=session)\n envbindir = session.bin\n session.install(\"-e\", \".[all]\")\n session.install(\"cmake\")\n if sys.platform == \"linux\" or sys.platform == \"darwin\":\n session.run(\n \"echo\",\n \"export\",\n f\"LD_LIBRARY_PATH={PYBAMM_ENV['LD_LIBRARY_PATH']}\",\n \">>\",\n f\"{envbindir}/activate\",\n external=True, # silence warning about echo being an external command\n )",
"def install_vk_api_for_python():\r\n\r\n print(\"Установка необходимых библиотек...\")\r\n os.startfile('install_libs.bat', 'runas')",
"def run(cmd):\n assets.main([cmd, ASSET_NAME] + sys.argv[1:])"
] | [
"0.5061263",
"0.49802852",
"0.4814673",
"0.47707972",
"0.47029126",
"0.46798176",
"0.46687746",
"0.46055582",
"0.45604882",
"0.4547775",
"0.45412117",
"0.45352414",
"0.45286763",
"0.4524826",
"0.4498402",
"0.4491043",
"0.44897157",
"0.44726545",
"0.44317535",
"0.4423739",
"0.4416446",
"0.4385297",
"0.43744898",
"0.43702358",
"0.43640116",
"0.43550706",
"0.4341109",
"0.43344307",
"0.4330142",
"0.43208113"
] | 0.7205339 | 0 |
Add a cell to this cell group. | def add(self, cell):
self.cells.add(cell)
cell.add_group(self) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add_cell(self, cell: Cell):\n\n self.cells.append(cell)",
"def add_cell(self, cell: Cell):\r\n assert isinstance(cell, Cell)\r\n self.bucket_array.add_to_free_cell_list(cell)\r\n self.cells.append(cell)\r\n cell.block = self\r\n self.size += 1",
"def add_cell(self, cell):\r\n if cell not in self.cells:\r\n self.cells.add(cell)\r\n if cell.block == \"A\":\r\n self.blockA += 1\r\n self.blockA_free += 1\r\n self.blockA_cells.append(cell)\r\n else:\r\n assert cell.block == \"B\"\r\n self.blockB += 1\r\n self.blockB_free += 1\r\n self.blockB_cells.append(cell)",
"def add_cell(self, cell):\n\n if cell.uuid is None:\n cell.uuid = self._generate_uuid()\n\n if cell.uuid in self._cells:\n error_str = \"Trying to add an already existing cell with uuid: \"\\\n + str(cell.uuid)\n raise KeyError(error_str)\n\n self._cells[cell.uuid] = Cell.from_cell(cell)\n\n return cell.uuid",
"def append_cell(self, index, cell):\n if index == len(self._grid):\n self._grid.append([])\n self._grid[index].append(cell)",
"def insert_cell(self, cell: \"Cell\", index: int):\n\n self.cells.insert(index, cell)",
"def add_cell(self, cell: Cell):\r\n assert isinstance(cell, Cell)\r\n assert -self.pmax <= cell.gain <= self.pmax\r\n\r\n self[cell.gain].append(cell)\r\n cell.bucket_num = cell.gain + self.pmax\r\n if cell.gain > self.max_gain:\r\n self.max_gain = cell.gain",
"def add(self, cell, overwrite_duplicate=False):\n if isinstance(cell, Cell):\n if (not overwrite_duplicate and cell.name in self.cell_dict and\n self.cell_dict[cell.name] is not cell):\n raise ValueError(\"[GDSPY] cell named {0} already present in \"\n \"library.\".format(cell.name))\n self.cell_dict[cell.name] = cell\n else:\n for c in cell:\n if (not overwrite_duplicate and c.name in self.cell_dict and\n self.cell_dict[c.name] is not c):\n raise ValueError(\"[GDSPY] cell named {0} already present \"\n \"in library.\".format(c.name))\n self.cell_dict[c.name] = c\n return self",
"def add_cell(self, connections=[], cell = None):\n # Create new cell\n idx = len(self.cells)\n position = np.random.randn(3) if cell == None else cell.position + 1\n connections = connections if len(connections) != 0 else [idx - 1]\n new_cell = Cell(idx=idx, connections=connections, position=position)\n # Connect new cell with connected cells in plant \"\"\"\n for old_cell in self.cells:\n if old_cell.idx in new_cell.connections: \n old_cell.connections.append(new_cell.idx)\n # Add new cell to plant\n self.cells.append(new_cell)",
"def add(topcell, subcell, center=(0,0)):\n topcell.add(gdspy.CellReference(subcell, origin=center))",
"def add_to_free_cell_list(self, cell: Cell):\r\n assert isinstance(cell, Cell)\r\n self.free_cell_list.append(cell)",
"def add_neighbor(self, cell):\n self.__neighbors.append(cell)",
"def new_cell(self, name, overwrite_duplicate=False, update_references=True):\n cell = Cell(name)\n self.add(cell, False, overwrite_duplicate, update_references)\n return cell",
"def add_cell(self,**kwargs):\n i=None\n if '_index' in kwargs:\n i=kwargs.pop('_index')\n if i==len(self.cells): # had been self.edges, seems wrong\n # this is the index we'd get anyway.\n i=None\n else:\n assert len(self.cells)>i\n assert self.cells[i]['deleted']\n\n if i is None:\n c=np.zeros( (),dtype=self.cell_dtype)\n self.cells=array_append(self.cells,c)\n i=len(self.cells)-1\n else:\n pass\n\n # default values for native fields\n self.cells['_center'][i]=np.nan\n self.cells['_area'][i]=np.nan\n self.cells['edges'][i]=self.UNKNOWN \n\n for k,v in six.iteritems(kwargs):\n if k in ['edges','nodes']: # may have to make this more generic..\n self.cells[k][i][:len(v)] = v\n self.cells[k][i][len(v):] = self.UNDEFINED # -1\n else:\n self.cells[k][i]=v\n\n # Avoids issue with bogus value of 'deleted' coming in with kwargs\n self.cells['deleted'][i]=False\n\n if self._node_to_cells is not None:\n for n in self.cell_to_nodes(i):\n self._node_to_cells[n].append(i)\n\n if self._cell_center_index is not None:\n if self.cell_center_index_point=='circumcenter':\n cc=self.cells_center()[i]\n else: # centroid\n cc=self.cells_centroid([i])[0]\n self._cell_center_index.insert(i,cc[self.xxyy])\n\n # updated 2016-08-25 - not positive here.\n # This whole chunk needs testing.\n # maybe some confusion over when edges has to be set\n edges=self.cell_to_edges(i)\n \n if 'edges' not in kwargs:\n # wait - is this circular??\n self.cells['edges'][i,:len(edges)]=edges\n self.cells['edges'][i,len(edges):]=self.UNDEFINED\n\n nodes=self.cell_to_nodes(i)\n \n for side in range(len(edges)):\n j=edges[side]\n n1=nodes[side]\n n2=nodes[ (side+1)%len(nodes) ]\n \n if ( (n1==self.edges['nodes'][j,0]) and\n (n2==self.edges['nodes'][j,1]) ):\n # this cell is on the 'left' side of the edge\n assert self.edges['cells'][j,0]<0\n # TODO: probably this ought to be using modify_edge\n self.edges['cells'][j,0]=i\n elif ( (n1==self.edges['nodes'][j,1]) and\n (n2==self.edges['nodes'][j,0]) ):\n # this cell is on the 'right' side of the edge\n assert self.edges['cells'][j,1]<0\n # TODO: probably this ought to be using modify_edge\n self.edges['cells'][j,1]=i\n else:\n assert False\n \n self.push_op(self.unadd_cell,i)\n\n return i",
"def add_new_cell(self, x, y, color):\n # if the origin changes then we are going to need to update all of the cells in the grid with new relative\n # positions.\n self.num_colored_cells += 1\n if color != self.color:\n self.color = -1\n x_origin_change = 0\n y_origin_change = 0\n bounding_box_change = False\n if x < self.top_left_x:\n x_origin_change = self.top_left_x - x\n self.top_left_x = x\n self.bounding_box_x_len += x_origin_change\n bounding_box_change = True\n elif x > self.top_left_x + self.bounding_box_x_len:\n self.bounding_box_x_len = x - self.top_left_x\n bounding_box_change = True\n if y < self.top_left_y:\n y_origin_change = self.top_left_y - y\n self.top_left_y = y\n self.bounding_box_y_len += y_origin_change\n bounding_box_change = True\n elif y > self.top_left_y + self.bounding_box_y_len:\n self.bounding_box_y_len = y - self.top_left_y\n bounding_box_change = True\n\n if bounding_box_change:\n new_cells = np.zeros((self.bounding_box_x_len + 1, self.bounding_box_y_len + 1), dtype=np.int32)\n new_cells[x_origin_change:len(self.cells) + x_origin_change,\n y_origin_change:len(self.cells[0]) + y_origin_change] = self.cells\n self.cells = new_cells\n self.cells[x - self.top_left_x][y - self.top_left_y] = color",
"def add_cell(self, top_elt, left_elt, content, color=None):\n assert left_elt in self._cells[top_elt], (\n \"%s in not a valid row in the chart %s\" % (left_elt, self._tag))\n if color:\n this_content = r\"\\cellcolor{%s}%s\" % (color, str(content))\n else:\n this_content = content\n \n self._cells[top_elt][left_elt] = this_content",
"def create_cell(self, cid):\n self.cells[cid] = Cell()",
"def add_cell_field(self,name,data,on_exists='fail'):\n # will need to get fancier to discern vector dtypes\n # assert data.ndim==1 - maybe no need to be smart?\n if name in np.dtype(self.cell_dtype).names:\n if on_exists == 'fail':\n raise GridException(\"Node field %s already exists\"%name)\n elif on_exists == 'pass':\n return\n elif on_exists == 'overwrite':\n self.cells[name] = data\n else:\n self.cells=recarray_add_fields(self.cells,\n [(name,data)])\n self.cell_dtype=self.cells.dtype",
"def add(\n self,\n cell,\n include_dependencies=True,\n overwrite_duplicate=False,\n update_references=True,\n ):\n if isinstance(cell, Cell):\n cell_set = set([cell])\n if include_dependencies:\n cell_set.update(cell.get_dependencies(True))\n else:\n cell_set = set(cell)\n if include_dependencies:\n for c in cell:\n cell_set.update(c.get_dependencies(True))\n for c in cell_set:\n if (\n not overwrite_duplicate\n and c.name in self.cells\n and self.cells[c.name] is not c\n ):\n raise ValueError(\n \"[GDSPY] Cell named {0} already present in library.\".format(c.name)\n )\n if (\n overwrite_duplicate\n and update_references\n and c.name in self.cells\n and self.cells[c.name] is not c\n ):\n self.replace_references(c.name, c)\n self.cells[c.name] = c\n return self",
"def createCell(self, xPos, yPos):\n self.cells.append(Cell(self.screen, xPos, yPos))",
"def _add(self, cell_coord, o):\n try:\n self.d.setdefault(cell_coord, set()).add(o)\n except KeyError:\n self.d[cell_coord] = set((o,))",
"def add(self, element):\n if isinstance(element, PolygonSet):\n self.polygons.append(element)\n elif isinstance(element, RobustPath) or isinstance(element, FlexPath):\n self.paths.append(element)\n elif isinstance(element, Label):\n self.labels.append(element)\n elif isinstance(element, CellReference) or isinstance(element, CellArray):\n self.references.append(element)\n else:\n for e in element:\n if isinstance(e, PolygonSet):\n self.polygons.append(e)\n elif isinstance(e, RobustPath) or isinstance(e, FlexPath):\n self.paths.append(e)\n elif isinstance(e, Label):\n self.labels.append(e)\n elif isinstance(e, CellReference) or isinstance(e, CellArray):\n self.references.append(e)\n else:\n raise ValueError(\n \"[GDSPY] Only instances of `PolygonSet`, `FlexPath`, \"\n \"`RobustPath`, `Label`, `CellReference`, and \"\n \"`CellArray` can be added to `Cell`.\"\n )\n self._bb_valid = False\n return self",
"def put_cell(self, x, y, num):\n if self.is_empty(x,y):\n self.grid[y][x] = num\n return True\n return False",
"def add2cell(self, row, col, content, sheet):\r\n sheet.Cells(row,col).Value = content",
"def add(self, mp):\n \n self.tile_contents.append(mp)\n if(self.tile_contents[-1].raised == False):\n self.paint_blocks += 1.00",
"def add(self, pro):\n self.registry[pro.cell_id] = pro",
"def set_cell(self, point, cell):\n self._grid[point.x][point.y] = cell",
"def cell(self, cell_id):\r\n return Cell(self, cell_id)",
"def addDataCellProperty(self):\n\n if len(self.config.get('dataCell', 'propertyName')) > 0 :\n self.dataCellPropertyName = self.config.get('dataCell', 'propertyName')\n else :\n self.dataCellPropertyName = 'hasValue'\n \n self.graph.add((self.namespaces['tablink'][self.dataCellPropertyName], RDF.type, self.namespaces['qb']['MeasureProperty']))\n \n #Take labels from config\n if len(self.config.get('dataCell', 'labels')) > 0 :\n labels = self.config.get('dataCell', 'labels').split(':::')\n for label in labels :\n labelProperties = label.split('-->')\n if len(labelProperties[0]) > 0 and len(labelProperties[1]) > 0 :\n self.graph.add((self.namespaces['tablink'][self.dataCellPropertyName], RDFS.label, Literal(labelProperties[1],labelProperties[0])))\n \n if len(self.config.get('dataCell', 'literalType')) > 0 :\n self.graph.add((self.namespaces['tablink'][self.dataCellPropertyName], RDFS.range, URIRef(self.config.get('dataCell', 'literalType'))))",
"def cells(self, cells):\n\n self.container['cells'] = cells"
] | [
"0.8155925",
"0.7513701",
"0.72820836",
"0.71640146",
"0.7104436",
"0.70445544",
"0.7016003",
"0.70067173",
"0.6841812",
"0.673482",
"0.6638499",
"0.65292174",
"0.64524055",
"0.64106095",
"0.6381007",
"0.6374173",
"0.6130089",
"0.6118597",
"0.6066999",
"0.60550976",
"0.6019677",
"0.5975439",
"0.5814352",
"0.57030994",
"0.57016355",
"0.56721103",
"0.5650915",
"0.5611262",
"0.5607961",
"0.559483"
] | 0.87547165 | 0 |
Create a mapping from ``frozenset``s of possible symbols to the sets of cells whose possible symbols are exactly that ``frozenset``. | def create_possible_symbols_to_cells_mapping(self):
possibles_to_cells = defaultdict(set)
for cell in self.iterate_empty_cells():
possibles_to_cells[frozenset(cell.get_possible_symbols())].add(cell)
return possibles_to_cells | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_symbol_to_possible_cell_mapping(self):\r\n symbols_to_cells = defaultdict(set)\r\n for cell in self.iterate_empty_cells():\r\n for symbol in cell.get_possible_symbols():\r\n symbols_to_cells[symbol].add(cell)\r\n return symbols_to_cells",
"def create_cell_map(dim):\n for cell, faces in cell_face_map.iteritems():\n \n for face in faces:\n nds = face_list[face - 1][1]\n \n if not cell in cell_map:\n cell_map[cell] = copy(nds)\n \n else:\n cell_map[cell] = list(Set(cell_map[cell] + nds))",
"def create_taxon_to_state_set_map(self, char_indices=None):\n taxon_to_state_indices = {}\n for t in self.taxon_seq_map.keys():\n cdv = self[t]\n if char_indices is None:\n ci = range(len(cdv))\n else:\n ci = char_indices\n v = []\n for char_index in ci:\n cell = cdv[char_index]\n cell_value = cell.value\n try:\n state_alphabet = cell.character_type.state_alphabet\n except AttributeError:\n state_alphabet = self.default_state_alphabet\n inds = [state_alphabet.index(i) for i in cell_value.fundamental_states]\n v.append(set(inds))\n taxon_to_state_indices[t] = v\n return taxon_to_state_indices",
"def potential_value_map(self) -> DefaultDict[Hashable, Set[Cell]]:\n\n\t\tvalue_to_cell_map = defaultdict(set)\n\n\t\tfor cell in self:\n\t\t\tif not cell.value():\n\t\t\t\tfor value in cell.potential_values():\n\t\t\t\t\tvalue_to_cell_map[value].add(cell)\n\n\t\treturn value_to_cell_map",
"def _get_sym_mappings_from_permutations(permutations, atom_list_done):\n\n assert permutations.ndim == 2\n num_pos = permutations.shape[1]\n\n # filled with -1\n map_atoms = np.zeros((num_pos,), dtype='intc') - 1\n map_syms = np.zeros((num_pos,), dtype='intc') - 1\n\n atom_list_done = set(atom_list_done)\n for atom_todo in range(num_pos):\n for (sym_index, permutation) in enumerate(permutations):\n if permutation[atom_todo] in atom_list_done:\n map_atoms[atom_todo] = permutation[atom_todo]\n map_syms[atom_todo] = sym_index\n break\n else:\n text = (\"Input forces are not enough to calculate force constants,\"\n \"or something wrong (e.g. crystal structure does not \"\n \"match).\")\n print(textwrap.fill(text))\n raise ValueError\n\n assert set(map_atoms) & set(atom_list_done) == set(map_atoms)\n assert -1 not in map_atoms\n assert -1 not in map_syms\n return map_atoms, map_syms",
"def symbolsAssign(clusters):\n \n alphabet = ['A','a','B','b','C','c','D','d','E','e',\n 'F','f','G','g','H','h','I','i','J','j',\n 'K','k','L','l','M','m','N','n','O','o',\n 'P','p','Q','q','R','r','S','s','T','t',\n 'U','u','V','v','W','w','X','x','Y','y','Z','z']\n \n clusters = pd.Series(clusters)\n N = len(clusters.unique())\n\n cluster_sort = [0] * N \n counter = collections.Counter(clusters)\n for ind, el in enumerate(counter.most_common()):\n cluster_sort[ind] = el[0]\n\n if N >= len(alphabet):\n alphabet = [chr(i+33) for i in range(0, N)]\n else:\n alphabet = alphabet[:N]\n hashm = dict(zip(cluster_sort + alphabet, alphabet + cluster_sort))\n strings = [hashm[i] for i in clusters]\n return strings, hashm",
"def decode_cell_names(iterable):\n mapper = two_bit_mapper(np.unique(iterable))\n return [mapper[x] for x in iterable]",
"def components(self) -> Iterable[Mapping[T, Set[T]]]:",
"def __set_symbol_dict(self):\r\n return {0: list(alph) if self.is_case_snstv else list(alph)[:26],\r\n 1: list(dgt),\r\n 2: list(spcl) if self.is_spcl else []}",
"def map_factory(cls, list_to_alter):\n temp_list = []\n for letter in list_to_alter:\n # Only cell types which are allowed can be transformed\n if letter not in cls.dict_cells.keys():\n raise ValueError(f\"{letter} is not an allowed cell type\")\n temp_list.append(cls.dict_cells[letter]())\n\n return temp_list",
"def solved_cells(self) -> Set[Cell]:\n\t\treturn set(self.iter_solved_cells())",
"def solved_cells(self) -> Set[Cell]:\n\t\treturn set(self.iter_solved_cells())",
"def get_states_as_cells(self, oids=None, symbols=None, tokens=None):\n return [CharacterDataCell(value=s) for \\\n s in self.get_states(oids=oids, symbols=symbols, tokens=tokens)]",
"def symbols(self) -> List[SingleMapping]:\n return self._symbols",
"def _create_symbol_mapping():\n normal_items = [\"+\", \"-\"]\n unicode_items = [chr(0x2070 + i) for i in range(10, 12)]\n\n # Create a dict mapping the two.\n return DefaultDictionary(zip(normal_items, unicode_items))",
"def generate_combinations(rack,placed_tile):\n combinations_set = set()\n if placed_tile == \"\":\n for i in range(3, (len(rack)+1)):\n for x in itertools.combinations(rack, i):\n combinations_set.add(x)\n else:\n word = rack+placed_tile\n for i in range(3, (len(word)+1)):\n for x in itertools.combinations(word, i):\n if placed_tile in x:\n combinations_set.add(x)\n \n return combinations_set",
"def create_periodic_cell_face_map():\n for f0, f1 in periodic_face_map.iteritems():\n # f0, f1 = periodic face0 - face1\n face0 = face_list[f0 - 1]\n face1 = face_list[f1 - 1] # shadow\n nd, nds, cells, bc_type, zone_id = [0,]*2, [0,]*2, [0,]*2, [0,]*2, [0,]*2\n for i, ff in enumerate([face0, face1]):\n nd[i], nds[i], cells[i], bc_type[i], zone_id[i] = ff\n \n cell_face_pair = []\n for i in range(2):\n c = max(cells[i])\n if len(nds[i]) == 2:\n cell_face_pair.append((c, face_map[c][(nds[i][0], nds[i][1])]))\n else:\n cell_face_pair.append((c, face_map[c][eval('f'+str(i))]))\n \n periodic_cell_face_map[cell_face_pair[0]] = cell_face_pair[1]\n periodic_cell_face_map[cell_face_pair[1]] = cell_face_pair[0]",
"def get_all_combinations(self, hash_set):\n\t\tnames = sorted(hash_set)\n\t\treturn [dict(zip(names, prod)) for prod in it.product(\n\t\t*(hash_set[name] for name in names))]",
"def self_affine_equivalent_mappings(s):\n result = []\n for cstt_in in range(0, len(s)):\n for cstt_out in range(0, len(s)):\n mappings = linear_equivalence(\n s,\n [oplus(cstt_out, s[oplus(cstt_in, x)]) for x in range(0, len(s))],\n all_mappings=True\n )\n for AB in mappings:\n A = [oplus(apply_bin_mat(x, AB[0]), cstt_in) for x in range(0, len(s))]\n B = [apply_bin_mat(oplus(x, cstt_out), AB[1]) for x in range(0, len(s))]\n result.append([A, B])\n return result",
"def create_city_map(n: int) -> set:\n return set((row, col) for row in range(0, n) for col in range(0, n))",
"def canonical_variables(self):\n if not hasattr(self, 'bound_symbols'):\n return {}\n dums = numbered_symbols('_')\n reps = {}\n # watch out for free symbol that are not in bound symbols;\n # those that are in bound symbols are about to get changed\n bound = self.bound_symbols\n names = {i.name for i in self.free_symbols - set(bound)}\n for b in bound:\n d = next(dums)\n if b.is_Symbol:\n while d.name in names:\n d = next(dums)\n reps[b] = d\n return reps",
"def to_set(self):\n\n return frozenset(\n (i, j, self[i][j]) for i, j in self.cell_index_iter if self[i][j] is not None\n )",
"def cells_list(self):\n xx, yy = np.meshgrid(self.x_spacings, self.y_spacings)\n return np.vstack([yy.ravel(), xx.ravel()]).transpose()",
"def create_label_map(label_lists, trailing_piece_tag=\"X\"):\n\n label_set = set()\n for labels in label_lists:\n label_set.update(labels)\n\n label_map = {label: i for i, label in enumerate(label_set)}\n\n if trailing_piece_tag not in label_set:\n label_map[trailing_piece_tag] = len(label_set)\n return label_map",
"def _generate_character_map(self):\n self._ct = [-1] * 256\n index = 0\n for c_range in self._meta.character_ranges:\n for c_pos in range(c_range['min'], c_range['max'] + 1):\n self._ct[c_pos] = index\n index += 1",
"def _init_symbol_tracker(self):\n # Initialize with an empty set\n atoms_indx = {symb: set([]) for symb in self.symbols}\n\n # Populate the sets\n for atom in self.atoms:\n symb = atom.symbol\n atoms_indx[symb].add(atom.index)\n return atoms_indx",
"def init_dict() -> None:\n for elem in letters:\n ascii_dict[elem] = []\n for elem in numbers:\n ascii_dict[elem] = []\n for elem in symbols:\n ascii_dict[elem] = []",
"def construct_choice_lists(sheet):\n d = {}\n for row in range(1,sheet.nrows):\n c = {}\n for col in range(0,sheet.ncols):\n c[sheet.cell(0,col).value] = sheet.cell(row,col).value\n list_name = c.pop(\"list name\")\n if list_name in d:\n d[list_name].append(c)\n else:\n d[list_name] = [c]\n return d",
"def tile_set():\n TILES = {\n \"ocean\":\"~\"\n ,\"rock\":\"R\"\n ,\"mountain\":\"M\"\n ,\"player\":\"X\"\n ,\"end\":\"⋆\"\n ,\"npc\":\"I\"\n ,\"cave\":\"C\"\n ,\"dirt\":\"+\"\n ,\"sign\":\"!\"\n }\n\n return TILES",
"def unpruned_atom_pairs(\n molecules: List[masm.Molecule], idx_map: List[Tuple[int, int]], distance_bounds: Tuple[int, int]\n) -> Set[Tuple[int, int]]:\n\n def structure_idx(c: int, i: int) -> int:\n return idx_map.index((c, i))\n\n pairs: Set[Tuple[int, int]] = set()\n\n for component, molecule in enumerate(molecules):\n for i in molecule.graph.atoms():\n distances = np.array(masm.distance(i, molecule.graph))\n partners = np.nonzero((distances <= max(distance_bounds)) & (distances >= min(distance_bounds)))[0]\n\n # Back-transform to structure indices and add to set\n s_i = structure_idx(component, i)\n s_partners = [structure_idx(component, j) for j in partners]\n pairs |= set(make_sorted_pair(s_i, s_j) for s_j in s_partners)\n\n return pairs"
] | [
"0.8228644",
"0.6247691",
"0.5996187",
"0.59912086",
"0.57363427",
"0.56508225",
"0.5617025",
"0.5577898",
"0.55488765",
"0.5521399",
"0.55198526",
"0.55198526",
"0.5480722",
"0.54555774",
"0.542111",
"0.5322558",
"0.5308515",
"0.5259221",
"0.5250977",
"0.52377856",
"0.5204893",
"0.5201337",
"0.5111101",
"0.50935185",
"0.5092615",
"0.5082441",
"0.50784504",
"0.5064528",
"0.5053132",
"0.5027665"
] | 0.8634398 | 0 |
Create a mapping from each symbol to the set of cells in which that symbol is a possible assignment. | def create_symbol_to_possible_cell_mapping(self):
symbols_to_cells = defaultdict(set)
for cell in self.iterate_empty_cells():
for symbol in cell.get_possible_symbols():
symbols_to_cells[symbol].add(cell)
return symbols_to_cells | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def create_possible_symbols_to_cells_mapping(self):\r\n possibles_to_cells = defaultdict(set)\r\n for cell in self.iterate_empty_cells():\r\n possibles_to_cells[frozenset(cell.get_possible_symbols())].add(cell)\r\n return possibles_to_cells",
"def _create_symbol_mapping():\n normal_items = [\"+\", \"-\"]\n unicode_items = [chr(0x2070 + i) for i in range(10, 12)]\n\n # Create a dict mapping the two.\n return DefaultDictionary(zip(normal_items, unicode_items))",
"def create_cell_map(dim):\n for cell, faces in cell_face_map.iteritems():\n \n for face in faces:\n nds = face_list[face - 1][1]\n \n if not cell in cell_map:\n cell_map[cell] = copy(nds)\n \n else:\n cell_map[cell] = list(Set(cell_map[cell] + nds))",
"def __set_symbol_dict(self):\r\n return {0: list(alph) if self.is_case_snstv else list(alph)[:26],\r\n 1: list(dgt),\r\n 2: list(spcl) if self.is_spcl else []}",
"def map_factory(cls, list_to_alter):\n temp_list = []\n for letter in list_to_alter:\n # Only cell types which are allowed can be transformed\n if letter not in cls.dict_cells.keys():\n raise ValueError(f\"{letter} is not an allowed cell type\")\n temp_list.append(cls.dict_cells[letter]())\n\n return temp_list",
"def potential_value_map(self) -> DefaultDict[Hashable, Set[Cell]]:\n\n\t\tvalue_to_cell_map = defaultdict(set)\n\n\t\tfor cell in self:\n\t\t\tif not cell.value():\n\t\t\t\tfor value in cell.potential_values():\n\t\t\t\t\tvalue_to_cell_map[value].add(cell)\n\n\t\treturn value_to_cell_map",
"def _create_subscript_mapping():\n # Create the normal and subscript digits list.\n normal_digits = [i for i in range(10)]\n subscript_digits = [chr(0x2080 + i) for i in range(10)]\n\n # Convert the normal digits to strings.\n normal_digits = [str(i) for i in normal_digits]\n\n # Create a dict mapping the two.\n return DefaultDictionary(zip(normal_digits, subscript_digits))",
"def init_dict() -> None:\n for elem in letters:\n ascii_dict[elem] = []\n for elem in numbers:\n ascii_dict[elem] = []\n for elem in symbols:\n ascii_dict[elem] = []",
"def get_map(self):\n\n self.mp = defaultdict(lambda : ord('x'))\n y, x = 0, 0\n while True:\n cond, output = self.ic()\n\n if cond: break\n # New row of the print out\n if output == 10:\n y += 1\n x = 0\n # Assign the value to the map\n else:\n self.mp[y,x] = output\n x += 1\n \n return self.mp",
"def create_taxon_to_state_set_map(self, char_indices=None):\n taxon_to_state_indices = {}\n for t in self.taxon_seq_map.keys():\n cdv = self[t]\n if char_indices is None:\n ci = range(len(cdv))\n else:\n ci = char_indices\n v = []\n for char_index in ci:\n cell = cdv[char_index]\n cell_value = cell.value\n try:\n state_alphabet = cell.character_type.state_alphabet\n except AttributeError:\n state_alphabet = self.default_state_alphabet\n inds = [state_alphabet.index(i) for i in cell_value.fundamental_states]\n v.append(set(inds))\n taxon_to_state_indices[t] = v\n return taxon_to_state_indices",
"def symbolsAssign(clusters):\n \n alphabet = ['A','a','B','b','C','c','D','d','E','e',\n 'F','f','G','g','H','h','I','i','J','j',\n 'K','k','L','l','M','m','N','n','O','o',\n 'P','p','Q','q','R','r','S','s','T','t',\n 'U','u','V','v','W','w','X','x','Y','y','Z','z']\n \n clusters = pd.Series(clusters)\n N = len(clusters.unique())\n\n cluster_sort = [0] * N \n counter = collections.Counter(clusters)\n for ind, el in enumerate(counter.most_common()):\n cluster_sort[ind] = el[0]\n\n if N >= len(alphabet):\n alphabet = [chr(i+33) for i in range(0, N)]\n else:\n alphabet = alphabet[:N]\n hashm = dict(zip(cluster_sort + alphabet, alphabet + cluster_sort))\n strings = [hashm[i] for i in clusters]\n return strings, hashm",
"def create_periodic_cell_face_map():\n for f0, f1 in periodic_face_map.iteritems():\n # f0, f1 = periodic face0 - face1\n face0 = face_list[f0 - 1]\n face1 = face_list[f1 - 1] # shadow\n nd, nds, cells, bc_type, zone_id = [0,]*2, [0,]*2, [0,]*2, [0,]*2, [0,]*2\n for i, ff in enumerate([face0, face1]):\n nd[i], nds[i], cells[i], bc_type[i], zone_id[i] = ff\n \n cell_face_pair = []\n for i in range(2):\n c = max(cells[i])\n if len(nds[i]) == 2:\n cell_face_pair.append((c, face_map[c][(nds[i][0], nds[i][1])]))\n else:\n cell_face_pair.append((c, face_map[c][eval('f'+str(i))]))\n \n periodic_cell_face_map[cell_face_pair[0]] = cell_face_pair[1]\n periodic_cell_face_map[cell_face_pair[1]] = cell_face_pair[0]",
"def _generate_character_map(self):\n self._ct = [-1] * 256\n index = 0\n for c_range in self._meta.character_ranges:\n for c_pos in range(c_range['min'], c_range['max'] + 1):\n self._ct[c_pos] = index\n index += 1",
"def cell_map_from_database(self) -> None:\n for row in self.session.query(DatamapItem).all():\n self.cell_map.append(\n Cell(\n datamap_id=row.id,\n cell_key=row.key,\n cell_value=None,\n template_sheet=row.bicc_sheet,\n bg_colour=None,\n fg_colour=None,\n number_format=None,\n verification_list=None,\n cell_reference=row.bicc_cellref))",
"def create_board(self):\n board = dict()\n cell_names = self.create_cell_names()\n\n for cell_name in cell_names:\n if cell_name in self.given_cells:\n is_given = True\n value = self.given_cells[cell_name]\n else:\n is_given = False\n value = 0\n new_cell = c.Cell(cell_name, is_given, value, self.size)\n board[cell_name] = new_cell\n return board",
"def symbol_state_map(self):\n map = {}\n for state in self:\n map[state.symbol] = state\n map.update(self.symbol_synonyms)\n if not self.case_sensitive:\n for state in self:\n if state.symbol.islower():\n map[state.symbol.upper()] = state\n else:\n map[state.symbol.lower()] = state\n for symbol, state in self.symbol_synonyms.items():\n if symbol.islower():\n map[symbol.upper()] = state\n else:\n map[symbol.lower()] = state\n return map",
"def canonical_variables(self):\n if not hasattr(self, 'bound_symbols'):\n return {}\n dums = numbered_symbols('_')\n reps = {}\n # watch out for free symbol that are not in bound symbols;\n # those that are in bound symbols are about to get changed\n bound = self.bound_symbols\n names = {i.name for i in self.free_symbols - set(bound)}\n for b in bound:\n d = next(dums)\n if b.is_Symbol:\n while d.name in names:\n d = next(dums)\n reps[b] = d\n return reps",
"def letter_grid(self, assignment):\n letters = [\n [None for _ in range(self.crossword.width)]\n for _ in range(self.crossword.height)\n ]\n for variable, word in assignment.items():\n direction = variable.direction\n for k in range(len(word)):\n i = variable.i + (k if direction == Variable.DOWN else 0)\n j = variable.j + (k if direction == Variable.ACROSS else 0)\n letters[i][j] = word[k]\n return letters",
"def letter_grid(self, assignment):\n letters = [\n [None for _ in range(self.crossword.width)]\n for _ in range(self.crossword.height)\n ]\n for variable, word in assignment.items():\n direction = variable.direction\n for k in range(len(word)):\n i = variable.i + (k if direction == Variable.DOWN else 0)\n j = variable.j + (k if direction == Variable.ACROSS else 0)\n letters[i][j] = word[k]\n return letters",
"def letter_grid(self, assignment):\n letters = [\n [None for _ in range(self.crossword.width)]\n for _ in range(self.crossword.height)\n ]\n for variable, word in assignment.items():\n direction = variable.direction\n for k in range(len(word)):\n i = variable.i + (k if direction == Variable.DOWN else 0)\n j = variable.j + (k if direction == Variable.ACROSS else 0)\n letters[i][j] = word[k]\n return letters",
"def get_states_as_cells(self, oids=None, symbols=None, tokens=None):\n return [CharacterDataCell(value=s) for \\\n s in self.get_states(oids=oids, symbols=symbols, tokens=tokens)]",
"def grid_values(self, grid):\n chars = [col for col in grid if col in self.digits or col in '0.']\n assert len(chars) == 81\n return dict(zip(self.squares, chars))",
"def register_cell(pos, s):\n area = to_area(s.x, s.y, s.width, s.height)\n for p in area:\n lst = []\n if p in Globals.instance.cells:\n lst = Globals.instance.cells[p]\n else:\n Globals.instance.cells[p] = lst\n lst.append(s)",
"def initialize_assignment(self):\n # Initialize empty frozensets for each agent\n init_assignment = frozendict({a:frozenset() for a in self.agents})\n \n # Add hard assignments\n if self.hard_assignment:\n init_dict = dict(init_assignment)\n for a, t in self.hard_assignment.items():\n init_dict[a] = init_dict[a] | t\n init_assignment = frozendict(init_dict)\n \n return init_assignment",
"def boardtohashmap(board_2d: List[List[str]]) -> Dict[Tuple[int, int], Gridspace]:\n\n nrows, ncols = len(board_2d), len(board_2d[0])\n return {\n (r, c): Gridspace(r, c, board_2d[r][c], nrows, len(board_2d[r]))\n for r in range(nrows) for c in range(len(board_2d[r]))\n }",
"def initialize_symtab(self, allow_rebind_args):\n symbols = symtab.Symtab(self.symtab)\n for var_name in self.local_names:\n variable = symtab.Variable(None, name=var_name, is_local=True)\n\n # Set cellvar status. Free variables are not assignments, and\n # are caught in the type inferencer\n variable.is_cellvar = var_name in self.cellvars\n # variable.is_freevar = var_name in self.freevars\n\n variable.renameable = (\n var_name not in self.locals and not\n (variable.is_cellvar or variable.is_freevar) and\n (var_name not in self.argnames or allow_rebind_args))\n\n symbols[var_name] = variable\n\n return symbols",
"def get_map(self, chars):\n\n byte_offset = 0\n cb_map = {}\n\n for char_offset, char in enumerate(chars):\n cb_map[char_offset] = byte_offset\n byte_offset += len(char.encode('utf-8'))\n return cb_map",
"def create_map():\n pass\n # for line in range(0, shared.lines):\n # map_data[line][0] = (1, -1)\n # map_data[line][shared.columns - 1] = (1, -1)\n #\n # for column in range(0, shared.columns):\n # map_data[0, column] = (-1, 1)\n # # if column <= shared.left_space or column > shared.columns - shared.left_space:\n # map_data[shared.lines - 1, column] = (-1, 1)",
"def map_table(asm):\n # Dictionary of mappings between pre-defined names\n table = {\n \"SP\" : 0,\n \"LCL\" : 1,\n \"ARG\" : 2,\n \"THIS\" : 3,\n \"THAT\" : 4,\n \"SCREEN\": 16384,\n \"KBD\" : 24576,\n }\n # R0-R15\n for i in range(0, 16):\n table[\"R\" + str(i)] = i\n\n # Add user-defined names i.e. variables and gotos\n variables_list = [] # list of all @-values\n reg = 16 # start after R15\n count = -1 # keep track of instruction memory position\n\n for line in asm:\n parsed, flag = parser(line)\n\n if flag == \"GOTO_INSTRUCTION\":\n table[parsed] = count + 1 # add next position after goto\n elif flag == \"A_DECIMAL\":\n count += 1\n elif flag == \"A_INSTRUCTION\":\n if parsed not in variables_list:\n variables_list.append(parsed) # append to list if it doesn't exist\n count += 1\n elif flag == \"C_INSTRUCTION\":\n count += 1\n\n for i in variables_list:\n try:\n table[i]\n except KeyError:\n table[i] = reg # if key doesn't exist add it\n reg += 1\n\n return table",
"def grid_vals(grid):\n\tletters = list(grid)\n\t#print \"---------------------------------\\n-------------------\"\n\t#print letters\n\t#print \"----------------------------------\\n-------------------\"\n\tassert len(letters) == 81\n\ttempdict = zip(squares, letters)\n\treturn dict(tempdict)"
] | [
"0.79313326",
"0.62948644",
"0.6092999",
"0.59227717",
"0.56628823",
"0.56573343",
"0.5636128",
"0.5633549",
"0.5541741",
"0.5522341",
"0.5443451",
"0.537345",
"0.53710127",
"0.5341381",
"0.5305604",
"0.5301605",
"0.526607",
"0.5264708",
"0.5264708",
"0.5264708",
"0.5242213",
"0.5235913",
"0.5225676",
"0.51644385",
"0.5143836",
"0.5136465",
"0.5132531",
"0.5132122",
"0.5127119",
"0.5124914"
] | 0.8147504 | 0 |
Remove this group's cells from other groups when this group is a subgroup of another group. | def remove_as_subgroup(self, other_groups):
symbols_to_exclude = reduce(lambda alphabet, cell: alphabet.union(cell.get_possible_symbols()),
self.cells, set())
my_cells = set(self.cells)
for group in other_groups:
if my_cells.issubset(group.cells) and self is not group:
# Remove my cells from the other group
for cell in self.cells:
cell.remove_group(group)
group.cells.remove(cell)
# Update the alphabets in the other group
for cell in group.cells:
cell.remove_possible_symbols(symbols_to_exclude) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove_from_group(self, group):\n\n if self.in_group(group):\n self.secondary_groups.remove(group)\n return self",
"def test_remove_self_as_parent(self):\n groupa, groupb = Group('groupa'), Group('groupb')\n groupa.add_parent(groupb)\n groupb.del_parent(groupb)",
"def removeGroup(self, *args):\n return _libsbml.GroupsModelPlugin_removeGroup(self, *args)",
"def remove_from_group(self, org, contact, group):\n pass",
"def test_070_delete_group_from_group(self):\n\n testflow.step(\n \"Removing group %s from group %s\",\n TEST_GROUP1, TEST_GROUP2\n )\n assert MANAGE_CLI.run(\n 'groupdel',\n TEST_GROUP1,\n group=TEST_GROUP2,\n )[0], \"Failed to delete group from group '%s'\" % TEST_GROUP1",
"def remove_group(self, resolvable):\n group = self._resolve_group(resolvable)\n\n for membership in self.group_memberships:\n if membership.group.href == group.href:\n membership.delete()\n return\n\n raise StormpathError({\n 'developerMessage': 'This user is not part of Group %s.' % group.name,\n })",
"def del_from_groups(self, username, groups):\n pass",
"def test_04_self_unshare_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n dog.uaccess.unshare_group_with_user(meowers, dog)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertFalse(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [], dog.uaccess.get_group_unshare_users(meowers)))",
"def _merge_groups(self, group, newgroup):\n\n # name\n if group.name != newgroup.name:\n raise errors.AnsibleError(\"Cannot merge group %s with %s\" % (group.name, newgroup.name))\n\n # depth\n group.depth = max([group.depth, newgroup.depth])\n\n # hosts list (host objects are by now already added to self.hosts)\n for host in newgroup.hosts:\n grouphosts = dict([(h.name, h) for h in group.hosts])\n if host.name in grouphosts:\n # same host name but different object, merge\n self._merge_hosts(grouphosts[host.name], host)\n else:\n # new membership, add host to group from self\n # group from self will also be added again to host.groups, but\n # as different object\n group.add_host(self.hosts[host.name])\n # now remove this the old object for group in host.groups\n for hostgroup in [g for g in host.groups]:\n if hostgroup.name == group.name and hostgroup != self.groups[group.name]:\n self.hosts[host.name].groups.remove(hostgroup)\n\n\n # group child membership relation\n for newchild in newgroup.child_groups:\n # dict with existing child groups:\n childgroups = dict([(g.name, g) for g in group.child_groups])\n # check if child of new group is already known as a child\n if newchild.name not in childgroups:\n self.groups[group.name].add_child_group(newchild)\n\n # group parent membership relation\n for newparent in newgroup.parent_groups:\n # dict with existing parent groups:\n parentgroups = dict([(g.name, g) for g in group.parent_groups])\n # check if parent of new group is already known as a parent\n if newparent.name not in parentgroups:\n if newparent.name not in self.groups:\n # group does not exist yet in self, import him\n self.groups[newparent.name] = newparent\n # group now exists but not yet as a parent here\n self.groups[newparent.name].add_child_group(group)\n\n # variables\n group.vars = utils.combine_vars(group.vars, newgroup.vars)",
"def link_groups(self, groups):\n # Find the largest group\n max_group = groups[0]\n for group in groups:\n if group.size > max_group.size:\n max_group = group\n # Remove it from the list\n groups.remove(max_group)\n # Iterate over the smaller groups\n for group in groups:\n # Merge the sets containing the stones in that group\n max_group.add_members(group.members)\n for stone_index in group.members:\n self.get(stone_index).group = max_group\n # And remove the smaller group from the global list\n self.groups.remove(group)",
"def remove_groups(self, resolvables):\n memberships = [membership for membership in self.group_memberships]\n for g in [self._resolve_group(group) for group in resolvables]:\n done = False\n for membership in memberships:\n if membership.group.href == g.href:\n membership.delete()\n done = True\n\n if not done:\n raise StormpathError({\n 'developerMessage': 'This user is not part of Group %s.' % g.name,\n })",
"def test_delete_group_reparent_groups(self, inventoryloader):\n inventoryloader.del_group('glance_all', reparent_groups=True)\n assert inventoryloader.groups['glance_api'].has_group('all')\n assert inventoryloader.groups['all'].has_group('glance_api')",
"def test_remove_parent(self):\n groupa, groupb = self.test_add_parent()\n groupa.del_parent(groupb)\n assert groupb not in groupa.parents\n assert groupa not in groupb.children",
"def remove(self, *args):\n return _libsbml.ListOfGroups_remove(self, *args)",
"def test_ungrouping(self):\n n1 = self.create(NodeItem, UML.Node)\n n2 = self.create(NodeItem, UML.Node)\n\n self.group(n1, n2)\n self.ungroup(n1, n2)\n\n assert n2.subject not in n1.subject.nestedNode\n assert n1.subject not in n2.subject.nestedNode",
"async def async_remove_from_group(self, device):\n if device.entity_id in self._multiroom_group:\n self._multiroom_group.remove(device.entity_id)\n# await self.async_schedule_update_ha_state(True)\n\n if len(self._multiroom_group) <= 1:\n self._multiroom_group = []\n self._is_master = False\n self._slave_list = None\n\n for member in self._multiroom_group:\n for player in self.hass.data[DOMAIN].entities:\n if player.entity_id == member and player.entity_id != self.entity_id:\n await player.async_set_multiroom_group(self._multiroom_group)",
"def clear_Groups(self):\n\n\t\tself.__groups[:] = []",
"async def async_will_remove_from_hass(self) -> None:\n self._group.set_callback(None)\n self.hass.data[DOMAIN][self._entry_id].groups.remove(self)",
"def remove_group():\n _id = request.form['_id']\n data, code, message = FIELD_SERVICE.remove_group(_id)\n return __result(data, code, message)",
"def __check_removed_groups(self) -> None:\n for group in Group.objects.all():\n if group.name not in main_app_groups:\n self.__delete_group(group)\n\n self.stdout.write(f'Removed {group} group')",
"def remove_group(self, index):\n group = self.get(index).group\n for stone_index in group.members:\n self.board[stone_index] = None",
"def group_remove(group, board):\n for xy in group:\n board[xy[0]][xy[1]] = None\n return deepcopy(board)",
"def test_05_self_can_downgrade_group(self):\n meowers = self.meowers\n cat = self.cat\n dog = self.dog\n cat.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.CHANGE)\n self.assertTrue(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))\n dog.uaccess.share_group_with_user(meowers, dog, PrivilegeCodes.VIEW)\n self.assertFalse(dog in meowers.gaccess.edit_users)\n self.assertTrue(dog in meowers.gaccess.members)\n self.assertTrue(\n is_equal_to_as_set(\n [dog],\n dog.uaccess.get_group_unshare_users(meowers)))",
"def drop_groups(self, group_ids=None):\n return self.groups.delete(group_ids)",
"def delete_challenge_groups_hook(*_, instance: Challenge, using, **__):\n try:\n instance.admins_group.delete(using=using)\n except ObjectDoesNotExist:\n pass\n\n try:\n instance.participants_group.delete(using=using)\n except ObjectDoesNotExist:\n pass",
"async def leave(self):\n\t\tif self.group == None:\n\t\t\traise exceptions.ClientError('NO_GROUP')\n\n\t\tawait self.group.remove(self)\n\n\t\tself.group = None",
"def unlink_Group(self, group):\n\t\tself.__groups.remove(group.weakref)\n\t\tself._cli_invalidate()",
"def delete_group(self, group):\n raise NotImplementedError('delete_group')",
"def group_collide(sprite_group, other_object):\n sprites = set(sprite_group)\n for sprite in sprites:\n if sprite.collide(other_object):\n sprite_group.remove(sprite)\n sprite.delete()\n return True\n return False",
"def cleanup_user_groups(event):\n name = event.object.name\n\n if name.startswith(\"group:\"):\n principals = get_principals()\n users_groups = [p for p in principals if name in principals[p].groups]\n for user_or_group in users_groups:\n principals[user_or_group].groups.remove(name)\n\n DBSession.query(LocalGroup).filter(\n LocalGroup.principal_name == name).delete()"
] | [
"0.7457541",
"0.6660896",
"0.66377795",
"0.66275835",
"0.64256984",
"0.63906753",
"0.6237391",
"0.62194926",
"0.6219388",
"0.62129915",
"0.6203625",
"0.61968744",
"0.6175795",
"0.616916",
"0.6147823",
"0.59947467",
"0.59588474",
"0.5858979",
"0.58396405",
"0.5839059",
"0.58359075",
"0.58189666",
"0.58139735",
"0.578222",
"0.57493716",
"0.57372874",
"0.57156444",
"0.5709839",
"0.5707246",
"0.56842554"
] | 0.7494452 | 0 |
Remove from this group any cells that already have an assigned symbol. | def remove_assigned_cells(self):
cells = list(self.cells)
for cell in ifilter(lambda cell: cell.symbol is not None, cells):
cell.remove_group(self)
self.cells.remove(cell)
return len(cells) != len(self.cells) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def take_symbol(self, symbol):\r\n if symbol not in self._taken_symbols:\r\n for cell in self.cells:\r\n cell.remove_possible_symbol(symbol)\r\n self._taken_symbols.add(symbol)",
"def unsetSymbol(self):\n return _libsbml.InitialAssignment_unsetSymbol(self)",
"def remove_dummy(self) -> None:\n\n for i, atom in enumerate(self):\n if isinstance(atom, DummyAtom):\n del self[i]\n return",
"def remove_checker(self, col):\n for r in range(self.height):\n if self.slots[r][col] != ' ':\n self.slots[r][col] = ' '\n break",
"def del_cells(self):\t\r\n del self._cells",
"def _remove(self, cell_coord, o):\n cell = self.d[cell_coord]\n cell.remove(o)\n\n # Delete the cell from the hash if it is empty.\n if not cell:\n del(self.d[cell_coord])",
"def del_symbol(self):\n if not self.tbl_symbols.selectedRanges():\n return\n\n confirm_message = \"Delete the selected symbol?\"\n reply = QtWidgets.QMessageBox().question(self, 'Message',\n confirm_message,\n QtWidgets.QMessageBox.Yes,\n QtWidgets.QMessageBox.No)\n\n if reply == QtWidgets.QMessageBox.Yes:\n row = self.tbl_symbols.currentRow()\n state_item = self.tbl_symbols.item(row, 0)\n state = state_item.text()\n self.symbols.pop(state, None)\n self.tbl_symbols.removeRow(row)\n self.clear_form()",
"def cell_removed(self):\n self.stop_discharge()\n self.set_empty()\n log.info(\"Cell removed from slot {}.\".format(self.channel))",
"def remove_as_subgroup(self, other_groups):\r\n symbols_to_exclude = reduce(lambda alphabet, cell: alphabet.union(cell.get_possible_symbols()),\r\n self.cells, set())\r\n my_cells = set(self.cells)\r\n\r\n for group in other_groups:\r\n if my_cells.issubset(group.cells) and self is not group:\r\n # Remove my cells from the other group\r\n for cell in self.cells:\r\n cell.remove_group(group)\r\n group.cells.remove(cell)\r\n\r\n # Update the alphabets in the other group\r\n for cell in group.cells:\r\n cell.remove_possible_symbols(symbols_to_exclude)",
"def remove_cells(self, tag):\n tagged_cells = self.get_cells(tag)\n if tagged_cells:\n print(f\"- removing cells tagged {tag} from {self.filename}\")\n self.content.cells = filter(lambda cell: cell not in tagged_cells, self.content.cells)",
"def removeSpeciesGlyph(self, *args):\n return _libsbml.Layout_removeSpeciesGlyph(self, *args)",
"def remove(self, cell, remove_references=True):\n if isinstance(cell, Cell):\n name = cell.name\n else:\n name = cell\n if name in self.cells:\n del self.cells[name]\n removed = 0\n if remove_references:\n for c in self.cells.values():\n removed += len(c.references)\n c.references = [\n ref\n for ref in c.references\n if name\n != (\n ref.ref_cell.name\n if isinstance(ref.ref_cell, Cell)\n else ref.ref_cell\n )\n ]\n removed -= len(c.references)\n return removed",
"def drop_unattached(self):\n for x in range(self.size):\n for y in range(self.size):\n coords = (x, y)\n if self.is_cell_unattached(coords):\n self.drop([coords])",
"def updateEmptiesSet(self):\n self.emptiesSet = []\n for i in self.Range:\n if self.get_cell(i) == 0:\n self.emptiesSet.append(i)",
"def cleanup(self):\n for residue in self.debumper.biomolecule.residues:\n if not isinstance(residue, aa.Amino):\n continue\n if residue.name == \"GLH\" or \"GLH\" in residue.patches:\n if residue.has_atom(\"HE1\") and residue.has_atom(\"HE2\"):\n residue.remove_atom(\"HE1\")\n elif residue.name == \"ASH\" or \"ASH\" in residue.patches:\n if residue.has_atom(\"HD1\") and residue.has_atom(\"HD2\"):\n residue.remove_atom(\"HD1\")",
"def clear(self):\n\n for cell in self.cells:\n cell.clear()",
"def static_drop(self):\n if self.any_in_buffer(self.active_piece):\n return\n for cell in TransformPiece.sort_cells(self.grid.keys(), self.current_direction):\n self.drop([cell])",
"def remove(self, board):\n for c in board.copy():\n while self in c:\n index = tuple(c.inputs.values()).index(self)\n key = tuple(c.inputs.keys())[index]\n c.inputs[key] = None\n # fixes possible memory leak\n self.inputs = {k: None for k, v in self.inputs.items()}",
"def unoccupied(self):\n self.is_occupied = 0\n for hex in self.fon:\n hex.remove_neighbor()\n hex.set_quality()",
"def complement(self):\n for cell in self.compact:\n cell.set(not cell.peg)",
"def removeSpeciesReferenceGlyph(self, *args):\n return _libsbml.Layout_removeSpeciesReferenceGlyph(self, *args)",
"def remove(self, *nonterminals):\n # type: (Iterable[Type[Nonterminal]]) -> None\n for nonterm in set(nonterminals):\n _NonterminalSet._control_nonterminal(nonterm)\n if nonterm not in self:\n raise KeyError('Nonterminal ' + nonterm.__name__ + ' is not inside')\n self._grammar.rules.remove(*self._assign_map[nonterm], _validate=False)\n del self._assign_map[nonterm]\n if self._grammar.start is nonterm:\n del self._grammar.start\n super().remove(nonterm)",
"def remove_piece(self) -> None:\r\n if self.has_piece():\r\n self.piece.square = None\r\n self.piece = None",
"def removeSymbol(self, address: ghidra.program.model.address.Address, name: unicode) -> bool:\n ...",
"def group_remove(group, board):\n for xy in group:\n board[xy[0]][xy[1]] = None\n return deepcopy(board)",
"def mark_safe(self, cell):\n if cell in self.cells:\n self.cells.remove(cell)",
"def unsolved_cells(self) -> Set[Cell]:\n\t\treturn set(self.iter_unsolved_cells())",
"def unsolved_cells(self) -> Set[Cell]:\n\t\treturn set(self.iter_unsolved_cells())",
"def __del__(self) -> None:\n self.map.solid_id.discard(self.id)",
"def remove_possibles(self):\n for row in range(self.board_size):\n for col in range(self.board_size):\n self.remove_poss(row, col)"
] | [
"0.6538775",
"0.5958141",
"0.5792549",
"0.57550293",
"0.5732101",
"0.56934553",
"0.56710565",
"0.5628901",
"0.5613413",
"0.55985063",
"0.5562257",
"0.5482202",
"0.5455962",
"0.5439964",
"0.54102564",
"0.5367527",
"0.53613144",
"0.53547674",
"0.530147",
"0.5270734",
"0.5245552",
"0.5241578",
"0.52134025",
"0.52066183",
"0.51995367",
"0.51857287",
"0.5172037",
"0.5172037",
"0.5158854",
"0.51532084"
] | 0.80309904 | 0 |
Create Column Headers for Various Modes | def make_header(mode):
return ("{}\n".format('\t'.join(
['#chrom', 'coord', 'total', 'dtotal'] +
DIVNAMES[mode] +
['{}_{}'.format(x, y)
for x in STATNAMES[mode]
for y in ('left', 'right', 'total',
'stat', 'chisq', 'Pvalue')] +
['introgression'] +
['introg{}'.format(x) for x in INTROGPATTERNS[mode]]))
).encode('utf-8') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _generateColumnHeader(self, obj, **args):\n result = []\n header = self._script.utilities.columnHeaderForCell(obj)\n if not header:\n return result\n\n text = self._script.utilities.displayedText(header)\n if not text:\n return result\n\n roleString = self.getLocalizedRoleName(obj, pyatspi.ROLE_COLUMN_HEADER)\n if args.get('mode') == 'speech':\n if settings.speechVerbosityLevel == settings.VERBOSITY_LEVEL_VERBOSE \\\n and not args.get('formatType') in ['basicWhereAmI', 'detailedWhereAmI']:\n text = \"%s %s\" % (text, roleString)\n elif args.get('mode') == 'braille':\n text = \"%s %s\" % (text, roleString)\n\n result.append(text)\n return result",
"def make_colheadings( output_type ):\n col0a = 'Target '.rjust( 11 )\n col0b = ''.rjust( 11 )\n col1a = 'Epoch'.center( 8 )\n col1b = '(MJD)'.center( 8 )\n col2a = 'Time Start'.center( 19 )\n col2b = '(UT)'.center( 19 )\n col3a = 'Time End'.center( 19 )\n col3b = '(UT)'.center( 19 )\n col4a = 'Zenith'.center( 6 )\n col4b = '(deg)'.center( 6 )\n col5a = 'Airm'.center( 4 )\n col5b = ''.center( 4 )\n col6a = 'Transit-type'.center( 21 )\n col6b = ''.center( 21 )\n col7a = 'Moon-type'.center( 12 )\n col7b = ''.center( 12 )\n col8a = 'Moon-dist'.center( 9 )\n col8b = '(deg)'.center( 9 )\n col9a = 'Moon-phase'.center( 10 )\n col9b = '(percent)'.center( 10 )\n if output_type=='byplanet':\n colheadingsa = '#{0} {1} {2} {3} {4} {5} {6} {7} {8}\\n'\\\n .format( col1a, col2a, col3a, col4a, col5a, \\\n col6a, col7a, col8a, col9a )\n colheadingsb = '#{0} {1} {2} {3} {4} {5} {6} {7} {8}\\n'\\\n .format( col1b, col2b, col3b, col4b, col5b, \\\n col6b, col7b, col8b, col9b )\n elif output_type=='chronolog':\n colheadingsa = '#{0} {1} {2} {3} {4} {5} {6} {7} {8} {9}\\n'\\\n .format( col0a, col1a, col2a, col3a, col4a, col5a, \\\n col6a, col7a, col8a, col9a )\n colheadingsb = '#{0} {1} {2} {3} {4} {5} {6} {7} {8} {9}\\n'\\\n .format( col0b, col1b, col2b, col3b, col4b, col5b, \\\n col6b, col7b, col8b, col9b )\n return colheadingsa, colheadingsb",
"def customize_headers(self,executer, tree, cursor, table,custom_headers):\n headers = executer.get_columns(table, cursor)\n tree[\"columns\"] = custom_headers\n\n\n set_width = int(self.column_length_configurator / len(headers))\n\n # Setting columns width and headers\n for column in custom_headers:\n tree.column(column, width=set_width, minwidth=self.min_width)\n tree.heading(column, text=column)",
"def columnTitles(self):\n \n pass",
"def columnTitles(self):\n \n pass",
"def _generateColumnHeaderIfToggleAndNoText(self, obj, **args):\n # If we're reading just a single cell in speech, the new\n # header portion is going to give us this information.\n #\n if args['mode'] == 'speech' and not args.get('readingRow', False):\n return []\n\n result = []\n descendant = self._script.utilities.realActiveDescendant(obj)\n label = self._script.utilities.displayedText(descendant)\n if not label and self._script.utilities.hasMeaningfulToggleAction(obj):\n accHeader = self._script.utilities.columnHeaderForCell(obj)\n result.append(accHeader.name)\n return result",
"def _addStatsHeadersToMatrix(self, m):\n\n atoz = \"JKLMNOPQRSTUVWXYZABCDEFGHI\"\n\n counter = 0\n\n for col in m.TopAxis.DataMembers:\n if counter < 26:\n logicalletter = str(atoz[counter])\n col.MemberSigTestHeading = logicalletter\n counter += 1\n else:\n counter = 0",
"def init_column_headers(self):\r\n col_headers = {1: \"bid\", 2: \"tricks\", 3: \"score\"}\r\n for player in range(1, 5):\r\n ent_player_name = tk.Entry(self.master, font='courier 10 bold', fg='blue',\r\n borderwidth=2, relief=\"groove\")\r\n ent_player_name.grid(row=0, column=(player - 1) * 3 + 1, columnspan=3,\r\n sticky=tk.W + tk.E, pady=5)\r\n ent_player_name.insert(0, \"Player\" + str(player))\r\n self.ent_player_names.append(ent_player_name)\r\n for key in col_headers:\r\n lbl_column_header = tk.Label(self.master, text=col_headers[key],\r\n font='courier 10 bold', fg='blue',\r\n borderwidth=2, relief=\"groove\")\r\n lbl_column_header.grid(row=1, column=(player - 1) * 3 + key,\r\n sticky=tk.W + tk.E, pady=2)",
"def set_headers(self,executer, tree, cursor, table, columns_size):\n\n # Getting headers\n headers = executer.get_columns(table, cursor)\n tree[\"columns\"] = headers\n\n # Setting width to all column headers basing on columns amount.\n set_width = int(self.column_length_configurator/len(headers))\n\n\n # Setting columns width and headers\n for column in headers:\n tree.column(column, width=set_width,minwidth=self.min_width)\n tree.heading(column, text=column)",
"def setup_normalyzer_header(design_matrix: DF, annot_cols: List[str], normalyzer_vals:DF) -> DF:\n\n # Get numbers set up as list of stringified numbers ('-1', '0', '0', '1', '1')\n nbr_annot_cols = len(annot_cols)\n sample_head = [-1] + [0] * (nbr_annot_cols - 1) + list(design_matrix['biorepgroup'])\n sample_head_str = [str(e) for e in sample_head]\n\n # Get text-information about each column\n label_row = list(normalyzer_vals.columns)[:nbr_annot_cols] + list(design_matrix['name'])\n\n headers = pd.DataFrame([sample_head_str, label_row])\n headers.columns = normalyzer_vals.columns\n\n return headers",
"def _generateRowHeader(self, obj, **args):\n result = []\n header = self._script.utilities.rowHeaderForCell(obj)\n if not header:\n return result\n\n text = self._script.utilities.displayedText(header)\n if not text:\n return result\n\n roleString = self.getLocalizedRoleName(obj, pyatspi.ROLE_ROW_HEADER)\n if args.get('mode') == 'speech':\n if settings.speechVerbosityLevel == settings.VERBOSITY_LEVEL_VERBOSE \\\n and not args.get('formatType') in ['basicWhereAmI', 'detailedWhereAmI']:\n text = \"%s %s\" % (text, roleString)\n elif args.get('mode') == 'braille':\n text = \"%s %s\" % (text, roleString)\n\n result.append(text)\n return result",
"def construct_header(self): \n \n # create the individual labels\n hdr_bits = [hb.format(hdr) for hb, hdr in zip(self.row_base, self.headers)]\n \n # stick it all together and return with hdr_sep underneath\n hdr_str = f\"|{'|'.join(hdr_bits)}|\\n\"\n return hdr_str + self.hdr_sep * (len(hdr_str)-1) + \"\\n\"",
"def generate_colnames(df, labelnum=0): # need to be adjusted for GC content\n colnames = []\n for field in range(len(df.columns) - labelnum):\n colnames.append(BEDCOLS[field])\n for label in range(labelnum):\n colnames.append(f\"label_{label+1}\")\n return colnames",
"def writeExcelHeader(worksheet, titleCols):\n cno = 0\n for titleCol in titleCols:\n worksheet.write(0, cno, titleCol)\n cno = cno + 1",
"def first_header():\n return \"\"\"\n<th>Target\n<th>Date\n<th colspan=\"2\">UT\n<th>Exp\n<th>Cycle\n<th>No. of\n<th>Filters\n<th>XxY\n<th>Speed\n<th>NX1xNY1\n<th>X1\n<th>Y1\n<th>NX2xNY2\n<th>X2\n<th>Y2\n<th>Grat.\n<th>Slit\n<th>Slit\n<th>ID\n<th>PI\n<th align=\"left\">Comment\n\"\"\"",
"def writeheader(fh,colnames):\n for i in range(len(colnames)):\n fh.write('# %d %s\\n'%(i+1,colnames[i]))",
"def header(self, format=None):\n return [\" ID \",\n \"East\",\n \"North\",\n \"TARGET ELEV\",\n \" LENGTH\",\n \" AZ\",\n \" DIP\",\n \"PLAN ELEV\"]",
"def generate_headers(self):\n raise NotImplementedError()",
"def _html_table_headers(self, row_axes, col_axes):\n dsh = self.get_dshape()\n nb_blank_cols = len(row_axes) * 2 # nb of blank cols preprended to\n # each line of the column header\n nb_rows = int(np.prod([dsh[a] for a in row_axes]))\n nb_cols = int(np.prod([dsh[a] for a in col_axes]))\n # col header\n if nb_blank_cols > 0:\n blank_cells = ['']\n blank_cells_attrs = [{'colspan': str(nb_blank_cols)}]\n else:\n blank_cells = []\n blank_cells_attrs = []\n col_header = []\n nb_repets = 1\n span = nb_cols\n for a in col_axes:\n dom = [str(v)\n for v in self.get_domain(a)] # TODO: better dv format\n span /= len(dom)\n # row showing the axis label\n col_header.append(html_list_to_row(blank_cells + [a], 'h',\n blank_cells_attrs +\n [{'colspan': nb_cols}]))\n # row showing domain values\n col_header.append(html_list_to_row(blank_cells + dom * nb_repets, 'h',\n blank_cells_attrs +\n [{'colspan': str(span)}] *\n len(dom) * nb_repets))\n nb_repets *= len(dom)\n\n # row header\n # initialization of all rows because row filling wont be sequential:\n row_header = [[] for i in range(nb_rows)]\n nb_repets = 1\n span = nb_rows\n for a in row_axes:\n # 1st row contains all axis labels:\n row_header[0].append(html_cell(html_div(a, {'class': 'rotate'}), 'h',\n {'rowspan': nb_rows}))\n\n # dispatch domain values across corresponding rows:\n dom = [str(v)\n for v in self.get_domain(a)] # TODO: better dv format\n span /= len(dom)\n for idv, dv in enumerate(dom * nb_repets):\n row_header[\n idv * span].append(html_cell(dv, 'h', {'rowspan': span}))\n\n nb_repets *= len(dom)\n\n return [''.join(r) for r in row_header], col_header",
"def addheader(datasets):\n header = get_header()\n for i in range(0, len(datasets)):\n datasets[i].columns = header\n return datasets",
"def header(self):\n\n data = {}\n data['latitude'] = self.latitude()\n data['latitude_unc'] = self.latitude_unc()\n data['longitude'] = self.longitude()\n data['longitude_unc'] = self.longitude_unc()\n data['uid'] = self.uid()\n data['n_levels'] = self.n_levels()\n data['year'] = self.year()\n data['month'] = self.month()\n data['day'] = self.day()\n data['time'] = self.time()\n data['cruise'] = self.cruise()\n data['probe_type'] = self.probe_type()\n \n header = pd.Series(data)\n\n return header",
"def header(out_file=sys.stdout, ac=None):\n if ac is not None:\n print(*Features.FEATURE_COLS, \"AC\", sep=\"\\t\", file=out_file)\n else:\n print(*Features.FEATURE_COLS, sep=\"\\t\", file=out_file)",
"def csv_make_header(self, fileobj, title, comment=\"\"):\n #Line of header info\n \n fileobj.write(csv_line( ['Notes'] + [x.name for x in self.angles] + ['Wait For/n', 'Value'] ) )",
"def write_header(self):\n lines = [\"\"]\n\n for key in self._header_keys:\n value = self.get_attr_from_name(key)\n if isinstance(value, list):\n value = \",\".join([f\"{v:.1f}\" for v in value])\n elif isinstance(value, (float)):\n value = f\"{value:.7f}\"\n elif isinstance(value, (int)):\n value = f\"{value:.0f}\"\n\n key = (\n key.replace(\"_\", \" \")\n .title()\n .replace(\" \", \"\")\n .replace(\"MTEdit.\", \"MTEdit:\")\n )\n\n lines.append(f\"${key}={value.capitalize()}\")\n\n return lines",
"def get_headers(df):\n return df.columns.values",
"def get_bp_headers(self) -> None:\n self.col_headers = []\n for bp in self.body_parts_lst:\n c1, c2, c3 = (f\"{bp}_x\", f\"{bp}_y\", f\"{bp}_p\")\n self.col_headers.extend((c1, c2, c3))",
"def table_header(self):\n title = 'HYPERPARAMETER FINE-TUNING RESULTS'\n title_len = len(title)\n extra_spaces = self.max_length - title_len\n left_spaces = extra_spaces // 2\n right_spaces = extra_spaces - left_spaces - 1\n\n return '| ' + (left_spaces * ' ') + title + (right_spaces * ' ') + ' |\\n'",
"def csv_make_header(self, fileobj, title, comment=\"\"):\n fileobj.write(csv_line( [\"#Title:\", title] ) )\n fileobj.write(csv_line( [\"#Comment:\", comment] ) )\n #Any other useful comment s trings?\n fileobj.write('#\"First column is the sample phi motor rotation, in radians\"\\n' )\n fileobj.write('#\"Next 6 columns are the XY leg positions in mm, relative to the central (neutral) position.\"\\n' )\n fileobj.write('#\"Next are 2 columns for the stopping criterion parameters.\"\\n' )\n #Line of header info\n fileobj.write(csv_line( ['Phi', 'LegA_X', 'LegA_Y', 'LegB_X', 'LegB_Y', 'LegC_X', 'LegC_Y', 'CountFor', 'CountValue', 'Comment'] ) )",
"def make_headers():\n headers = [\"agent_ident\", \"chro\"]\n for i in range(10):\n for j in range(5):\n s = \"d\" + str(i) + \"e\" + str(j)\n headers.append(s + \"_gene ident\")\n headers.append(s + \"_weight\")\n headers.append(s + \"_mut\")\n headers.append(s + \"_dom\")\n for j in range(5):\n s = \"d\" + \"a\" + \"e\" + str(j)\n headers.append(s + \"_gene ident\")\n headers.append(s + \"_weight\")\n headers.append(s + \"_mut\")\n headers.append(s + \"_dom\")\n for j in range(5):\n s = \"d\" + \"b\" + \"e\" + str(j)\n headers.append(s + \"_gene ident\")\n headers.append(s + \"_weight\")\n headers.append(s + \"_mut\")\n headers.append(s + \"_dom\")\n for i in range(6):\n for j in range(5):\n s = \"s\" + str(i) + \"e\" + str(j)\n headers.append(s + \"_gene ident\")\n headers.append(s + \"_weight\")\n headers.append(s + \"_mut\")\n headers.append(s + \"_dom\")\n for i in range(5):\n for j in range(6):\n s = \"e\" + str(i) + \"a\" + str(j)\n headers.append(s + \"_gene ident\")\n headers.append(s + \"_weight\")\n headers.append(s + \"_mut\")\n headers.append(s + \"_dom\")\n return headers",
"def header(self, cols, parent_row):\n out = []\n for col in cols:\n if col == 'gau_id':\n out.append(self.name_for('Geographies', parent_row['geography_id']))\n elif col == 'oth_1_id':\n out.append(self.name_for('OtherIndexes', parent_row['other_index_1_id']))\n elif col == 'oth_2_id':\n out.append(self.name_for('OtherIndexes', parent_row['other_index_2_id']))\n else:\n out.append(col)\n return out"
] | [
"0.7301636",
"0.68024063",
"0.66927755",
"0.6617009",
"0.6617009",
"0.6566764",
"0.6508237",
"0.649484",
"0.6321167",
"0.63026154",
"0.6261138",
"0.62600505",
"0.62569094",
"0.61962223",
"0.6120518",
"0.60768306",
"0.6066733",
"0.6040872",
"0.6030909",
"0.6017008",
"0.6016963",
"0.5997922",
"0.5993154",
"0.5987806",
"0.5972532",
"0.59579265",
"0.59454644",
"0.59311235",
"0.59226525",
"0.58960193"
] | 0.73253417 | 0 |
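The make_header entry above builds a tab-separated header line whose columns depend on a mode key looked up in several tables. A minimal usage sketch of that pattern follows; the contents of DIVNAMES, STATNAMES and INTROGPATTERNS below are invented placeholders, not the real tables.

DIVNAMES = {'dfoil': ['div12', 'div34']}          # placeholder contents
STATNAMES = {'dfoil': ['DFO', 'DIL']}             # placeholder contents
INTROGPATTERNS = {'dfoil': ['12', '34', 'none']}  # placeholder contents

def make_header_sketch(mode):
    # Fixed columns, then per-statistic columns, then introgression flags, joined by tabs.
    cols = (['#chrom', 'coord', 'total', 'dtotal'] + DIVNAMES[mode] +
            ['{}_{}'.format(x, y) for x in STATNAMES[mode]
             for y in ('left', 'right', 'total', 'stat', 'chisq', 'Pvalue')] +
            ['introgression'] + ['introg{}'.format(x) for x in INTROGPATTERNS[mode]])
    return ('\t'.join(cols) + '\n').encode('utf-8')

print(make_header_sketch('dfoil').decode('utf-8'))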
Returns a file name containing the log. physical: use a physical file or not. Returns a pointer to a log file (str). Raises OSError if this file cannot be created | def GetLogFile (physical = False) :
if sys.hal_log_values ["__log_file"] is None :
if physical :
path = GetPath ()
if sys.hal_log_values ["__log_file_name"] is None :
if os.path.exists (path) : sys.hal_log_values ["__log_file_name"] = os.path.join (path, sys.hal_log_values ["__log_const"])
else : raise PQHException ("unable to create a log file in folder " + path)
if not isinstance (sys.hal_log_values ["__log_file_name"], str) :
sys.hal_log_values ["__log_file"] = sys.hal_log_values ["__log_file_name"]
else :
try :
sys.hal_log_values ["__log_file"] = open (sys.hal_log_values ["__log_file_name"], "w", encoding="utf-8")
except Exception as e:
raise OSError ("unable to create file " + sys.hal_log_values ["__log_file_name"] + "\n" + str(e))
else :
sys.hal_log_values ["__log_file"] = LogFakeFileStream()
return sys.hal_log_values ["__log_file"] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tempfile_name():\n ret = os.path.join(tempfile.gettempdir(), 'system_monitor.log')\n if os.access(ret, os.F_OK) and not os.access(ret, os.W_OK):\n print(\"WARNING: Couldn't write to log file {0}: (Permission denied)\".format(ret))\n ret = tempfile.mkstemp(prefix='system_monitor', suffix='.tmp', text=True)\n print(\"Create a new log file: {0}\".format(ret[1]))\n return ret[1]\n\n return ret",
"def _get_log_filename(self):\n fnd = self._get_session_dir()\n fn = os.path.join(fnd, '%s.log' % self.timestamp.time_string())\n\n if not os.path.exists(fn):\n with open(fn, 'wt') as log_file:\n log_file.write('Log Created %s by ' % str(datetime.now()))\n log_file.write('%s V%s\\n' % (__PROGRAM_NAME__, __VERSION__))\n\n return fn",
"def _get_log_filepath(self, imgname):\n\t\treturn os.path.join(self.workdir, imgname + \".log.txt\")",
"def _create_logfile(self):\r\n if not self.console_redirect:\r\n return None\r\n\r\n # PCU_logs.robot need a timestamp for console logs as can be run several times\r\n if self.name == self.log_test.replace('.robot', ''):\r\n return open('{0}\\{1}_console_log_{2}'.format(\r\n self.output_dir_path, self.name, datetime.now().strftime(\"%m%d%H%M\")), \"w+\")\r\n else:\r\n return open('{0}\\{1}_console_log'.format(self.output_dir_path, self.name), \"w+\")",
"def get_system_logfile():\n return \"system\" + get_day() + \".log\"",
"def os_open_comm_log( self, ):\r\n AppGlobal.os_open_txt_file( self.parameters.comm_logging_fn )",
"def open_log(fn):\n\n global log_file\n if fn is not None:\n d = os.path.dirname(fn)\n if d != \"\":\n makedirs(d)\n log_file = open(fn, \"a+\")",
"def _output_log_path(name):\n output = Path(\"../Raw Data/\").joinpath(str(date.today()))\n output.mkdir(parents=True, exist_ok=True)\n return output.joinpath(\"000_logging.hdf5\")",
"def create_file_path(logging_file_name):\n root_folder = os.path.dirname(os.path.normpath(os.getcwd()))\n folder_name = os.path.join(root_folder, logging_file_name + '_' + str(date.today()))\n log_file_name = os.path.join(folder_name, 'log_' + Utils.get_time() + '.json')\n return log_file_name",
"def create_log_file(log_dir, filename):\n timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S.%f')\n symlink_name = os.path.join(log_dir, filename)\n timestamped_name = '%s.%s' % (symlink_name, timestamp)\n if os.path.islink(symlink_name):\n os.remove(symlink_name)\n os.symlink(timestamped_name, symlink_name)\n return open(timestamped_name, 'w')",
"def create_log_file(file_name: str):\n if not log_file_exists(file_name):\n os.mknod(get_complete_file_name(file_name))\n os.chmod(get_complete_file_name(file_name), 0o777)",
"def create_filename (self):\n\t\tassert self.__patient_name and self.__location_name, \"New filename could not be determined, one or more needed arguments is empty!\"\n\t\t_patient_name = self.__patient_name.split(' ')\n\t\t_patient_name.reverse()\n\t\t\n\t\treturn os.path.join(os.path.dirname(self.file._path), \"%s MR %s%s\" % (self.__location_name, ', '.join(_patient_name).upper(), self._file.extension))",
"def _get_filepath(self, name=None, use_timestamp=True):\n current_time = str(int(time.time()))\n if not name and not use_timestamp:\n raise Exception(\"Name or timestamp is required\")\n if name:\n self.fname = \"%s\" % name\n current_time = \"_%s\" % current_time\n if use_timestamp:\n self.fname = \"%s%s\" % (self.fname, current_time)\n if len(self.fname) > 0:\n self.fname = \"%s/%s.jpg\" % (self.picture_directory, self.fname)\n return self.fname",
"def test_file_creation(data, logging_file_name):\n create_instance(data, logging_file_name)\n log_file_name = create_file_path(logging_file_name)\n print(log_file_name)\n if data is None or len(data) == 0:\n assert not os.path.exists(log_file_name)\n else:\n assert os.path.exists(log_file_name)",
"def _createLogFile(LogFile,date,LocalPath,ShowTagsResult):\n try:\n LOG = open(LogFile,\"w\")\n if _verbose:\n print(\"Writing Production Host, Location, Release and Tags information in %s\" % LogFile) \n LOG.write(\"These performance tests were executed on host %s and published on %s\" % (HOST,date))\n LOG.write(\"They were run in %s\" % LocalPath)\n LOG.write(\"Results of showtags -r in the local release:\\n%s\" % ShowTagsResult)\n LOG.close()\n except IOError as detail:\n print(\"WARNING: Can't create log file\") \n print(detail)",
"def _log_name():\n return os.path.splitext(os.path.basename(__file__))[0]",
"def syslog_local_file(handle, admin_state, name, severity=\"emergencies\",\n size=\"40000\"):\n\n from ucsmsdk.mometa.comm.CommSyslogFile import CommSyslogFile\n\n mo = CommSyslogFile(parent_mo_or_dn=\"sys/svc-ext/syslog\", size=size,\n admin_state=admin_state,\n name=name,\n severity=severity)\n handle.add_mo(mo, modify_present=True)\n handle.commit()\n return mo",
"def get_log_file(self):\n self.log_file = os.path.join(\n self.directory,\n \"ts\",\n self.ts.reaction_label,\n \"conformers\",\n \"{}_{}_{}.log\".format(self.ts.reaction_label, self.ts.direction, self.ts.index))\n return self.log_file",
"def _create_file(self, rel_path, text):\n # FIXME: There are better/more secure APIs for creating tmp file paths.\n file_path = self.filesystem.join(self._temp_dir, rel_path)\n self.filesystem.write_text_file(file_path, text)\n return file_path",
"def test_passing_log_fname(self):\n\n log_env_file = \"test.log\"\n log_file = \"test_2.log\"\n whole_env_log_file = os.path.join(LOG_FOLDER, log_env_file)\n whole_log_file = os.path.join(LOG_FOLDER, log_file)\n\n # remove both files if they exist\n for file in (whole_env_log_file, whole_log_file):\n if os.path.exists(file):\n os.remove(file)\n\n os.environ[ENV_WORK_DIR] = TMP_DIR\n os.environ[ENV_LOG_FNAME] = log_env_file\n\n logger = pgo_logger.get_logger(log_file_name=log_file)\n assert logger is not None\n\n logger.info(\"test\")\n\n assert os.path.exists(whole_log_file) is True\n assert os.path.isfile(whole_log_file) is True\n assert os.path.exists(whole_env_log_file) is False",
"def get_base_logfile():\n return \"baseLog\" + get_day() + \".log\"",
"def create_file(self, sensor_id:str, timestamp:str, sensor_name:str)->str:\n file_name = '%s/%s.%s.%s.json' % (self.generate_data_prep, sensor_id, timestamp, sensor_name)\n try: \n open(file_name, 'w').close()\n except Exception as e: \n print(\"Unable to create file (%s) - %s\" % (self.generate_data_prep, e))\n return False \n return file_name",
"def missing_but_potential_file():\r\n tempf = tempfile.NamedTemporaryFile()\r\n fname = tempf.name\r\n tempf.close()\r\n return fname",
"def generate_log_filename():\n return \"LOG_\"+strftime(\"(%Y-%m-%d)_%H-%M-%S\", gmtime())+\".txt\"",
"def get_log_file(self):\n return self.log_file.read_text(errors=\"backslashreplace\")",
"def _get_temp_path(self):\n handle, path = tempfile.mkstemp()\n # windows can't write to a file that is already open by another process\n # (tests use pipe redirection to a log file)\n os.close(handle)\n return path",
"def out_file_core():\n date = str(datetime.datetime.now().strftime(\"%Y%d%m_%H%M%S\"))\n return f\"log-{date}-{str(uuid.uuid4())}\"",
"def GetLogFilePath():\n global _LOG_FILE\n return _LOG_FILE",
"def _getLockFile(self):\n if self.baseFilename.endswith(\".log\"):\n lock_file = self.baseFilename[:-4]\n else:\n lock_file = self.baseFilename\n lock_file += \".lock\"\n return lock_file",
"def _logFile_default(self):\n print \"choosing default log file\"\n return os.path.join(self.rpiADCLogFolder,time.strftime(\"rpiADC-%Y-%m-%d.csv\", self.currentLocalTime))"
] | [
"0.65035456",
"0.6242941",
"0.6138833",
"0.6034926",
"0.5939936",
"0.58189434",
"0.58029103",
"0.57990223",
"0.5787745",
"0.5775017",
"0.57370406",
"0.5699442",
"0.5673442",
"0.56574756",
"0.5648243",
"0.5603709",
"0.5595422",
"0.5588167",
"0.5581843",
"0.5575622",
"0.5570398",
"0.5561372",
"0.55559665",
"0.5537998",
"0.5505151",
"0.54759586",
"0.5463482",
"0.54585904",
"0.5455743",
"0.5449272"
] | 0.7608302 | 0 |
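The GetLogFile entry above creates the log handle lazily on first use and only touches disk when physical is True. A hedged sketch of that pattern follows; the temp-directory path and the in-memory fallback (standing in for the library's fake stream) are illustrative choices, not the library's exact behaviour.

import io, os, tempfile

_log_state = {'file': None}

def get_log_file(physical=False):
    # Create the handle on the first call, then reuse it on later calls.
    if _log_state['file'] is None:
        if physical:
            path = os.path.join(tempfile.gettempdir(), 'log_example')  # assumed location
            os.makedirs(path, exist_ok=True)
            _log_state['file'] = open(os.path.join(path, 'log.txt'), 'w', encoding='utf-8')
        else:
            _log_state['file'] = io.StringIO()  # nothing is written to disk in this branch
    return _log_state['file']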
Builds a message on a single line with the date; it deals with encoding issues. l: list of fields. p: dictionary of fields. If p contains OutputPrint, call Print (OutputPrint); if p contains LogPath, it calls init (v); if p contains LogFile, it changes the log file name (it creates a new one, the previous is closed); if p contains LogPathAdd, it adds this path to the temporary file; if p contains Lock, it locks option OutputPrint; if p contains UnLock, it unlocks option OutputPrint | def fLOG (*l, **p) :
path_add = p.get ("LogPathAdd", [] )
lock = p.get("Lock", None)
if lock is not None : sys.hal_log_values["Lock"] = lock
if "LogFile" in p and "LogPath" in p : init (p ["LogPath"], p ["LogFile"])
elif "LogFile" in p : init (filename = p ["LogFile"], path_add = path_add)
elif "LogPath" in p : init (path = p ["LogPath"], path_add = path_add)
def myprint(s): print(s)
if "OutputPrint" in p :
Print (p ["OutputPrint"])
if "LogFile" in p :
logfile = GetLogFile(True)
dt = datetime.datetime (2009,1,1).now ()
if len (l) > 0 :
def _str_process (s) :
if isinstance (s, str) : return s
elif isinstance(s, bytes) : return s.decode("utf8")
else :
try:
return str (s)
except Exception as e :
raise Exception("unable to convert s into string: type(s)=" + str(type(s))) from e
message = str (dt).split (".")[0] + " " + " ".join ( [_str_process(s) for s in l ] ) + sys.hal_log_values ["__log_file_sep"]
if sys.hal_log_values ["__log_display"] :
try :
myprint (message.strip ("\r\n"))
except UnicodeEncodeError :
try :
myprint ("\n".join (repr (message.strip ("\r\n")).split ("\\n")))
except UnicodeEncodeError :
try :
rr = repr (message.strip ("\r\n")).split ("\\n")
for r in rr :
myprint (r.encode("utf8"))
except UnicodeEncodeError :
myprint ("look error in log file")
GetLogFile ().write (message)
st = " "
else :
st = str (dt).split (".")[0] + " "
for k,v in p.items () :
if k == "OutputPrint" and v : continue
message = st + "%s = %s%s" % (str (k), str (v), sys.hal_log_values ["__log_file_sep"])
if "INNER JOIN" in message :
break
GetLogFile ().write (message)
if sys.hal_log_values ["__log_display"] :
try :
myprint (message.strip ("\r\n"))
except UnicodeEncodeError :
myprint ("\n".join (repr (message.strip ("\r\n")).split ("\\n")))
GetLogFile ().flush () | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def build_log_entry(\n hostname: str, user: str, date: dt.datetime, wdir: Path, cmd: str\n) -> str:\n return (\n f'[{date.strftime(\"%Y-%m-%d %H:%M:%S\")}] ({user}@{hostname}) '\n f\"{wdir}\\n\\t{cmd}\\n\"\n )",
"def Create_log():\r\n \"\"\"And Maintain log file to the current date in MMM_DD_YY format\"\"\"\r\n \r\n name = multiprocessing.current_process().name\r\n config = config_create()\r\n Stream = config.get('Log', 'Log1')\r\n Tweet = config.get('Log', 'Log2')\r\n OverallLog = config.get('Log', 'Log3')\r\n \r\n uscore = '_'\r\n txtn = '.txt'\r\n StreamL = uscore +Stream+ txtn\r\n TweetL = uscore +Tweet+ txtn\r\n OverallLogL = OverallLog+txtn\r\n \r\n \r\n \r\n name = multiprocessing.current_process().name\r\n StreamFileName = time.strftime(\"%b_%d_%y\")+StreamL\r\n TweetFileName = time.strftime(\"%b_%d_%y\")+TweetL\r\n config.set('Latest_Log', 'currentstreamlog',StreamFileName)\r\n config.set('Latest_Log', 'currenttweetlog',TweetFileName)\r\n config.set('Latest_Log', 'overalllog',OverallLogL)\r\n \r\n with open('botconfig.ini', 'w') as x:\r\n config.write(x)\r\n if os.path.isfile(StreamFileName) is False:\r\n open(StreamFileName, 'w')\r\n \r\n if os.path.isfile(OverallLogL) is False:\r\n open(OverallLogL, 'w')\r\n \r\n if os.path.isfile(TweetFileName) is False:\r\n twfile = open(TweetFileName, 'w')\r\n ## Edit this or comment to change first line entered upon\r\n ## File creation\r\n twfile.write('0 ComicTweetBot')\r\n #time.sleep(1)\r\n #Create_log()\r",
"def log_builder(self, log_level, hrtimestamp, datestamp, timestamp, log_msg, tags):\n log_body = {}\n log_body[\"filename\"] = self.filename\n log_body[\"log_level\"] = log_level\n log_body[\"hrtimestamp\"] = hrtimestamp\n log_body[\"datestamp\"] = datestamp\n log_body[\"timestamp\"] = timestamp\n log_body[\"log_msg\"] = log_msg\n log_body[\"tags\"] = tags\n return log_body",
"def make_log():\n log_file = os.path.join(phys_dir,'ge_phys2bids_'+datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")+'.log')\n with open(log_file,'w') as log:\n log.write('-------- GE phys2bids --------\\n\\n')\n log.write('DICOM directory: %s\\n'%dcm_dir)\n log.write('Physiology directory: %s\\n'%phys_dir)\n log.write('Output directory: %s\\n\\n'%out_dir)\n log.write('%d EPI files were found\\n\\n'%len(dcm_dict))\n for rn in dcm_dict.keys():\n log.write('------------------------------\\n')\n log.write('%s\\n'%dcm_dict[rn]['out_name'])\n log.write('Start time: %s\\n'%dcm_dict[rn]['start_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('End time: %s\\n'%dcm_dict[rn]['end_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('PPG file: %s\\n'%dcm_dict[rn]['ppg_file'])\n log.write('Respiration file: %s\\n'%dcm_dict[rn]['resp_file'])\n log.write('ECG file: %s\\n'%dcm_dict[rn]['ecg_file'])\n log.write('------------------------------\\n\\n')",
"def log(self, msg):\n current_datetime = self.get_date_time()\n self.file.write(\"%s %s\\n\" % (current_datetime, msg))",
"def _createLogFile(LogFile,date,LocalPath,ShowTagsResult):\n try:\n LOG = open(LogFile,\"w\")\n if _verbose:\n print(\"Writing Production Host, Location, Release and Tags information in %s\" % LogFile) \n LOG.write(\"These performance tests were executed on host %s and published on %s\" % (HOST,date))\n LOG.write(\"They were run in %s\" % LocalPath)\n LOG.write(\"Results of showtags -r in the local release:\\n%s\" % ShowTagsResult)\n LOG.close()\n except IOError as detail:\n print(\"WARNING: Can't create log file\") \n print(detail)",
"def build_log(self):\n if not self._build_log_text:\n self._build_log_text = self._cat('/tmp/log')\n return self._build_log_text",
"def _createlog(self):\n\t\tif self.toemail and self.fromemail and self.smtphost:\n\t\t\t# Use the email logger as the first logger, so that when sending the email (in :meth:`EmailLogger.close`) fails, it will still be logged to the log file/stdout/stderr\n\t\t\tself._loggers.append(EmailLogger(self))\n\t\tif self.log2stderr:\n\t\t\tself._loggers.append(StreamLogger(self, sys.stderr, self._formatlogline))\n\t\tif self.log2stdout:\n\t\t\tself._loggers.append(StreamLogger(self, sys.stdout, self._formatlogline))\n\t\tif self.log2file:\n\t\t\t# Create the log file\n\t\t\tlogfilename = ul4c.Template(self.logfilename, \"logfilename\").renders(job=self)\n\t\t\tlogfilename = url.File(logfilename).abs()\n\t\t\tself.logfileurl = str(url.Ssh(misc.sysinfo.user_name, misc.sysinfo.host_fqdn or misc.sysinfo.host_name, logfilename.local()))\n\t\t\tskipurls = [logfilename]\n\t\t\tlogfile = logfilename.open(mode=\"w\", encoding=self.encoding, errors=self.errors)\n\t\t\tif self.loglinkname is not None:\n\t\t\t\t# Create the log link\n\t\t\t\tloglinkname = ul4c.Template(self.loglinkname, \"loglinkname\").renders(job=self)\n\t\t\t\tloglinkname = url.File(loglinkname).abs()\n\t\t\t\tskipurls.append(loglinkname)\n\t\t\t\tlogfilename = logfilename.relative(loglinkname)\n\t\t\t\ttry:\n\t\t\t\t\tlogfilename.symlink(loglinkname)\n\t\t\t\texcept OSError as exc:\n\t\t\t\t\tif exc.errno == errno.EEXIST:\n\t\t\t\t\t\tloglinkname.remove()\n\t\t\t\t\t\tlogfilename.symlink(loglinkname)\n\t\t\t\t\telse:\n\t\t\t\t\t\traise\n\t\t\tself._loggers.append(URLResourceLogger(self, logfile, skipurls, self._formatlogline))",
"def log(self):\n f = open(self.log_dir + 'parsed.log', 'a')\n try:\n # Write: local time | CurrentCost \"time\" | id | temp/C | power/W \n f.write(\"%s\\t%s\\t%s\\t%s\\t%s\\n\" \n % (self.ts('now'), self.ts('cc'), self.id, self.temp, self.watts))\n finally:\n f.close()",
"def create_log_message(self, command: str, answer: str, params: str):\n answer_log = answer_codes[answer] if answer in answer_codes.keys() else answer\n caption = self.TxtLog.toPlainText()\n self.TxtLog.setText(caption + \">> \" + command + \" \" + str(self.state.device_id) + ' ' + params + '\\n'\n + '<<' + answer_log + '\\n')",
"def generateLog(outq1, outq2, outq3):\n # generating formatted string for output of question 1\n output_q1 = \"\\n\\t\\tArticles Ranked by Popularity\\n\"\n output_q1 += '-'*60 + \"\\n\"\n output_q1 += '{0:40} | {1:20}'.format('Article', 'Number Of Views') + '\\n'\n output_q1 += '-'*60 + \"\\n\"\n for ele in outq1:\n output_q1 += '{0:40} | {1:15}'.format(ele[0], ele[1]) + \"\\n\"\n output_q1 += '-'*60 + \"\\n\"\n\n # generating formatted string for output of question 2\n output_q2 = \"\\n\\t\\tAuthors Ranked by Popularity\\n\"\n output_q2 += '-'*60 + \"\\n\"\n output_q2 += '{0:40} | {1:20}'.format('Authors', 'Number Of Views') + '\\n'\n output_q2 += '-'*60 + \"\\n\"\n for ele in outq2:\n output_q2 += '{0:40} | {1:15}'.format(ele[0], ele[1]) + \"\\n\"\n output_q2 += '-'*60 + \"\\n\"\n\n # generating formatted string for output of question 3\n output_q3 = \"\\n\\t\\tDays with more than 1% error returns\\n\"\n output_q3 += '-'*60 + \"\\n\"\n output_q3 += '{0:15} | {1:20} | {2:20}'.format(\n 'Date',\n 'Number Of Views',\n 'Number of Errors') + '\\n'\n output_q3 += '-'*60 + \"\\n\"\n for ele in outq3:\n output_q3 += '{0:15} | {1:20} | {2:15}'.format(\n str(ele[0]),\n ele[1], ele[2]) + \"\\n\"\n output_q3 += '-'*60 + \"\\n\"\n\n with open('report.txt', 'w') as f:\n f.write(output_q1)\n f.write(output_q2)\n f.write(output_q3)\n f.close()\n\n print(output_q1)\n print(output_q2)\n print(output_q3)",
"def init (path = None, filename = None, create = True, path_add = None) :\n if path_add is None:\n path_add=[]\n if path is None :\n path = sys.hal_log_values [\"__log_path\"]\n \n if path == \"###\" :\n if sys.platform.startswith(\"win\") :\n path = \"d:\\\\temp\" if os.path.exists (\"d:\\\\temp\") else \"c:\\\\temp\"\n path = os.path.join (path, \"log_pyquickhelper\")\n else :\n path = \"/tmp\"\n path = os.path.join (path, \"log_pyquickhelper\")\n \n if len (path_add) > 0 : \n if not isinstance (path_add, list) : path_add = [ path_add ]\n temp = []\n for p in path_add :\n spl = os.path.splitext (p)\n temp.append (spl [0])\n path = os.path.join (path, *temp)\n \n if filename is None :\n filename = sys.hal_log_values [\"__log_file_name\"]\n \n if (sys.hal_log_values [\"__log_path\"] != path or sys.hal_log_values [\"__log_file_name\"] != filename) \\\n and sys.hal_log_values [\"__log_file\"] != None :\n sys.hal_log_values [\"__log_file\"].close ()\n sys.hal_log_values [\"__log_file\"] = None\n sys.hal_log_values [\"__log_path\"] = path\n sys.hal_log_values [\"__log_file_name\"] = filename\n \n if create :\n if not os.path.exists (sys.hal_log_values [\"__log_path\"]) :\n os.makedirs (sys.hal_log_values [\"__log_path\"])\n else :\n if not os.path.exists (sys.hal_log_values [\"__log_path\"]) :\n raise PQHException (\"unable to find path \" + sys.hal_log_values [\"__log_path\"])",
"def __build_message_to_print_in_log(log: LogModel) -> Optional[str]:\n\n if log is None:\n return None\n\n log_level_name: str = LogHelper.get_log_level_name(log.log_level)\n message: str = \\\n f'{log.creation_date} |->\\t[{log_level_name}]\\t{log.message}\\t\\t[Line: {log.line_number}]\\t[{log.filename}]'\n\n return message",
"def createLogHeader(self,):\n \n #\n # Imports\n #\n import sys\n import getpass\n import commands\n from socket import gethostname\n \n #\n # get information\n #\n username = getpass.getuser()\n computer = gethostname()\n \n #\n # create the header\n #\n output = ''\n output += 'Running program: '+self.commandLine+'.\\n'\n output += 'time: '+self.startTimeStr+'\\n'\n output += 'Master process id='+str(MASTER)+'\\n'\n output += 'Started by user = '+username+' on host = '+computer+'\\n'\n if self.onUppmax: output += 'Program is run on uppmax, any temporary files will be placed in '+commands.getoutput('echo $SNIC_TMP')+' .\\n'\n \n return output",
"def __init__(self, level, pathname, lineno, msg, args, exc_info, func=None):\n #\n # The following statement allows passing of a dictionary as a sole\n # argument, so that you can do something like\n # logging.debug(\"a %(a)d b %(b)s\", {'a':1, 'b':2})\n # Suggested by Stefan Behnel.\n # Note that without the test for args[0], we get a problem because\n # during formatting, we test to see if the arg is present using\n # 'if self.args:'. If the event being logged is e.g. 'Value is %d'\n # and if the passed arg fails 'if self.args:' then no formatting\n # is done. For example, logger.warn('Value is %d', 0) would log\n # 'Value is %d' instead of 'Value is 0'.\n # For the use case of passing a dictionary, this should not be a problem.\n if args and len(args) == 1 and isinstance(args[0], dict) and args[0]:\n args = args[0]\n self.args = args\n self.levelno = level\n self.pathname = pathname\n self.msg = msg\n\n self.levelname = \"FOOBAR\" #getLevelName(level)\n\n try:\n self.filename = os.path.basename(pathname)\n self.module = os.path.splitext(self.filename)[0]\n except (TypeError, ValueError, AttributeError):\n self.filename = pathname\n self.module = \"Unknown module\"\n\n self.exc_info = exc_info\n self.exc_text = None # used to cache the traceback text\n self.lineno = lineno\n self.func_name = func\n self.created = time.time()\n self.asctime = time.asctime()\n # Remove milliseconds\n i = self.asctime.find(\".\")\n if i != -1: self.asctime = self.asctime[:i]",
"def create_logger(\n project_name: str,\n level: str = \"INFO\",\n log_dir: str = \"/tmp/logs\",\n file_name: Optional[str] = None,\n do_print: bool = True,\n simple_logging: bool = False,\n log_to_file: bool = False,\n rich_logging: bool = False,\n time_zone: Optional[str] = None,\n):\n import __main__\n\n if file_name is None:\n try:\n file_name = ntpath.basename(__main__.__file__).split(\".\")[0]\n except:\n file_name = \"logs\"\n\n logger = logging.getLogger(file_name)\n logger.handlers.clear()\n logger.setLevel(getattr(logging, level))\n\n if time_zone:\n from pytz import timezone, utc\n def time_formatter(*args):\n # TODO: Doesnt work with rich formatter\n utc_dt = utc.localize(datetime.datetime.utcnow())\n my_tz = timezone(time_zone)\n converted = utc_dt.astimezone(my_tz)\n return converted.timetuple()\n\n logging.Formatter.converter = time_formatter\n\n if rich_logging:\n from rich.logging import RichHandler\n stream_format = f\"{project_name}:%(module)s:%(funcName)s: %(message)s\"\n stream_handler = RichHandler(omit_repeated_times=False)\n else:\n stream_format = f\"%(asctime)s:%(levelname)s:{project_name}:%(module)s:%(funcName)s: %(message)s\"\n stream_handler = logging.StreamHandler()\n\n file_formatter = stream_formatter = logging.Formatter(\n stream_format, \"%Y-%m-%d %H:%M:%S\"\n )\n\n if simple_logging:\n file_formatter = logging.Formatter(\"%(message)s\")\n stream_formatter = logging.Formatter(\"%(message)s\")\n\n if log_to_file:\n date = datetime.date.today()\n date = \"%s-%s-%s\" % (date.day, date.month, date.year)\n log_file_path = os.path.join(log_dir, \"%s-%s.log\" % (file_name, date))\n\n create_folder(log_dir)\n file_handler = logging.FileHandler(log_file_path)\n file_handler.setFormatter(file_formatter)\n logger.addHandler(file_handler)\n\n if do_print:\n stream_handler.setFormatter(stream_formatter)\n logger.addHandler(stream_handler)\n\n logger.propagate = False\n\n return logger",
"def _format_and_write(self, message, values, process_key_num, level):\n if not process_key_num:\n raise ValueError('process_key_num not defined')\n\n message = self._formatted_string(message, values)\n\n if message_id.get():\n message += f' RequestId={message_id.get()}'\n if correlation_id.get():\n message += f' CorrelationId={correlation_id.get()}'\n if inbound_message_id.get():\n message += f' InboundMessageId={inbound_message_id.get()}'\n if interaction_id.get():\n message += f' InteractionId={interaction_id.get()}'\n\n message += f' ProcessKey={self.process_key_tag + process_key_num}'\n\n self.logger.log(level, message)",
"def __init__(self, log_path, on=True):\n self.log_path = log_path\n self.on = on\n\n if self.on:\n while os.path.isfile(self.log_path):\n self.log_path += '+'",
"def log_data(self):\n\n self.check_dir()\n with open(self.log_file, \"a\") as logger_file:\n logger_file.write(\"{}, {}\\n\".format(self.time, self.msg))",
"def LogProcess(self):\n time = datetime.today().strftime('%a %Y%b%d %X')\n# Get user name.\n f = os.popen(\"whoami\",\"r\")\n user = f.read().strip()\n f.close()\n\n entry = '%s\\t%s\\t%s\\t%s\\n' % (time, self.topdir, user, self.version)\n\n if ismounted(c.exams_file):\n# Append info to the exams file.\n try:\n f = open(c.exams_file,'a+')\n f.seek(0, 2)\n f.write(entry)\n f.close()\n except:\n# Not a huge problem if this doesn't work.\n pass",
"def updater_log_file(self,request):\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/installer/logfile invoked with:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(request.options).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" << %s\" % s)\n\t\t# -----------------------------------\n\t\tresult = None\n\t\tjob = ''\n\t\tif self._current_job and 'job' in self._current_job:\n\t\t\tjob = self._current_job['job']\n\t\telse:\n\t\t\tjob = request.options.get('job','')\n\n\t\tcount = request.options.get('count',0)\n\t\tif count < 0:\n\t\t\tresult = 0\n\t\telse:\n\t\t\tresult = []\n\t\tif not job in INSTALLERS:\n\t\t\t# job empty: this is the first call I can't avoid\n\t\t\tif job != '':\n\t\t\t\tMODULE.warn(\" ?? Don't know a '%s' job\" % job)\n\t\telse:\n\t\t\tif not 'logfile' in INSTALLERS[job]:\n\t\t\t\tMODULE.warn(\" ?? Job '%s' has no associated log file\" % job)\n\t\t\telse:\n\t\t\t\tfname = INSTALLERS[job]['logfile']\n\t\t\t\tif count < 0:\n\t\t\t\t\tresult = self._logstamp(fname)\n\t\t\t\telse:\n\t\t\t\t\t# don't read complete file if we have an 'ignore' count\n\t\t\t\t\tif ('lines' in self._current_job) and (self._current_job['lines']):\n\t\t\t\t\t\tcount += int(self._current_job['lines'])\n\t\t\t\t\tresult = self._logview(fname, -count)\n\n\t\t# again debug, shortened\n\t\tif isinstance(result,int):\n\t\t\tMODULE.info(\" >> %d\" % result)\n\t\telse:\n\t\t\tMODULE.info(\" >> %d lines\" % len(result))\n\n\t\t# ----------- DEBUG -----------------\n\t\tMODULE.info(\"updater/installer/logfile returns:\")\n\t\tpp = pprint.PrettyPrinter(indent=4)\n\t\tst = pp.pformat(result).split(\"\\n\")\n\t\tfor s in st:\n\t\t\t\tMODULE.info(\" >> %s\" % s)\n\t\t# -----------------------------------\n\n\t\tself.finished(request.id, result)",
"def build_custom_log(\n dp_shell_history: Path,\n fp_results: Path,\n *,\n daterange: List[str],\n username: str = None,\n wdir: Path = None,\n hostname: str = None,\n regexp: str = None,\n unique: bool = False,\n) -> None:\n dt_start, dt_end = get_daterange(daterange)\n\n log.trace(\"dt_start: {}\", dt_start) # type: ignore\n log.trace(\"dt_end: {}\", dt_end) # type: ignore\n\n hostname = os.uname().nodename if hostname is None else hostname\n regexp = \".*\" if regexp is None else regexp\n\n with fp_results.open(\"w\") as f:\n f.write(f\"# vim: filetype={SCRIPTNAME}\\n\\n\")\n\n dt_tmp = dt_start\n entry_count = 0\n while date_ym_value(dt_tmp) <= date_ym_value(dt_end):\n fp_log = Path(\n f\"{dp_shell_history}/{hostname}/{dt_tmp.year}/\"\n f\"{str(dt_tmp.month).zfill(2)}.log\"\n )\n\n try:\n if hostname.lower() == \"all\":\n fp_log = merge_hosts(\n dp_shell_history, dt_tmp.year, dt_tmp.month\n )\n\n skip_date_check = (\n dt_tmp.month != dt_start.month or dt_tmp.year != dt_start.year\n ) and (dt_tmp.month != dt_end.month or dt_tmp.year != dt_end.year)\n\n log_lines = process_logfile(\n fp_log,\n dt_start=dt_start,\n dt_end=dt_end,\n regexp=regexp,\n username=username,\n wdir=wdir,\n unique=unique,\n skip_date_check=skip_date_check,\n )\n\n with fp_results.open(\"a+\") as f:\n f.writelines(log_lines)\n\n entry_count += len(log_lines)\n except LogsNotFound:\n log.debug(f\"No Log Files for {dt_tmp.month}-{dt_tmp.year} Exist.\")\n finally:\n dt_tmp = dt_tmp + relativedelta(months=1)\n\n with fp_results.open(\"a+\") as f:\n f.write(\n f\"# Number of shell commands matched by {SCRIPTNAME} query: \"\n f\"{entry_count}\"\n )",
"def _log2mylog(self, msg):\n time_str = mod_time.strftime(\n \"%Y-%m-%d %H:%M:%S\", mod_time.localtime(mod_time.time())\n )\n msg = str(msg)\n content = \"%s [%s]\\n\" % (time_str, msg)\n fa = open(self.mylogfile, \"a\")\n fa.write(content)\n fa.close()",
"def out_file_core():\n date = str(datetime.datetime.now().strftime(\"%Y%d%m_%H%M%S\"))\n return f\"log-{date}-{str(uuid.uuid4())}\"",
"def log(self, message, level=DEBUG, fields=None):\n fields = {} if fields is None else fields\n fields[\"process_id\"] = self._id\n fields[\"process_state\"] = self._run_state.value\n if self.current_upload:\n fields[\"filepath\"] = self.current_upload.get(\"filepath\")\n fields[\"upload_id\"] = self.current_upload.get(\"upload_id\")\n fields[\"md5\"] = self.current_upload.get(\"md5\")\n log_fields = [\"%s=%s\" % (key, val) for key, val in fields.iteritems()]\n log_msg = \"%s %s\" % (message, \" \".join(log_fields))\n if level == EXCEPTION:\n LOGGER.exception(log_msg)\n else:\n LOGGER.log(level, log_msg)",
"def qaPrint(log, message):\n # current date and time as string + message. example: [Oct 25 01:52:33.000001] TC1 - Passed\n log_message = getCurTime(\"[%b %d %H:%M:%S.%f]\") + \" \" + message\n # prints log_message\n print log_message\n # writes message to a log file\n log.write(log_message + \"\\n\")",
"def __init__(self, message_type: LogType, message: str):\n self.timestamp = datetime.datetime.now().strftime(\"%m-%d-%Y %I:%M:%S %p\")\n self.message = message\n self.message_type = message_type",
"def debug_log(self, buf, shell):\n\n # Handle Shell output\n if shell == True:\n self.debugfile.write(\"<shell time=\\\" \" + datetime.datetime.now().strftime(\"%H:%M:%S \") + \"\\\" >\" )\n self.debugfile.write(\"<![CDATA[\"+buf+\"]]></shell>\\n\")\n\n # Handle User Input\n else:\n self.debugfile.write(\"<user time=\\\" \" + datetime.datetime.now().strftime(\"%H:%M:%S \") + \"\\\" >\" )\n self.debugfile.write(\"<![CDATA[\"+buf+\"]]></user>\\n\")",
"def log_message(self, formate, *args):\n return",
"def __init__(self, filename, mode='a', encoding=None, delay=0,\n file_name_format='%project_name-%log-%date'):\n if codecs is None:\n encoding = None\n FileHandler.__init__(self, filename, mode, encoding, delay)\n\n # 日志文件路径\n self.file_path = os.path.split(filename)[0]\n # 日志文件名称\n file_name = os.path.split(filename)[1]\n\n temp_file_name = file_name.split('.')\n if len(temp_file_name) == 1:\n self.project_name = temp_file_name[0]\n self.log_suffix = 'log'\n else:\n self.project_name, self.log_suffix = temp_file_name[0], temp_file_name[1]\n\n self.mode = mode\n self.encoding = encoding\n self.suffix = \"%Y-%m-%d\"\n self.suffix_time = \"\"\n self.file_name_format = file_name_format"
] | [
"0.5940724",
"0.5826949",
"0.57114995",
"0.5540157",
"0.54971814",
"0.5481836",
"0.54646546",
"0.54116577",
"0.5368127",
"0.5306057",
"0.52894217",
"0.52886254",
"0.5274615",
"0.52668566",
"0.52148986",
"0.52125263",
"0.51963454",
"0.5185633",
"0.5158358",
"0.51370734",
"0.5119761",
"0.5117089",
"0.5099861",
"0.5098128",
"0.5090578",
"0.50898516",
"0.5089557",
"0.50753933",
"0.5060834",
"0.50564885"
] | 0.6367271 | 0 |
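The fLOG entry above prefixes each message with a timestamp and falls back to a repr of the text when the console encoding rejects it. The sketch below reproduces only that print fallback chain, not the file, lock or option handling.

import datetime

def flog_line(*parts):
    stamp = str(datetime.datetime.now()).split('.')[0]
    message = stamp + ' ' + ' '.join(str(p) for p in parts)
    try:
        print(message)
    except UnicodeEncodeError:
        # ASCII-safe fallback when the console encoding cannot represent the text.
        print('\n'.join(repr(message).split('\\n')))
    return message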
return the relative path between a folder and a file. folder: folder; file: file. Returns the relative path (str) | def get_relative_path (folder, file) :
if not os.path.exists (folder) : raise PQHException (folder + " does not exist.")
if not os.path.exists (file) : raise PQHException (file + " does not exist.")
sd = folder.replace("\\","/").split("/")
sf = file.replace("\\","/").split("/")
for i in range (0, len (sd)) :
if i >= len (sf) : break
elif sf [i] != sd [i] : break
res = copy.copy (sd)
j = i
while i < len (sd) :
i += 1
res.append ("..")
res.extend (sf [j:])
return os.path.join (*res) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_file_path(cls, file_name, folder_name):\n return cls.file_path.parent / folder_name / file_name",
"def get_relative_path(self, file_path):\n file_path = os.path.abspath(file_path)\n if self.base_dir is not None:\n file_path = file_path.replace(os.path.abspath(self.base_dir), \"\")\n assert file_path[0] == \"/\"\n file_path = file_path[1:]\n return file_path",
"def get_relative_pathname(self):\n return os.path.join(Exam.EXAM_FILES_LOCATION,\n str(self.unique_id)[0:2],\n str(self.unique_id) + self.file_ext)",
"def _GetRelPath(self, filename):\r\n absname = os.path.join(self.repo_dir, filename)\r\n return os.path.relpath(absname)",
"def _GetRelPath(self, filename):\n assert filename.startswith(self.subdir), (filename, self.subdir)\n return filename[len(self.subdir):].lstrip(r\"\\/\")",
"def find_full_path(path_to_file):\r\n for subdir, dirs, files in os.walk(\".\"):\r\n full = os.path.relpath(os.path.join(subdir, path_to_file))\r\n if os.path.exists(full):\r\n return full",
"def relative_path(root_dir, dirpath, f):\n full = os.path.join(dirpath, f)\n if not root_dir:\n return full\n if not full.startswith(root_dir):\n print(\"ERROR - bad path for root\", full)\n return None\n full = full[len(root_dir):]\n if full.startswith(\"/\"):\n return full[1:]\n return full",
"def get_path_relative_to_http_root(file_path):\n return os.path.relpath(file_path, get_http_path_prefix())",
"def _get_rel_path(self, file_path: Union[str, os.PathLike]) -> Optional[str]:\n file_path = Path(file_path).absolute()\n try:\n # use os.path.relpath instead of Path.relative_to in case file_path is not a child of self.base_path\n return os.path.relpath(file_path, self.base_path)\n except ValueError:\n # 2 paths are on different drives\n return None",
"def get_relative_path(file_string, root_path, working_path=\"\"):\r\n if not working_path:\r\n working_path = os.getcwd()\r\n new_file = to_abs_list(file_string, root_path)[0]\r\n new_file = win2unix(new_file)\r\n working_path = win2unix(working_path)\r\n f, d = re.split(\"/\", new_file), re.split(\"/\", working_path)\r\n ff, dd = f[:], d[:]\r\n for i, item in enumerate(f):\r\n try:\r\n d_item = d[i]\r\n except Exception:\r\n break\r\n on_win, nt_lin = get_os_name()\r\n if on_win:\r\n same = (item.lower() == d_item.lower())\r\n else:\r\n same = (item == d_item)\r\n if same:\r\n ff.remove(item)\r\n dd.remove(d_item)\r\n else:\r\n break\r\n # -----------------\r\n if len(ff) == len(f):\r\n return new_file\r\n else:\r\n dd_len = len(dd)\r\n if not dd_len:\r\n pre_path = [\".\"]\r\n else:\r\n if dd_len > 3:\r\n # sometimes the python os module can not treat ../../../../../a/b/c/d.v as a file when the file exists!\r\n return new_file\r\n pre_path = [\"..\"] * dd_len\r\n new_path = pre_path + ff[:]\r\n return \"/\".join(new_path)",
"def file_path(file_name, path):\n return path.rstrip('\\/') + \"/{0}\".format(file_name) if path else os.getcwd() + \"/{0}\".format(file_name)",
"def get_relative_path(file_string, root_path, working_path=\"\"):\r\n if not working_path:\r\n working_path = os.getcwd()\r\n new_file = to_abs_list(file_string, root_path)[0]\r\n working_path = win2unix(working_path)\r\n f, d = re.split(\"/\", new_file), re.split(\"/\", working_path)\r\n ff, dd = f[:], d[:]\r\n for i, item in enumerate(f):\r\n try:\r\n d_item = d[i]\r\n except Exception:\r\n break\r\n on_win, nt_lin = get_os_name()\r\n if on_win:\r\n same = (item.lower() == d_item.lower())\r\n else:\r\n same = (item == d_item)\r\n if same:\r\n ff.remove(item)\r\n dd.remove(d_item)\r\n else:\r\n break\r\n # -----------------\r\n if len(ff) == len(f):\r\n return new_file\r\n else:\r\n dd_len = len(dd)\r\n if not dd_len:\r\n pre_path = [\".\"]\r\n else:\r\n if dd_len > 3:\r\n # sometimes the python os module can not treat ../../../../../a/b/c/d.v as a file when the file exists!\r\n return new_file\r\n pre_path = [\"..\"] * dd_len\r\n new_path = pre_path + ff[:]\r\n return \"/\".join(new_path)",
"def get_relative_file(in_file, directory, ext):\n filename_w_ext = os.path.basename(in_file)\n filename, file_extension = os.path.splitext(filename_w_ext)\n return os.path.join(directory, filename + '.' + ext)",
"def path_to_related(self, path):\n # self.path = \"...functional/fixtures/img/logo.png\"\n # path = \"...functional/fixtures/docs/index.md\"\n current = self.dir\n\n while not path.startswith(current.dir.path):\n current = current.dir.parent.dir\n\n remaining = current.relative(self.path)\n\n level = current.relative(path).count(os.sep)\n\n way_back = os.sep.join(['..'] * level) or '.'\n result = \"{0}/{1}\".format(way_back, remaining)\n\n return result",
"def relative_to_media_root(filepath, media_root=settings.MEDIA_ROOT):\n relative_path = os.path.relpath(filepath, media_root)\n return relative_path",
"def _relpath(self, path):\n\n # abandon query parameters\n path = path.split('?', 1)[0]\n path = path.split('#', 1)[0]\n path = os.path.normpath(unquote(path))\n words = path.split('/')\n words = filter(None, words)\n path = ''\n for word in words:\n drive, word = os.path.splitdrive(word)\n head, word = os.path.split(word)\n if word not in (os.curdir, os.pardir):\n path = os.path.join(path, word)\n\n return path",
"def _adjust_path(self, file):\n path_component = '/osm_pla/test/'\n real_path = os.path.realpath(file)\n if path_component not in real_path:\n return os.path.dirname(real_path) + path_component + os.path.basename(real_path)\n else:\n return real_path",
"def relative_path(filename):\n length = len(os.path.abspath(DOC_BUILD_DIR)) + 1\n return os.path.abspath(filename)[length:]",
"def get_abs_path(file_path, relative_path):\n import os\n dir_path = os.path.dirname(file_path)\n abs_path = os.path.join(dir_path, relative_path)\n return abs_path",
"def full_path(filename):\n\timport os.path\n\tfolder = os.path.dirname(os.path.realpath(__file__))\n\treturn os.path.join(folder, filename)",
"def compute_path(file: mesonlib.FileOrString) -> str:\n if isinstance(file, File):\n return file.absolute_path(self.source_dir, self.build_dir)\n return os.path.normpath(os.path.join(self.build_dir, file))",
"def relative(self, path):\n return re.sub(self.path_regex, '', path).lstrip(os.sep)",
"def path(self, f):\n\t\treturn os.path.join(self.directory, f)",
"def _file_path(self, file: str) -> str:\n return os.path.abspath(f\"tests/resources/{file}\")",
"def format_path(file: str) -> str:\n return os.path.abspath([file.replace('/', os.path.sep)][0])",
"def rel_filename(filename, relative_to=None):\n if relative_to is None:\n relative_to = os.getcwd()\n if not relative_to.endswith(os.path.sep):\n relative_to += os.path.sep\n filename = os.path.normpath(os.path.abspath(filename))\n if filename.startswith(relative_to):\n return filename[len(relative_to):]\n else:\n return filename",
"def file_directory(file):\n return os.path.dirname(os.path.realpath(file))",
"def lpath(file0, file1):\n return os.path.abspath(os.path.join(os.path.dirname(file0), file1))",
"def file_path(self):\n return posixpath.dirname(self.file_name)",
"def get_file_path(filename, path='Data/'):\n path= os.path.abspath(os.path.dirname(path))\n return os.path.join(path, filename)"
] | [
"0.79193115",
"0.7430679",
"0.72578657",
"0.7221772",
"0.7174331",
"0.69166344",
"0.6843943",
"0.68290794",
"0.6818401",
"0.6815434",
"0.68043053",
"0.67901987",
"0.6769839",
"0.6764841",
"0.6727673",
"0.6724213",
"0.6714352",
"0.6676587",
"0.66659003",
"0.66582036",
"0.6634853",
"0.66340953",
"0.6633461",
"0.6632053",
"0.66004705",
"0.65879494",
"0.6577279",
"0.65664095",
"0.65619594",
"0.656092"
] | 0.8269769 | 0 |
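The get_relative_path entry above walks the shared path prefix by hand. For comparison, the standard library already provides this computation via os.path.relpath; a short sketch follows, with made-up example paths.

import os

def relative_path(folder, file):
    # Standard-library equivalent of the hand-rolled version above.
    return os.path.relpath(file, start=folder)

# relative_path('/data/project', '/data/other/file.txt') -> '../other/file.txt'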
Download a file to the folder path_unzip if not present; if the downloading is interrupted, the next time it will start from where it stopped. Before downloading, the function creates a temporary file, which means the downloading has begun. If the connection is lost, an exception is raised and the program stops. Next time, the program will detect the existence of the temporary file and will start downloading from where it previously stopped. After it ends, the temporary file is removed. httpfile (str): url. path_unzip (str): path where to unzip the file; if None, choose GetPath (). outfile (str): local file name; if None, the function will assign a filename unless this parameter is specified | def download (httpfile, path_unzip = None, outfile = None) :
if path_unzip is None : path_unzip = GetPath ()
file = _check_source (httpfile, path_unzip = path_unzip, outfile = outfile)
return file | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getzip(url, zipfile, unzipdir):\n done_file = os.path.join(unzipdir, '.'+os.path.basename(zipfile)+'.done')\n if file_exists(done_file):\n print('{} already downloaded and extracted; skipping. To reinstall \"rm {}\"'.format(os.path.basename(zipfile), done_file))\n else:\n print('Downloading {} as {}.'.format(url, zipfile))\n urlretrieve(url, zipfile)\n print('Extracting {} into {}.'.format(zipfile, unzipdir))\n with ZipFile(zipfile, 'r') as zip:\n zip.extractall(unzipdir)\n os.remove(zipfile)\n with open(done_file, 'w'):\n pass",
"def main(file_url, file_path):\n\n # extract file from the link\n\n if not os.path.exists(file_path):\n os.makedirs(file_path, exist_ok=True)\n \n r = requests.get(str(file_url))\n\n #unzip the zip file\n z = zipfile.ZipFile(io.BytesIO(r.content))\n z.extractall(path = file_path)",
"def download_and_unzip(url, zip_path, csv_path, data_folder):\n\n download_from_url(url, zip_path)\n\n unzip(zip_path, csv_path, data_folder)\n\n print('Done.')",
"def _fetch_and_unzip(url, file_name):\n res = requests.get(url, stream=True, verify=False)\n # get dataset size\n total_size = int(res.headers[\"Content-Length\"])\n temp_size = 0\n with open(file_name, \"wb+\") as f:\n for chunk in res.iter_content(chunk_size=1024):\n temp_size += len(chunk)\n f.write(chunk)\n f.flush()\n done = int(100 * temp_size / total_size)\n # show download progress\n sys.stdout.write(\"\\r[{}{}] {:.2f}%\".format(\"█\" * done, \" \" * (100 - done), 100 * temp_size / total_size))\n sys.stdout.flush()\n print(\"\\n============== {} is already ==============\".format(file_name))\n _unzip(file_name)\n os.remove(file_name)",
"def download_zip(url, folder=None):\n\n # get this file folder name and save the file name\n if not folder:\n folder = os.path.dirname(os.path.abspath(__file__))\n file_name = os.path.split(url)[1]\n\n # Download the file from \"url\" and save it locally under \"file_name\":\n try:\n with urllib.request.urlopen(url) as response, open(folder + \"/\" + file_name, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n except urllib.error.URLError as e:\n print('urllib.error.URLError')\n raise Exception(e)\n except Exception as e:\n raise Exception(e)\n else:\n return folder,file_name",
"def unzip (file, path_unzip = None, outfile = None) :\n if path_unzip is None : path_unzip = GetPath ()\n fLOG (\"unzip file\", file)\n file = _check_source (file, path_unzip = path_unzip, outfile = outfile)\n \n nb = 0\n while not os.path.exists (file) and nb < 10 :\n time.sleep(0.5)\n nb += 1\n \n if not os.path.exists (file) :\n raise FileNotFoundError(file)\n \n return file",
"def download_one_zip(data_url, data_dir):\r\n\r\n zipfile_path, unzip_dir = zip_file_name_from_url(data_url, data_dir)\r\n if not is_there_file(zipfile_path, unzip_dir):\r\n if not os.path.isdir(unzip_dir):\r\n os.makedirs(unzip_dir)\r\n r = requests.get(data_url, stream=True)\r\n with open(zipfile_path, \"wb\") as py_file:\r\n for chunk in r.iter_content(chunk_size=1024): # 1024 bytes\r\n if chunk:\r\n py_file.write(chunk)\r\n unzip_nested_zip(zipfile_path, unzip_dir), download_small_file",
"def download_and_unzip(url, extract_to='.'):\n http_response = urlopen(url)\n zipfile = ZipFile(BytesIO(http_response.read()))\n zipfile.extractall(path=extract_to)",
"def download_file(url, path):\n file_name = path + url.split(\"/\")[-1]\n req = requests.get(url)\n zipped_info = req.content\n print(file_name)\n if not os.path.isfile(file_name):\n print(\"file doesnt exist, writing\", file_name)\n with open(file_name, 'wb') as f:\n f.write(zipped_info)\n else:\n print(\"file exists\", file_name)",
"def download_and_decompress(url, download_path):\n\n # Extract the filename from the URL\n parsed = urlparse(url)\n filename = basename(parsed.path)\n\n # Ensure the output directory exists\n if not os.path.exists(download_path):\n os.makedirs(download_path)\n\n # Get a temporary file path for the compressed file download\n downloaded_file = os.path.join(tempfile.gettempdir(), filename)\n\n # Download the file\n urlretrieve(url, downloaded_file)\n\n # Decompress and extract all files to the specified local path\n tar = tarfile.open(downloaded_file, \"r\")\n tar.extractall(download_path)\n tar.close()\n\n # Remove the downloaded file\n os.remove(downloaded_file)",
"def _maybe_download_and_extract(self, filename):\n if not os.path.exists(self.work_dir):\n os.mkdir(self.work_dir)\n filepath = os.path.join(self.work_dir, filename)\n if not os.path.exists(filepath):\n filepath, _ = urllib.urlretrieve(self.url + filename, filepath)\n statinfo = os.stat(filepath)\n log.info('Successfully downloaded', filename, statinfo.st_size,\n 'bytes.')\n log.info('Extracting zip file ... ')\n f = zipfile.ZipFile(filepath)\n f.extractall(path=self.work_dir)\n log.info('Extraction finished ... ')",
"def download_file(url, fn, cookiejar, cookies_file, wget_bin):\n\ttry:\n\t\t# create the path if need be\n\t\tbasedir = os.path.dirname(fn)\n\t\tif not os.path.isdir(basedir):\n\t\t\tos.makedirs(basedir)\n\n\t\tif wget_bin is not None:\n\t\t\tdownload_file_wget(wget_bin, url, fn, cookies_file)\n\t\telse:\n\t\t\tdownload_file_nowget(url, fn, cookiejar)\n\n\texcept KeyboardInterrupt, e: \n\t\tprint \"\\nKeyboard Interrupt -- Removing partial file:\", fn\n\t\tos.remove(fn)\n\n\t\traise e",
"def _download_file(file_url: str, file_path: str) -> str:\n if os.path.exists(file_path):\n return file_path\n op_desc = f\"Downloading {os.path.basename(file_path)}\"\n try:\n with requests.Session() as req_sess:\n req_res = req_sess.get(file_url, stream=True)\n total_length = int(req_res.headers.get(\"Content-Length\"))\n with tqdm.wrapattr(req_res.raw, \"read\", total=total_length, desc=op_desc) as raw:\n with open(file_path , \"wb\") as file:\n shutil.copyfileobj(raw,file)\n return file_path\n except Exception as network_error:\n if os.path.exists(file_path):\n os.remove(file_path)\n raise network_error",
"def download_and_unzip_dataset(url, path):\n dl = urllib.urlretrieve(url)\n zf = zipfile.ZipFile(dl[0])\n zf.extractall(path)\n return zf",
"def download_file(url, local_path):\n try:\n local_filename = normalizeFilenameToCommonDateFormat(url.split('/')[-1])\n \n destination_dir = local_path #os.path.join(local_path, os.path.splitext(os.path.basename(local_filename))[0])\n \n #if not os.path.exists(destination_dir):\n # os.makedirs(destination_dir)\n \n destination_file = os.path.join(destination_dir, local_filename)\n \n if not os.path.exists(destination_file):\n # NOTE the stream=True parameter \n r = requests.get(url, stream=True)\n with open(destination_file, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024): \n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n #f.flush() commented by recommendation from J.F.Sebastian\n # Sleep so that we aren't rude\n sleep(1)\n else:\n return destination_file + ' already '\n \n return destination_file\n except ValueError as err:\n return \"Skipping %s, not \" % (url.split('/')[-1])",
"def _download_zip(self, zip_url, dest_dir):\n # TODO(jsirois): Wrap with workunits, progress meters, checksums.\n self.context.log.info('Downloading {}...'.format(zip_url))\n sess = requests.session()\n sess.mount('file://', self.LocalFileAdapter())\n res = sess.get(zip_url)\n if not res.status_code == requests.codes.ok:\n raise TaskError('Failed to download {} ({} error)'.format(zip_url, res.status_code))\n\n with open_zip(BytesIO(res.content)) as zfile:\n safe_mkdir(dest_dir)\n for info in zfile.infolist():\n if info.filename.endswith('/'):\n # Skip directories.\n continue\n # Strip zip directory name from files.\n filename = os.path.relpath(info.filename, get_basedir(info.filename))\n f = safe_open(os.path.join(dest_dir, filename), 'w')\n f.write(zfile.read(info))\n f.close()",
"def download_small_zip(data_url, data_dir):\r\n zipfile_path, unzip_dir = zip_file_name_from_url(data_url, data_dir)\r\n if not is_there_file(zipfile_path, unzip_dir):\r\n if not os.path.isdir(unzip_dir):\r\n os.mkdir(unzip_dir)\r\n zipfile_path, _ = urllib.request.urlretrieve(data_url, zipfile_path)\r\n unzip_nested_zip(zipfile_path, unzip_dir)",
"def download(url: str, dest: Optional[str] = None, extract: bool=True, ignore_if_exists: bool = False,\n compression: Optional[str] = None):\n\n base_url = url.split(\"?\")[0]\n\n if dest is None:\n dest = [f for f in base_url.split(\"/\") if f][-1]\n\n if os.path.exists(dest) and ignore_if_exists:\n return dest\n\n stream = UrlStream(url)\n extension = base_url.split(\".\")[-1].lower()\n\n if extract and extension in ['gz', 'bz2', 'zip', 'tgz', 'tar']:\n os.makedirs(dest, exist_ok=True)\n\n if extension == \"gz\" and not base_url.endswith(\".tar.gz\"):\n decompressed_file = gzip.GzipFile(fileobj=stream)\n with open(os.path.join(dest, url.split(\"/\")[-1][:-3]), 'wb') as f:\n while True:\n d = decompressed_file.read(1024 * 1024)\n if not d:\n break\n f.write(d)\n else:\n if extension in ['gz', 'bz2', \"tgz\", \"tar\"]:\n decompressed_file = tarfile.open(fileobj=stream, mode='r|' +\n (compression or (\n \"gz\" if extension == \"tgz\" else extension)))\n elif extension == 'zip':\n decompressed_file = zipfile.ZipFile(stream, mode='r')\n else:\n assert False, \"Invalid extension: %s\" % extension\n\n decompressed_file.extractall(dest)\n else:\n try:\n with open(dest, 'wb') as f:\n for d in stream.iter_content(1024 * 1024):\n f.write(d)\n except:\n os.remove(dest)\n raise\n return dest",
"def download():\n try:\n cli.run(\n [URL, '--output', TEMP_DIR],\n )\n except SystemExit:\n return None",
"def get_file(url):\n helpers.make_workdir() # create temp working directory\n file_url = url + constant.MALICIOUS_LOCATION\n print(file_url)\n filename = wget.download(file_url, out=constant.WORKDIR)\n return filename",
"def download_and_unzip_data(url, destination, prefix='state-'):\n # make sure destination exists or create a temporary directory\n if not destination:\n destination = tempfile.mkdtemp(prefix=prefix)\n logger.debug(\"Created temp directory {}\".format(destination))\n else:\n if not os.path.exists(destination):\n os.makedirs(destination)\n logger.info(\"Created {}\".format(destination))\n zip_filename = get_zipfile_path(url, destination)\n # don't re-download data if raw data file already exists\n if os.path.exists(zip_filename):\n logger.debug(\"{} exists, skipping download\".format(zip_filename))\n else:\n logger.debug(\"Downloading data to {}\".format(zip_filename))\n response = requests.get(url, stream=True)\n # XXX check status code here; e.g., if permissions haven't been granted\n # for a file being downloaded from S3 a 403 will be returned\n content_length = int(response.headers.get('content-length'))\n start = time.clock()\n downloaded = 0\n with open(zip_filename, 'wb') as f:\n for chunk in response.iter_content(chunk_size=1024):\n if chunk:\n downloaded += len(chunk)\n now = time.clock()\n if (now - start) >= 5:\n logger.debug('{0:.2g}% downloaded'.format(downloaded/content_length*100))\n start = now\n f.write(chunk)\n f.flush()\n logger.debug('100% downloaded')\n\n unzip_data(destination, url=url)\n return destination",
"def download(self, *path, **kwargs):\n extract = kwargs.get(\"extract\", True)\n callback = kwargs.get(\"callback\", None)\n info = self.serverfiles.info(*path)\n\n extract = extract and \"compression\" in info\n target = self.localpath(*path)\n self.serverfiles.download(*path,\n target=target + \".tmp\" if extract else target,\n callback=callback)\n\n _save_file_info(target + '.info', info)\n\n if extract:\n if info.get(\"compression\") in [\"tar.gz\", \"tar.bz2\"]:\n f = tarfile.open(target + \".tmp\")\n try:\n os.mkdir(target)\n except OSError:\n pass\n f.extractall(target)\n elif info.get(\"compression\") == \"gz\":\n f = gzip.open(target + \".tmp\")\n shutil.copyfileobj(f, open(target, \"wb\"))\n elif info.get(\"compression\") == \"bz2\":\n f = bz2.BZ2File(target + \".tmp\", \"r\")\n shutil.copyfileobj(f, open(target, \"wb\"))\n f.close()\n os.remove(target + \".tmp\")",
"def _download(url, outpath=None, dirname=None, branch='master', release=None):\n six.print_('downloading...')\n outfolder = outpath or os.getcwd()\n file, archive_url = get_archive_url(url, branch, release)\n six.print_(archive_url)\n if dirname:\n outfolder = \"{}/{}.zip\".format(outfolder, dirname)\n return file, wget.download(archive_url, out=outfolder)",
"def download():\n response = requests.get(URL, stream=True)\n\n file = open(FILE_NAME, 'wb')\n file.write(response.content)\n\n with zipfile.ZipFile(FILE_NAME, 'r') as zip_ref:\n zip_ref.extractall()\n\n file.close()\n os.remove(FILE_NAME)",
"def download_file_with_extract(file_link, file_path, extract_path):\n if not os.path.exists(extract_path):\n download_file(file_link, file_path)\n zip_ref = zipfile.ZipFile(file_path, 'r')\n print(\"extracting downloaded file...\")\n zip_ref.extractall(extract_path)\n os.remove(file_path)\n print(\"extracted and removed downloaded zip file\")\n print(\"file already extracted in the path %s\" % extract_path)",
"def main(url, localfile):\n ph.download_file(url, localfile)",
"def download_file(url, download_path):\n\n # Extract the filename from the URL\n parsed = urlparse(url)\n filename = basename(parsed.path)\n\n # Ensure the output directory exists\n if not os.path.exists(download_path):\n os.makedirs(download_path)\n\n # Get a temporary file path for the compressed file download\n downloaded_file = os.path.join(tempfile.gettempdir(), filename)\n\n # Download the file\n urlretrieve(url, downloaded_file)\n\n # Move the file to the destination folder\n destination_path = os.path.join(download_path, filename)\n os.rename(downloaded_file, destination_path)",
"def download(self, url):\n try:\n webFile = urllib.urlopen(url)\n localFile = open(self.workdir + \"/\" + url.split('/')[-1], 'w')\n localFile.write(webFile.read())\n webFile.close()\n localFile.close()\n except IOError:\n print(\"could not get url \" + url)",
"def get_file(url, file_name=None):\n cache_dir = os.path.join(os.path.expanduser(\"~\"), \".jhML\")\n\n if file_name is None:\n file_name = url[url.rfind('/') + 1:]\n file_path = os.path.join(cache_dir, file_name)\n\n if not os.path.exists(cache_dir):\n os.mkdir(cache_dir)\n\n if os.path.exists(file_path):\n return file_path\n\n print(\"Downloading: \" + file_name)\n try:\n urllib.request.urlretrieve(url, file_path, show_progress)\n except (Exception, KeyboardInterrupt) as e:\n if os.path.exists(file_path):\n os.remove(file_path)\n raise\n print(\" Done\")\n\n return file_path",
"def download_and_extract(path, url, input_filename, target_filename):\n logging.info('Downloading and extracting data to: %s' % path)\n # Check if extracted files already exist in path\n input_file = find_file(path, input_filename)\n target_file = find_file(path, target_filename)\n if input_file and target_file:\n logging.info(\"Already downloaded and extracted %s.\" % url)\n return input_file, target_file\n\n # Download archive file if it doesn't already exist.\n compressed_file = download_from_url(path, url)\n\n # Extract compressed files\n logging.info(\"Extracting %s.\" % compressed_file)\n with tarfile.open(compressed_file, \"r:gz\") as corpus_tar:\n corpus_tar.extractall(path)\n\n # Return filepaths of the requested files.\n input_file = find_file(path, input_filename)\n target_file = find_file(path, target_filename)\n\n if input_file and target_file:\n return input_file, target_file\n\n raise OSError(\"Download/extraction failed for url %s to path %s\" %\n (url, path))"
] | [
"0.7106732",
"0.7035156",
"0.67302674",
"0.66116464",
"0.65801996",
"0.65664876",
"0.6558638",
"0.65487367",
"0.6536646",
"0.6485532",
"0.642969",
"0.6391242",
"0.63488287",
"0.63421416",
"0.62707514",
"0.62670064",
"0.6254896",
"0.6252065",
"0.6247422",
"0.6233285",
"0.62292504",
"0.622303",
"0.6219919",
"0.6191934",
"0.6191374",
"0.61678374",
"0.61631525",
"0.61631477",
"0.6139231",
"0.613326"
] | 0.8036328 | 0 |
unzip a file into the temporary folder. file (str): the zip file; path_unzip (str): where to unzip the file, if None, choose GetPath (); outfile (str): if None, the function will assign a filename unless this parameter is specified. Returns the expanded file name. | def unzip (file, path_unzip = None, outfile = None) :
if path_unzip is None : path_unzip = GetPath ()
fLOG ("unzip file", file)
file = _check_source (file, path_unzip = path_unzip, outfile = outfile)
nb = 0
while not os.path.exists (file) and nb < 10 :
time.sleep(0.5)
nb += 1
if not os.path.exists (file) :
raise FileNotFoundError(file)
return file | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unzip_file(data_zip, path_unzip):\r\n with zipfile.ZipFile(data_zip, \"r\") as zip_temp:\r\n zip_temp.extractall(path_unzip)",
"def _unzip_file(zip_file_path: str, unzip_dir: str = \"\") -> None:\n if not unzip_dir:\n unzip_dir = os.path.dirname(zip_file_path)\n op_desc = f\"Extracting: {os.path.basename(zip_file_path)}\"\n try:\n with ZipFile(file=zip_file_path) as zip_file:\n for member_name in tqdm(zip_file.namelist(), desc=op_desc):\n file_name = os.path.basename(member_name)\n if not file_name:\n continue\n target_path = os.path.join(unzip_dir, file_name)\n target_path = open(target_path, \"wb\")\n source_file = zip_file.open(member_name)\n with source_file, target_path:\n shutil.copyfileobj(source_file, target_path)\n os.remove(zip_file_path)\n except Exception as zip_error:\n zip_file_str = os.path.basename(zip_file_path)\n zip_file_str = os.path.splitext(zip_file_str)[0]\n for file_name in os.listdir(unzip_dir):\n if zip_file_str in file_name:\n os.remove(os.path.join(unzip_dir, file_name))\n raise zip_error",
"def unzip_file(zip_file: str) -> None:\n destination = tempfile.mkdtemp(prefix='gaelo_pross_unzip_')\n with ZipFile(zip_file) as my_zip:\n for member in my_zip.namelist():\n filename = os.path.basename(member)\n # skip directories\n if not filename:\n continue\n # copy file (taken from zipfile's extract)\n source = my_zip.open(member)\n target = open(os.path.join(destination, filename), \"wb\")\n with source, target:\n shutil.copyfileobj(source, target)\n # return destination",
"def unzip_to_temp_dir(zip_file_name):\n if not zip_file_name or not os.path.exists(zip_file_name):\n return None\n\n zf = zipfile.ZipFile(zip_file_name)\n\n if zf.testzip() is not None:\n return None\n\n # Unzip the files into a temporary directory\n LOGGER.info(\"Extracting zipped file: %s\" % zip_file_name)\n tempdir = tempfile.mkdtemp()\n\n try:\n # Create directories that don't exist\n for zip_name in zf.namelist():\n # We have no knowledge on the os where the zipped file was\n # created, so we restrict to zip files with paths without\n # charactor \"\\\" and \"/\".\n name = (zip_name.replace(\"\\\\\", os.path.sep).\n replace(\"/\", os.path.sep))\n dest = os.path.join(tempdir, name)\n if (name.endswith(os.path.sep) and not os.path.exists(dest)):\n os.mkdir(dest)\n LOGGER.debug(\"Directory %s created.\" % dest)\n\n # Copy files\n for zip_name in zf.namelist():\n # We have no knowledge on the os where the zipped file was\n # created, so we restrict to zip files with paths without\n # charactor \"\\\" and \"/\".\n name = (zip_name.replace(\"\\\\\", os.path.sep).\n replace(\"/\", os.path.sep))\n dest = os.path.join(tempdir, name)\n if not (name.endswith(os.path.sep)):\n LOGGER.debug(\"Copying file %s......\" % dest)\n outfile = open(dest, 'wb')\n outfile.write(zf.read(zip_name))\n outfile.close()\n LOGGER.debug(\"File %s copied.\" % dest)\n\n LOGGER.info(\"Unzipped file can be found at %s\" % tempdir)\n return tempdir\n\n except IOError as err:\n LOGGER.error(\"Error in extracting webdriver.xpi: %s\" % err)\n return None",
"def unzip(file_path: str) -> str:\n destination_directory, zip_file = os.path.split(file_path)\n extracted_path, _ = os.path.splitext(file_path)\n\n if file_path.endswith('tar.gz') or file_path.endswith('tgz'):\n handle_tar(file_path, 'r:gz', extracted_path, destination_directory)\n elif file_path.endswith('tar'):\n handle_tar(file_path, 'r:', extracted_path, destination_directory)\n return extracted_path",
"def unzip_file(zipfile_path, target_dir, touchfile_path):\r\n with zipfile.ZipFile(zipfile_path, 'r') as zip_ref:\r\n zip_ref.extractall(target_dir)\r\n\r\n with open(touchfile_path, 'w') as touchfile:\r\n touchfile.write(f'unzipped {zipfile_path}')",
"def unzip_all(input_file: pathlib.Path) -> Tuple[pathlib.Path, tempfile.TemporaryDirectory]:\n output_temp_dir = tempfile.TemporaryDirectory()\n output_path = pathlib.Path(output_temp_dir.name)\n\n extract_zip(input_file, output_path)\n\n return output_path, output_temp_dir",
"def _unzip(filename, branch=None):\n try:\n file = zipfile.ZipFile(filename)\n basename = os.path.dirname(filename)\n basename = basename.replace(\".zip\", \"\")\n file.extractall(path=basename)\n return basename, filename\n except Exception as e:\n six.print_(e)",
"def unzip(zipped_file, output_directory=None,\n prefix=\"apsharvest_unzip_\", suffix=\"\"):\n if not output_directory:\n # We create a temporary directory to extract our stuff in\n try:\n output_directory = mkdtemp(suffix=suffix,\n prefix=prefix,\n dir=os.path.join(CFG_TMPSHAREDDIR, 'apsharvest'))\n except Exception, e:\n try:\n os.removedirs(output_directory)\n except TypeError:\n pass\n raise e\n return _do_unzip(zipped_file, output_directory)",
"def unzip(zip_path, output_file, data_folder):\n\n print('Unzipping file: {}'.format(zip_path))\n pyunpack.Archive(zip_path).extractall(data_folder)\n\n # Checks if unzip was successful\n if not os.path.exists(output_file):\n raise ValueError(\n 'Error in unzipping process! {} not found.'.format(output_file))",
"def unzip_file(path_to_zip_file: str, dir_to_extract_to: str) -> str:\n with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:\n zip_ref.extractall(dir_to_extract_to)\n return f'{dir_to_extract_to}/{zip_ref.namelist()[0]}'",
"def dir_tester_unzip_tmp():\n return abspath('tmpunzip')",
"def unzip(path):\n zip_ref = zipfile.ZipFile(path, 'r')\n new_path = path[:-3]\n zip_ref.extractall(new_path)\n zip_ref.close()\n return new_path",
"def test_unzip_file(self):\n\n # Path to the compressed file\n zipped_file = os.path.join(self.xmlfilepath, \"DLTINS_20210117_01of01.zip\")\n # Test for correct data\n # NOTE : For this test case to pass the source xml zipped file\n # should be present in the download path\n self.assertTrue(unzip_file(zipped_file, self.xmlfilepath))\n\n # Test for wrong target path\n self.assertFalse(unzip_file(zipped_file, r\"D:\\kqcA CK j \"))\n\n # Test for incorrect compressed file\n self.assertFalse(unzip_file(\"D:\\somerandomfile\", self.xmlfilepath))",
"def _do_unzip(zipped_file, output_directory):\n z = zipfile.ZipFile(zipped_file)\n for path in z.namelist():\n relative_path = os.path.join(output_directory, path)\n dirname, dummy = os.path.split(relative_path)\n try:\n if relative_path.endswith(os.sep) and not os.path.exists(dirname):\n os.makedirs(relative_path)\n elif not os.path.exists(relative_path):\n dirname = os.path.join(output_directory, os.path.dirname(path))\n if os.path.dirname(path) and not os.path.exists(dirname):\n os.makedirs(dirname)\n fd = open(relative_path, \"w\")\n fd.write(z.read(path))\n fd.close()\n except IOError, e:\n raise e\n return output_directory",
"def test_unzip_without_flatten(\n self,\n mocker: MockerFixture,\n tmp_path: pathlib.Path,\n ) -> None:\n mock_zip = MockZipFile()\n mock_zip.add_files(\"my_dir/abcde\", \"my_dir/funky\")\n\n zipfile = mocker.patch(\"matl_online.utils.zipfile.ZipFile\")\n zipfile.return_value = mock_zip\n\n unzip(BytesIO(), tmp_path, flatten=False)\n\n extract_args = mock_zip.extract_all_arguments\n\n assert len(extract_args) == 1\n assert extract_args[0] == tmp_path",
"def test_unzip_and_flatten(\n self,\n mocker: MockerFixture,\n tmp_path: pathlib.Path,\n ) -> None:\n mock_zip = MockZipFile()\n mock_zip.add_files(\"my_dir/abcde\", \"my_dir/funky\")\n\n zipfile = mocker.patch(\"matl_online.utils.zipfile.ZipFile\")\n zipfile.return_value = mock_zip\n\n unzip(BytesIO(), tmp_path)\n\n extract_args = mock_zip.extract_all_arguments\n\n assert len(extract_args) == 2\n assert extract_args[0] == tmp_path\n\n output_names = [obj.filename for obj in extract_args[1]]\n\n assert len(output_names) == 2\n assert output_names[0] == \"abcde\"\n assert output_names[1] == \"funky\"",
"def _unzip_files(self) -> None:\n for file in self.input_path.iterdir():\n if is_zipfile(file):\n with ZipFile(file, mode=\"r\") as archive:\n archive.extractall(path=self.temp_path)",
"def unzip_first(input_filename: str, extract_dir: str) -> str:\n with zipfile.ZipFile(input_filename) as zip_file:\n zip_file_list = zip_file.infolist()\n zip_index = 0\n while zip_index < len(zip_file_list) and zip_file_list[zip_index].is_dir():\n zip_index += 1\n if zip_index == len(zip_file_list):\n res = ''\n else:\n file_to_extract = zip_file_list[zip_index]\n zip_file.extract(file_to_extract, extract_dir)\n res = os.path.join(extract_dir, file_to_extract.filename)\n return res",
"def unzip_single_file(zip_file_name, output_file_name):\n if not os.path.isfile(output_file_name):\n with open(output_file_name, 'wb') as out_file:\n with zipfile.ZipFile(zip_file_name) as zipped:\n for info in zipped.infolist():\n if output_file_name in info.filename:\n with zipped.open(info) as requested_file:\n out_file.write(requested_file.read())\n return",
"def _check_zip_file (filename, path_unzip, outfile) :\n assert path_unzip is not None\n file,ext = os.path.splitext (filename)\n ext = ext.lower ()\n if ext == \".gz\" :\n \n import gzip\n \n if outfile is None :\n dest = filename.split (\"!\")\n dest = dest [ len(dest)-1 ]\n ext = os.path.splitext (dest) [1]\n dest = dest.replace (ext, \".txt\")\n path = os.path.split (filename)\n path = \"/\".join (path [:len (path)-1])\n dest = path + \"/\" + dest\n else :\n dest = outfile\n \n if not os.path.exists (dest) :\n file = gzip.GzipFile (filename, \"r\")\n if outfile is None :\n dest = os.path.split (dest) [1]\n dest = os.path.join (path_unzip, dest)\n \n if os.path.exists (dest) :\n st1 = datetime.datetime.utcfromtimestamp (os.stat (filename).st_mtime)\n st2 = datetime.datetime.utcfromtimestamp (os.stat (dest).st_mtime)\n if st2 > st1 : \n fLOG (\"ungzipping file (already done)\", dest)\n return dest\n \n fLOG (\"ungzipping file\", dest)\n f = open (dest, \"w\")\n data = file.read (2**27)\n size = 0\n while len (data) > 0 :\n size += len (data)\n fLOG (\"ungzipping \", size, \"bytes\")\n if isinstance (data, bytes) : f.write (bytes.decode (data))\n else : f.write (data)\n data = file.read (2**27)\n f.close ()\n file.close ()\n \n return dest\n \n if ext == \".zip\" :\n \n import zipfile\n try :\n file = zipfile.ZipFile (filename, \"r\")\n except Exception as e :\n fLOG (\"problem with \", filename)\n raise e\n \n if len (file.infolist()) != 1:\n if outfile is not None :\n raise PQHException (\"the archive contains %d files and not one as you expected by filling outfile\" % len (file.infolist()))\n fLOG (\"unzip file (multiple) \", filename)\n #message = \"\\n\".join ([ fi.filename for fi in file.infolist() ] )\n #raise Exception.YstException(\"ColumnInfoSet.load_from_file: file %s contains no file or more than one file\\n\" + message)\n folder = os.path.split (filename) [0]\n todo = 0\n _zip7_path = r\"c:\\Program Files\\7-Zip\"\n zip7 = os.path.exists (_zip7_path)\n wait = [ ]\n for info in file.infolist () :\n fileinside = info.filename\n dest = os.path.join (folder, fileinside)\n if not os.path.exists (dest) :\n fol = os.path.split (dest) [0]\n if not os.path.exists (fol) : os.makedirs (fol)\n if os.path.exists (dest) :\n st1 = datetime.datetime.utcfromtimestamp (os.stat (filename).st_mtime)\n st2 = datetime.datetime.utcfromtimestamp (os.stat (dest).st_mtime)\n if st2 > st1 : \n continue\n \n if not sys.platform.startswith(\"win\") or not zip7 :\n data = file.read (fileinside)\n dest = os.path.split (dest) [1]\n dest = os.path.join (path_unzip, dest)\n fLOG (\"unzipping file\", dest)\n wait.append(dest)\n f = open (dest, \"w\")\n if isinstance (data, bytes) :\n f.write (str (data))\n else :\n f.write (data)\n f.close ()\n else :\n todo += 1\n \n if todo > 0 and zip7 :\n dest = os.path.realpath (path_unzip)\n cmd = '\"' + _zip7_path + '\\\\7z.exe\" e -y -o\"%s\" \"%s\"' % (dest, os.path.realpath (filename)) \n out,err = run_cmd (cmd, wait = True)\n if len (err) > 0 : raise PQHException (\"command {0} failed\\n{1}\".format(cmd,err))\n if \"Error\" in out : raise PQHException (\"command {0} failed\\n{1}\".format(cmd,out))\n else :\n dest = path_unzip\n \n file.close ()\n \n ch = False\n while not ch :\n ch = True\n for a in wait :\n if not os.path.exists(a) : \n ch = False\n break\n time.sleep(0.5)\n \n return dest\n \n else :\n for info in file.infolist () :\n fileinside = info.filename\n \n path = os.path.split (filename)\n dest = outfile if outfile is not None else path [0] + \"/\" + 
fileinside\n if not os.path.exists (dest) :\n data = file.read (fileinside)\n if outfile is None :\n dest = os.path.split (dest) [1]\n dest = os.path.join (path_unzip, dest)\n \n if os.path.exists (dest) :\n st1 = datetime.datetime.utcfromtimestamp (os.stat (filename).st_mtime)\n st2 = datetime.datetime.utcfromtimestamp (os.stat (dest).st_mtime)\n if st2 > st1 : \n fLOG(\"unzipping one file (already done)\", dest)\n return dest\n \n fLOG(\"unzipping one file\", dest)\n f = open (dest, \"w\")\n if isinstance (data, bytes) :\n f.write (bytes.decode (data))\n else :\n f.write (data)\n f.close ()\n file.close ()\n return dest\n \n return filename",
"def main(self, line):\n # filename with optional destination\n args = bash(line)\n if args is None:\n return\n elif not (1 <= len(args) <= 2):\n print \"unzip: Usage: unzip file [destination]\"\n else:\n filename = os.path.abspath(args[0])\n if not os.path.isfile(filename):\n print \"unzip: %s: No such file\" % args[0]\n else:\n # PK magic marker check\n f = open(filename)\n try:\n pk_check = f.read(2)\n except Exception:\n pk_check = ''\n finally:\n f.close()\n if pk_check != 'PK':\n print \"unzip: %s: does not appear to be a zip file\" % args[0]\n else:\n if (os.path.basename(filename).lower().endswith('.zip')):\n altpath = os.path.splitext(os.path.basename(filename))[0]\n else:\n altpath = os.path.basename(filename) + '_unzipped'\n altpath = os.path.join(os.path.dirname(filename), altpath)\n location = (args[1:2] or [altpath])[0]\n if (os.path.exists(location)) and not (os.path.isdir(location)):\n print \"unzip: %s: destination is not a directory\" % location\n return\n elif not os.path.exists(location):\n os.makedirs(location)\n zipfp = open(filename, 'rb')\n try:\n zipf = zipfile.ZipFile(zipfp)\n # check for a leading directory common to all files and remove it\n dirnames = [os.path.join(os.path.dirname(x), '') for x in zipf.namelist()]\n common_dir = os.path.commonprefix(dirnames or ['/'])\n # Check to make sure there aren't 2 or more sub directories with the same prefix\n if not common_dir.endswith('/'):\n common_dir = os.path.join(os.path.dirname(common_dir), '')\n for name in zipf.namelist():\n data = zipf.read(name)\n fn = name\n if common_dir:\n if fn.startswith(common_dir):\n fn = fn.split(common_dir, 1)[-1]\n elif fn.startswith('/' + common_dir):\n fn = fn.split('/' + common_dir, 1)[-1]\n fn = fn.lstrip('/')\n fn = os.path.join(location, fn)\n dirf = os.path.dirname(fn)\n if not os.path.exists(dirf):\n os.makedirs(dirf)\n if fn.endswith('/'):\n # A directory\n if not os.path.exists(fn):\n os.makedirs(fn)\n else:\n fp = open(fn, 'wb')\n try:\n fp.write(data)\n finally:\n fp.close()\n except Exception:\n zipfp.close()\n print \"unzip: %s: zip file is corrupt\" % args[0]\n return\n finally:\n zipfp.close()",
"def unzip(zip_file, dest_folder):\n zip = zipfile.ZipFile(zip_file, 'r')\n if os.path.exists(dest_folder):\n pass\n else:\n os.makedirs(dest_folder)\n if dest_folder[-1] != '/':\n dest_folder += '/'\n for filename in zip.namelist():\n # --- Folder?\n if filename.endswith('/'):\n if os.path.exists(join(abspath(dest_folder),filename)):\n pass\n else:\n os.makedirs(join(abspath(dest_folder),filename))\n else:\n try:\n os.makedirs(normpath((abspath(dest_folder)+'/'+dirname(filename))))\n try:\n bytes = zip.read(filename)\n #print 'Unzipping file:', filename, 'with', len(bytes), 'bytes..'\n file((join(dest_folder,filename)), 'wb').write(zip.read(filename))\n accesstime = time.time()\n timeTuple=(int(zip.getinfo(filename).date_time[0]),\\\n int(zip.getinfo(filename).date_time[1]),\\\n int(zip.getinfo(filename).date_time[2]),\\\n int(zip.getinfo(filename).date_time[3]) ,\\\n int(zip.getinfo(filename).date_time[4]),\\\n int(zip.getinfo(filename).date_time[5]),\\\n int(0),int(0),int(0))\n modifiedtime = mktime(timeTuple)\n utime((join(dest_folder,filename)), (accesstime,modifiedtime))\n except IOError:\n pass\n except:\n if os.path.exists(normpath((abspath(dest_folder)+'/'+dirname(filename)))):\n try:\n bytes = zip.read(filename)\n #print 'Unzipping file:', filename, 'with', len(bytes), 'bytes..'\n file((join(dest_folder,filename)), 'wb').write(zip.read(filename))\n accesstime = time.time()\n timeTuple=(int(zip.getinfo(filename).date_time[0]),\\\n int(zip.getinfo(filename).date_time[1]),\\\n int(zip.getinfo(filename).date_time[2]),\\\n int(zip.getinfo(filename).date_time[3]) ,\\\n int(zip.getinfo(filename).date_time[4]),\\\n int(zip.getinfo(filename).date_time[5]),\\\n int(0),int(0),int(0))\n modifiedtime = mktime(timeTuple)\n utime((join(dest_folder,filename)), (accesstime,modifiedtime))\n except IOError:\n pass\n else:\n os.makedirs(normpath((abspath(dest_folder)+'/'+dirname(filename))))\n zip.close",
"def unzip_file(path_to_zip_file, directory_to_extract_to):\n \n with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:\n zip_ref.extractall(directory_to_extract_to)\n\n return",
"def unzip(input_file, output_file):\n output_file = validator.validate_unzip(input_file, output_file)\n process = subprocess.Popen([PBWT_BIN, 'unzip', input_file, output_file],\n stdout=subprocess.PIPE)\n process_results(str(process.communicate()[0]))",
"def extract_file(self):\n# path_destination = os.path.join(\n# self.root, self.resources.replace(\".zip\", \"\"))\n# os.makedirs(path_destination, exist_ok=True)\n shutil.unpack_archive(os.path.join(\n self.root, self.resources), self.root)\n os.remove(os.path.join(self.root, self.resources))",
"def decompress_file(path, temp_dir='tmp'):\n if path.endswith('.gz'):\n logger.info('Decompressing {} to {}'.format(path, temp_dir))\n return decompress_gzip(\n path,\n os.path.join(temp_dir,\n os.path.splitext(os.path.basename(path))[0])\n )\n else:\n return path",
"def unzip_file(self, filename, location, flatten=True):\n if not os.path.exists(location):\n os.makedirs(location)\n zipfp = open(filename, 'rb')\n try:\n zip = zipfile.ZipFile(zipfp)\n leading = has_leading_dir(zip.namelist()) and flatten\n for name in zip.namelist():\n data = zip.read(name)\n fn = name\n if leading:\n fn = split_leading_dir(name)[1]\n fn = os.path.join(location, fn)\n dir = os.path.dirname(fn)\n if not os.path.exists(dir):\n os.makedirs(dir)\n if fn.endswith('/') or fn.endswith('\\\\'):\n # A directory\n if not os.path.exists(fn):\n os.makedirs(fn)\n else:\n fp = open(fn, 'wb')\n try:\n fp.write(data)\n finally:\n fp.close()\n finally:\n zipfp.close()",
"def unzip(file_loc, extract_loc=None):\n try:\n with zipfile.ZipFile(\n file_loc, \"r\"\n ) as file: # opening the zip file using 'zipfile.ZipFile' class\n print(\"Ok\")\n # ZipFile.infolist() returns a list containing all the members of an archive file\n print(file.infolist())\n\n # ZipFile.namelist() returns a list containing all the members with names of an archive file\n print(file.namelist())\n\n # ZipFile.getinfo(path = filepath) returns the information about a member of Zip file.\n # It raises a KeyError if it doesn't contain the mentioned file\n print(file.getinfo(file.namelist()[-1]))\n\n # If extraction directory not given, extracted to 'data/processed/file_name'\n if extract_loc == None:\n base = os.path.dirname(file_loc)\n folder_name = os.path.basename(base)\n extract_loc = \"data/processed/\" + folder_name\n\n # ZipFile.extractall(path = filepath, pwd = password) extracts all\n # the files to current directory\n file.extractall(path=extract_loc)\n # after executing check the directory to see extracted files\n\n except zipfile.BadZipFile: # if the zip file has any errors then it prints the\n # error message which you wrote under the 'except' block\n print(\"Error: Zip file is corrupted\")\n\n except zipfile.LargeZipFile:\n print(\"Error: File size if too large\") # if the file size is too large to\n # open it prints the error you have written\n except FileNotFoundError:\n print(\"Error: File not found\")",
"def unzip(zip_path, cleanup=False):\n zfile = zipfile.ZipFile(zip_path, 'r')\n zfile.extractall(os.path.dirname(zip_path))\n zfile.close()\n if cleanup:\n os.remove(zip_path)"
] | [
"0.7139041",
"0.70639694",
"0.70058393",
"0.6817799",
"0.6761051",
"0.6689558",
"0.6671697",
"0.6654961",
"0.65346366",
"0.64469576",
"0.64213866",
"0.64149886",
"0.6412602",
"0.6362819",
"0.62293935",
"0.6175127",
"0.61503965",
"0.61491144",
"0.61399084",
"0.6136558",
"0.6126004",
"0.6092503",
"0.6077451",
"0.60660017",
"0.60542715",
"0.6024605",
"0.6011563",
"0.6002176",
"0.5948664",
"0.594542"
] | 0.7314113 | 0 |
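Note: the unzip helper documented in the record above leans on module-level utilities (GetPath, fLOG, _check_source) that are not shown in this row, so it cannot run in isolation. As an illustrative, stdlib-only sketch of the same idea — extract an archive into a (temporary) folder and return the resulting path — something like the following would do; the function name and defaults here are assumptions, not part of the dataset:

import os
import tempfile
import zipfile

def unzip_to_folder(zip_path, dest_dir=None):
    # Illustrative stand-in, not the dataset's unzip(): extract zip_path
    # into dest_dir (a fresh temp folder by default) and return that folder.
    if dest_dir is None:
        dest_dir = tempfile.mkdtemp(prefix="unzip_")
    os.makedirs(dest_dir, exist_ok=True)
    with zipfile.ZipFile(zip_path, "r") as archive:
        archive.extractall(path=dest_dir)
    return dest_dir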
this function tests if a file is a zip file (extension zip); if so, it unzips it into another file and returns the new name, and if the unzipped file already exists, the file is not unzipped a second time. filename: any filename (.zip or not); if txt, it has no effect. path_unzip: if None, unzip it where it stands, otherwise place it into that path. outfile: if None, the function will assign a filename unless this parameter is specified. Returns the unzipped file, or filename if the format was not zip. | def _check_zip_file (filename, path_unzip, outfile) :
assert path_unzip is not None
file,ext = os.path.splitext (filename)
ext = ext.lower ()
if ext == ".gz" :
import gzip
if outfile is None :
dest = filename.split ("!")
dest = dest [ len(dest)-1 ]
ext = os.path.splitext (dest) [1]
dest = dest.replace (ext, ".txt")
path = os.path.split (filename)
path = "/".join (path [:len (path)-1])
dest = path + "/" + dest
else :
dest = outfile
if not os.path.exists (dest) :
file = gzip.GzipFile (filename, "r")
if outfile is None :
dest = os.path.split (dest) [1]
dest = os.path.join (path_unzip, dest)
if os.path.exists (dest) :
st1 = datetime.datetime.utcfromtimestamp (os.stat (filename).st_mtime)
st2 = datetime.datetime.utcfromtimestamp (os.stat (dest).st_mtime)
if st2 > st1 :
fLOG ("ungzipping file (already done)", dest)
return dest
fLOG ("ungzipping file", dest)
f = open (dest, "w")
data = file.read (2**27)
size = 0
while len (data) > 0 :
size += len (data)
fLOG ("ungzipping ", size, "bytes")
if isinstance (data, bytes) : f.write (bytes.decode (data))
else : f.write (data)
data = file.read (2**27)
f.close ()
file.close ()
return dest
if ext == ".zip" :
import zipfile
try :
file = zipfile.ZipFile (filename, "r")
except Exception as e :
fLOG ("problem with ", filename)
raise e
if len (file.infolist()) != 1:
if outfile is not None :
raise PQHException ("the archive contains %d files and not one as you expected by filling outfile" % len (file.infolist()))
fLOG ("unzip file (multiple) ", filename)
#message = "\n".join ([ fi.filename for fi in file.infolist() ] )
#raise Exception.YstException("ColumnInfoSet.load_from_file: file %s contains no file or more than one file\n" + message)
folder = os.path.split (filename) [0]
todo = 0
_zip7_path = r"c:\Program Files\7-Zip"
zip7 = os.path.exists (_zip7_path)
wait = [ ]
for info in file.infolist () :
fileinside = info.filename
dest = os.path.join (folder, fileinside)
if not os.path.exists (dest) :
fol = os.path.split (dest) [0]
if not os.path.exists (fol) : os.makedirs (fol)
if os.path.exists (dest) :
st1 = datetime.datetime.utcfromtimestamp (os.stat (filename).st_mtime)
st2 = datetime.datetime.utcfromtimestamp (os.stat (dest).st_mtime)
if st2 > st1 :
continue
if not sys.platform.startswith("win") or not zip7 :
data = file.read (fileinside)
dest = os.path.split (dest) [1]
dest = os.path.join (path_unzip, dest)
fLOG ("unzipping file", dest)
wait.append(dest)
f = open (dest, "w")
if isinstance (data, bytes) :
f.write (str (data))
else :
f.write (data)
f.close ()
else :
todo += 1
if todo > 0 and zip7 :
dest = os.path.realpath (path_unzip)
cmd = '"' + _zip7_path + '\\7z.exe" e -y -o"%s" "%s"' % (dest, os.path.realpath (filename))
out,err = run_cmd (cmd, wait = True)
if len (err) > 0 : raise PQHException ("command {0} failed\n{1}".format(cmd,err))
if "Error" in out : raise PQHException ("command {0} failed\n{1}".format(cmd,out))
else :
dest = path_unzip
file.close ()
ch = False
while not ch :
ch = True
for a in wait :
if not os.path.exists(a) :
ch = False
break
time.sleep(0.5)
return dest
else :
for info in file.infolist () :
fileinside = info.filename
path = os.path.split (filename)
dest = outfile if outfile is not None else path [0] + "/" + fileinside
if not os.path.exists (dest) :
data = file.read (fileinside)
if outfile is None :
dest = os.path.split (dest) [1]
dest = os.path.join (path_unzip, dest)
if os.path.exists (dest) :
st1 = datetime.datetime.utcfromtimestamp (os.stat (filename).st_mtime)
st2 = datetime.datetime.utcfromtimestamp (os.stat (dest).st_mtime)
if st2 > st1 :
fLOG("unzipping one file (already done)", dest)
return dest
fLOG("unzipping one file", dest)
f = open (dest, "w")
if isinstance (data, bytes) :
f.write (bytes.decode (data))
else :
f.write (data)
f.close ()
file.close ()
return dest
return filename | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def unzip_file(zip_file: str) -> None:\n destination = tempfile.mkdtemp(prefix='gaelo_pross_unzip_')\n with ZipFile(zip_file) as my_zip:\n for member in my_zip.namelist():\n filename = os.path.basename(member)\n # skip directories\n if not filename:\n continue\n # copy file (taken from zipfile's extract)\n source = my_zip.open(member)\n target = open(os.path.join(destination, filename), \"wb\")\n with source, target:\n shutil.copyfileobj(source, target)\n # return destination",
"def _unzip_file(zip_file_path: str, unzip_dir: str = \"\") -> None:\n if not unzip_dir:\n unzip_dir = os.path.dirname(zip_file_path)\n op_desc = f\"Extracting: {os.path.basename(zip_file_path)}\"\n try:\n with ZipFile(file=zip_file_path) as zip_file:\n for member_name in tqdm(zip_file.namelist(), desc=op_desc):\n file_name = os.path.basename(member_name)\n if not file_name:\n continue\n target_path = os.path.join(unzip_dir, file_name)\n target_path = open(target_path, \"wb\")\n source_file = zip_file.open(member_name)\n with source_file, target_path:\n shutil.copyfileobj(source_file, target_path)\n os.remove(zip_file_path)\n except Exception as zip_error:\n zip_file_str = os.path.basename(zip_file_path)\n zip_file_str = os.path.splitext(zip_file_str)[0]\n for file_name in os.listdir(unzip_dir):\n if zip_file_str in file_name:\n os.remove(os.path.join(unzip_dir, file_name))\n raise zip_error",
"def unzip_single_file(zip_file_name, output_file_name):\n if not os.path.isfile(output_file_name):\n with open(output_file_name, 'wb') as out_file:\n with zipfile.ZipFile(zip_file_name) as zipped:\n for info in zipped.infolist():\n if output_file_name in info.filename:\n with zipped.open(info) as requested_file:\n out_file.write(requested_file.read())\n return",
"def unzip(zip_file, dest_folder):\n zip = zipfile.ZipFile(zip_file, 'r')\n if os.path.exists(dest_folder):\n pass\n else:\n os.makedirs(dest_folder)\n if dest_folder[-1] != '/':\n dest_folder += '/'\n for filename in zip.namelist():\n # --- Folder?\n if filename.endswith('/'):\n if os.path.exists(join(abspath(dest_folder),filename)):\n pass\n else:\n os.makedirs(join(abspath(dest_folder),filename))\n else:\n try:\n os.makedirs(normpath((abspath(dest_folder)+'/'+dirname(filename))))\n try:\n bytes = zip.read(filename)\n #print 'Unzipping file:', filename, 'with', len(bytes), 'bytes..'\n file((join(dest_folder,filename)), 'wb').write(zip.read(filename))\n accesstime = time.time()\n timeTuple=(int(zip.getinfo(filename).date_time[0]),\\\n int(zip.getinfo(filename).date_time[1]),\\\n int(zip.getinfo(filename).date_time[2]),\\\n int(zip.getinfo(filename).date_time[3]) ,\\\n int(zip.getinfo(filename).date_time[4]),\\\n int(zip.getinfo(filename).date_time[5]),\\\n int(0),int(0),int(0))\n modifiedtime = mktime(timeTuple)\n utime((join(dest_folder,filename)), (accesstime,modifiedtime))\n except IOError:\n pass\n except:\n if os.path.exists(normpath((abspath(dest_folder)+'/'+dirname(filename)))):\n try:\n bytes = zip.read(filename)\n #print 'Unzipping file:', filename, 'with', len(bytes), 'bytes..'\n file((join(dest_folder,filename)), 'wb').write(zip.read(filename))\n accesstime = time.time()\n timeTuple=(int(zip.getinfo(filename).date_time[0]),\\\n int(zip.getinfo(filename).date_time[1]),\\\n int(zip.getinfo(filename).date_time[2]),\\\n int(zip.getinfo(filename).date_time[3]) ,\\\n int(zip.getinfo(filename).date_time[4]),\\\n int(zip.getinfo(filename).date_time[5]),\\\n int(0),int(0),int(0))\n modifiedtime = mktime(timeTuple)\n utime((join(dest_folder,filename)), (accesstime,modifiedtime))\n except IOError:\n pass\n else:\n os.makedirs(normpath((abspath(dest_folder)+'/'+dirname(filename))))\n zip.close",
"def _get_file_txt (zipname) :\n file = os.path.split (zipname) [1]\n file = file.replace (\".zip\", \".txt\")\n file = file.replace (\".ZIP\", \".txt\")\n file = file.replace (\".gz\", \".txt\")\n file = file.replace (\".GZ\", \".txt\")\n return file",
"def _unzip(filename, branch=None):\n try:\n file = zipfile.ZipFile(filename)\n basename = os.path.dirname(filename)\n basename = basename.replace(\".zip\", \"\")\n file.extractall(path=basename)\n return basename, filename\n except Exception as e:\n six.print_(e)",
"def unzip_file(data_zip, path_unzip):\r\n with zipfile.ZipFile(data_zip, \"r\") as zip_temp:\r\n zip_temp.extractall(path_unzip)",
"def unzip(file_path: str) -> str:\n destination_directory, zip_file = os.path.split(file_path)\n extracted_path, _ = os.path.splitext(file_path)\n\n if file_path.endswith('tar.gz') or file_path.endswith('tgz'):\n handle_tar(file_path, 'r:gz', extracted_path, destination_directory)\n elif file_path.endswith('tar'):\n handle_tar(file_path, 'r:', extracted_path, destination_directory)\n return extracted_path",
"def unzip_file(zipfile_path, target_dir, touchfile_path):\r\n with zipfile.ZipFile(zipfile_path, 'r') as zip_ref:\r\n zip_ref.extractall(target_dir)\r\n\r\n with open(touchfile_path, 'w') as touchfile:\r\n touchfile.write(f'unzipped {zipfile_path}')",
"def unzip(zip_path, output_file, data_folder):\n\n print('Unzipping file: {}'.format(zip_path))\n pyunpack.Archive(zip_path).extractall(data_folder)\n\n # Checks if unzip was successful\n if not os.path.exists(output_file):\n raise ValueError(\n 'Error in unzipping process! {} not found.'.format(output_file))",
"def test_unzip_file(self):\n\n # Path to the compressed file\n zipped_file = os.path.join(self.xmlfilepath, \"DLTINS_20210117_01of01.zip\")\n # Test for correct data\n # NOTE : For this test case to pass the source xml zipped file\n # should be present in the download path\n self.assertTrue(unzip_file(zipped_file, self.xmlfilepath))\n\n # Test for wrong target path\n self.assertFalse(unzip_file(zipped_file, r\"D:\\kqcA CK j \"))\n\n # Test for incorrect compressed file\n self.assertFalse(unzip_file(\"D:\\somerandomfile\", self.xmlfilepath))",
"def replace_extract(self, file_path):\n tmp_file = shutil.copy2(self.path, \"tmpzip\")\n with ZipFile(tmp_file) as src, ZipFile(self.path, \"w\") as dst:\n for src_info in src.infolist():\n _, src_tail = path.split(src_info.filename)\n _, file_tail = path.split(file_path)\n if src_tail == file_tail:\n dst.write(file_path, src_info.filename)\n else:\n with src.open(src_info) as src_file:\n dst.writestr(src_info, src_file.read())\n\n remove(tmp_file)",
"def extract_and_clean(zipper, zip_path, filename):\n zipper.extract(zip_path)\n if \"/\" in zip_path :\n os.rename(zip_path, filename)\n shutil.rmtree(zip_path.split('/')[0])",
"def unzip_oxygen_files(zip_file):\n name_main_content = None\n name_left_menu = None\n list_img_files_to_save = list()\n\n files_unzipped = ZipFile(zip_file)\n for file_unzipped_name in files_unzipped.namelist():\n if not file_unzipped_name.startswith('__MACOSX'):\n if file_unzipped_name.endswith(\".jpeg\"):\n list_img_files_to_save.append(file_unzipped_name)\n elif file_unzipped_name.endswith(\".indexList.html\"):\n name_left_menu = file_unzipped_name\n elif file_unzipped_name.endswith(\"_xsd.html\"):\n name_main_content = file_unzipped_name\n\n return files_unzipped, name_left_menu, name_main_content, list_img_files_to_save",
"def main(self, line):\n # filename with optional destination\n args = bash(line)\n if args is None:\n return\n elif not (1 <= len(args) <= 2):\n print \"unzip: Usage: unzip file [destination]\"\n else:\n filename = os.path.abspath(args[0])\n if not os.path.isfile(filename):\n print \"unzip: %s: No such file\" % args[0]\n else:\n # PK magic marker check\n f = open(filename)\n try:\n pk_check = f.read(2)\n except Exception:\n pk_check = ''\n finally:\n f.close()\n if pk_check != 'PK':\n print \"unzip: %s: does not appear to be a zip file\" % args[0]\n else:\n if (os.path.basename(filename).lower().endswith('.zip')):\n altpath = os.path.splitext(os.path.basename(filename))[0]\n else:\n altpath = os.path.basename(filename) + '_unzipped'\n altpath = os.path.join(os.path.dirname(filename), altpath)\n location = (args[1:2] or [altpath])[0]\n if (os.path.exists(location)) and not (os.path.isdir(location)):\n print \"unzip: %s: destination is not a directory\" % location\n return\n elif not os.path.exists(location):\n os.makedirs(location)\n zipfp = open(filename, 'rb')\n try:\n zipf = zipfile.ZipFile(zipfp)\n # check for a leading directory common to all files and remove it\n dirnames = [os.path.join(os.path.dirname(x), '') for x in zipf.namelist()]\n common_dir = os.path.commonprefix(dirnames or ['/'])\n # Check to make sure there aren't 2 or more sub directories with the same prefix\n if not common_dir.endswith('/'):\n common_dir = os.path.join(os.path.dirname(common_dir), '')\n for name in zipf.namelist():\n data = zipf.read(name)\n fn = name\n if common_dir:\n if fn.startswith(common_dir):\n fn = fn.split(common_dir, 1)[-1]\n elif fn.startswith('/' + common_dir):\n fn = fn.split('/' + common_dir, 1)[-1]\n fn = fn.lstrip('/')\n fn = os.path.join(location, fn)\n dirf = os.path.dirname(fn)\n if not os.path.exists(dirf):\n os.makedirs(dirf)\n if fn.endswith('/'):\n # A directory\n if not os.path.exists(fn):\n os.makedirs(fn)\n else:\n fp = open(fn, 'wb')\n try:\n fp.write(data)\n finally:\n fp.close()\n except Exception:\n zipfp.close()\n print \"unzip: %s: zip file is corrupt\" % args[0]\n return\n finally:\n zipfp.close()",
"def unzip(zipped_file):\n with gzip.open(zipped_file, 'rt', encoding='ISO-8859-1') as file:\n file = file.read()\n return file",
"def unzip_file(path_to_zip_file: str, dir_to_extract_to: str) -> str:\n with zipfile.ZipFile(path_to_zip_file, 'r') as zip_ref:\n zip_ref.extractall(dir_to_extract_to)\n return f'{dir_to_extract_to}/{zip_ref.namelist()[0]}'",
"def unzip (file, path_unzip = None, outfile = None) :\n if path_unzip is None : path_unzip = GetPath ()\n fLOG (\"unzip file\", file)\n file = _check_source (file, path_unzip = path_unzip, outfile = outfile)\n \n nb = 0\n while not os.path.exists (file) and nb < 10 :\n time.sleep(0.5)\n nb += 1\n \n if not os.path.exists (file) :\n raise FileNotFoundError(file)\n \n return file",
"def unzip(local_zip: str, extract_dir: str, pwd: str = None):\n def get_zipinfo_datetime(zipmember: zipfile.ZipInfo) -> datetime:\n zt = zipmember.date_time # tuple: year, month, day, hour, min, sec\n # ZIP uses localtime\n return datetime(zt[0], zt[1], zt[2], zt[3], zt[4], zt[5], tzinfo=tz.tzlocal())\n\n def has_file_changed(zipmember: zipfile.ZipInfo, dst_path):\n st: os.stat_result = None\n try:\n st = os.stat(dst_path, follow_symlinks=False)\n if st.st_size != zipmember.file_size:\n return True\n dst_mtime: datetime = datetime.fromtimestamp(st.st_mtime, tz=tz.tzlocal())\n src_mtime = get_zipinfo_datetime(zipmember)\n if dst_mtime != src_mtime:\n return True\n except (OSError, ValueError):\n return True # does not exist\n return False\n\n def make_symlink(zipmember: zipfile.ZipInfo, symlink_location, is_directory):\n target = zip.read(zipmember, pwd=pwd).decode('utf-8')\n if os.path.lexists(symlink_location):\n os.remove(symlink_location)\n os.symlink(target, symlink_location, target_is_directory=is_directory)\n\n unzipped_files: List[Tuple[zipfile.ZipFile, str]] = []\n\n with zipfile.ZipFile(local_zip, \"r\") as zip:\n for zipmember in zip.infolist():\n dst_path = os.path.normpath(os.path.join(extract_dir, zipmember.filename))\n mode = zipmember.external_attr >> 16\n is_symlink = stat.S_ISLNK(mode)\n #what = 'DIR' if zipmember.is_dir() else 'FILE'\n #what = what + ' LINK' if is_symlink else what\n #print(f'{what} {zipmember.filename} S_IMODE={stat.S_IMODE(mode):0o} S_IFMT={stat.S_IFMT(mode):0o}')\n if zipmember.is_dir(): # make dirs if needed\n if is_symlink:\n make_symlink(zipmember, dst_path, is_directory=True)\n else:\n os.makedirs(dst_path, exist_ok=True)\n elif has_file_changed(zipmember, dst_path): # only extract if file appears to be modified\n unzipped_files.append((zipmember, dst_path))\n if is_symlink:\n make_symlink(zipmember, dst_path, is_directory=False)\n else:\n with zip.open(zipmember, pwd=pwd) as src, open(dst_path, \"wb\") as dst:\n shutil.copyfileobj(src, dst)\n for zipmember, dst_path in unzipped_files:\n # set the correct permissions for files and folders\n perm = stat.S_IMODE(zipmember.external_attr >> 16)\n os.chmod(dst_path, perm)\n # always set the modification date from the zipmember timestamp,\n # this way we can avoid unnecessarily modifying files and causing full rebuilds\n time = get_zipinfo_datetime(zipmember)\n #print(f' | {dst_path} {time}')\n mtime = time.timestamp()\n if System.windows:\n os.utime(dst_path, times=(mtime, mtime))\n else:\n os.utime(dst_path, times=(mtime, mtime), follow_symlinks=False)\n\n return len(unzipped_files)",
"def unzip_first(input_filename: str, extract_dir: str) -> str:\n with zipfile.ZipFile(input_filename) as zip_file:\n zip_file_list = zip_file.infolist()\n zip_index = 0\n while zip_index < len(zip_file_list) and zip_file_list[zip_index].is_dir():\n zip_index += 1\n if zip_index == len(zip_file_list):\n res = ''\n else:\n file_to_extract = zip_file_list[zip_index]\n zip_file.extract(file_to_extract, extract_dir)\n res = os.path.join(extract_dir, file_to_extract.filename)\n return res",
"def extract_file(self):\n# path_destination = os.path.join(\n# self.root, self.resources.replace(\".zip\", \"\"))\n# os.makedirs(path_destination, exist_ok=True)\n shutil.unpack_archive(os.path.join(\n self.root, self.resources), self.root)\n os.remove(os.path.join(self.root, self.resources))",
"def unzip_data(folder, zip_file_basename):\n # path\n filename = os.path.join(folder, zip_file_basename + '.zip')\n new_folder = os.path.join(os.path.dirname(__file__), 'data')\n if not os.path.isdir(new_folder):\n os.mkdir(new_folder)\n # unzip\n if os.path.isfile(os.path.join(new_folder, zip_file_basename + '.csv')):\n return 0\n else:\n zip_file = zipfile.ZipFile(filename, 'r')\n zip_file.extractall(new_folder)\n zip_file.close()\n basename = os.path.join(new_folder, zip_file_basename)\n os.rename(basename + '.txt', basename + '.csv')\n return 1",
"def Extract_zip_file (path_to_zip,dir_to_save_into):\n with zipfile.ZipFile(path_to_zip) as zf:\n \n for member in tqdm(zf.namelist(), desc='Extracting'):\n try:\n if ('annotations' in member) and (member.endswith('.json')): \n zf.extract(member, dir_to_save_into)\n shutil.move(os.path.join(dir_to_save_into,member),dir_to_save_into)\n if ('train' in member):\n zf.extract(member, dir_to_save_into)\n if ('test' in member):\n zf.extract(member, dir_to_save_into)\n if ('val' in member):\n zf.extract(member, dir_to_save_into)\n except zipfile.error as e:\n pass\n\n #delete zip\n os.remove(path_to_zip)\n if(os.path.isdir(os.path.join(dir_to_save_into,'annotations'))):\n # remove the tmp annotations directory\n shutil.rmtree(os.path.join(dir_to_save_into,'annotations'))",
"def unzip_file(zipfilename, unziptodir):\n\tunziptodir = unziptodir.replace(cfg.SEP_DCOMM, cfg.SEP_COMM)\n\tmake_dirs(unziptodir)\n\tzfobj = zipfile.ZipFile(zipfilename)\n\tfor name in zfobj.namelist():\n\t\tname = name.replace(cfg.SEP_DCOMM,cfg.SEP_COMM)\n\t\tif name.endswith(cfg.SEP_COMM):\n\t\t\tos.makedirs(os.path.join(unziptodir, name))\n\t\telse:\n\t\t\text_filename = os.path.join(unziptodir, name)\n\t\t\text_filename = ext_filename.replace(cfg.SEP_DCOMM,cfg.SEP_COMM)\n\t\t\text_dir= os.path.dirname(ext_filename)\n\t\t\tmake_dirs(ext_dir)\n\t\t\toutfile = open(ext_filename, 'wb')\n\t\t\toutfile.write(zfobj.read(name))\n\t\t\toutfile.close()",
"def unzip(file_loc, extract_loc=None):\n try:\n with zipfile.ZipFile(\n file_loc, \"r\"\n ) as file: # opening the zip file using 'zipfile.ZipFile' class\n print(\"Ok\")\n # ZipFile.infolist() returns a list containing all the members of an archive file\n print(file.infolist())\n\n # ZipFile.namelist() returns a list containing all the members with names of an archive file\n print(file.namelist())\n\n # ZipFile.getinfo(path = filepath) returns the information about a member of Zip file.\n # It raises a KeyError if it doesn't contain the mentioned file\n print(file.getinfo(file.namelist()[-1]))\n\n # If extraction directory not given, extracted to 'data/processed/file_name'\n if extract_loc == None:\n base = os.path.dirname(file_loc)\n folder_name = os.path.basename(base)\n extract_loc = \"data/processed/\" + folder_name\n\n # ZipFile.extractall(path = filepath, pwd = password) extracts all\n # the files to current directory\n file.extractall(path=extract_loc)\n # after executing check the directory to see extracted files\n\n except zipfile.BadZipFile: # if the zip file has any errors then it prints the\n # error message which you wrote under the 'except' block\n print(\"Error: Zip file is corrupted\")\n\n except zipfile.LargeZipFile:\n print(\"Error: File size if too large\") # if the file size is too large to\n # open it prints the error you have written\n except FileNotFoundError:\n print(\"Error: File not found\")",
"def unzip_to_temp_dir(zip_file_name):\n if not zip_file_name or not os.path.exists(zip_file_name):\n return None\n\n zf = zipfile.ZipFile(zip_file_name)\n\n if zf.testzip() is not None:\n return None\n\n # Unzip the files into a temporary directory\n LOGGER.info(\"Extracting zipped file: %s\" % zip_file_name)\n tempdir = tempfile.mkdtemp()\n\n try:\n # Create directories that don't exist\n for zip_name in zf.namelist():\n # We have no knowledge on the os where the zipped file was\n # created, so we restrict to zip files with paths without\n # charactor \"\\\" and \"/\".\n name = (zip_name.replace(\"\\\\\", os.path.sep).\n replace(\"/\", os.path.sep))\n dest = os.path.join(tempdir, name)\n if (name.endswith(os.path.sep) and not os.path.exists(dest)):\n os.mkdir(dest)\n LOGGER.debug(\"Directory %s created.\" % dest)\n\n # Copy files\n for zip_name in zf.namelist():\n # We have no knowledge on the os where the zipped file was\n # created, so we restrict to zip files with paths without\n # charactor \"\\\" and \"/\".\n name = (zip_name.replace(\"\\\\\", os.path.sep).\n replace(\"/\", os.path.sep))\n dest = os.path.join(tempdir, name)\n if not (name.endswith(os.path.sep)):\n LOGGER.debug(\"Copying file %s......\" % dest)\n outfile = open(dest, 'wb')\n outfile.write(zf.read(zip_name))\n outfile.close()\n LOGGER.debug(\"File %s copied.\" % dest)\n\n LOGGER.info(\"Unzipped file can be found at %s\" % tempdir)\n return tempdir\n\n except IOError as err:\n LOGGER.error(\"Error in extracting webdriver.xpi: %s\" % err)\n return None",
"def _do_unzip(zipped_file, output_directory):\n z = zipfile.ZipFile(zipped_file)\n for path in z.namelist():\n relative_path = os.path.join(output_directory, path)\n dirname, dummy = os.path.split(relative_path)\n try:\n if relative_path.endswith(os.sep) and not os.path.exists(dirname):\n os.makedirs(relative_path)\n elif not os.path.exists(relative_path):\n dirname = os.path.join(output_directory, os.path.dirname(path))\n if os.path.dirname(path) and not os.path.exists(dirname):\n os.makedirs(dirname)\n fd = open(relative_path, \"w\")\n fd.write(z.read(path))\n fd.close()\n except IOError, e:\n raise e\n return output_directory",
"def ExtractZip(zip_path, dest_dir):\n zip_path = GetWindowsPathWithUNCPrefix(zip_path)\n dest_dir = GetWindowsPathWithUNCPrefix(dest_dir)\n with zipfile.ZipFile(zip_path) as zf:\n for info in zf.infolist():\n zf.extract(info, dest_dir)\n # UNC-prefixed paths must be absolute/normalized. See\n # https://docs.microsoft.com/en-us/windows/desktop/fileio/naming-a-file#maximum-path-length-limitation\n file_path = os.path.abspath(os.path.join(dest_dir, info.filename))\n # The Unix st_mode bits (see \"man 7 inode\") are stored in the upper 16\n # bits of external_attr. Of those, we set the lower 12 bits, which are the\n # file mode bits (since the file type bits can't be set by chmod anyway).\n attrs = info.external_attr >> 16\n if attrs != 0: # Rumor has it these can be 0 for zips created on Windows.\n os.chmod(file_path, attrs & 0o7777)",
"def unpack_first(input_filename: str, extract_dir: str) -> str:\n if not is_archive_file(input_filename):\n raise AttributeError(\"Input_filename must be an archive (ex: .tar.gz, .zip)\")\n if zipfile.is_zipfile(input_filename):\n return unzip_first(input_filename, extract_dir)\n else:\n return untar_first(input_filename, extract_dir)",
"def _extract_if_zip(tmpdir: str, config: CSCConfig) -> str:\n if os.path.isdir(config.reads):\n return config.reads\n else:\n extracted_dir = os.path.join(tmpdir, f\"{config.input_format}s\")\n os.makedirs(extracted_dir)\n with zipfile.ZipFile(config.reads) as zip_file:\n files = [finfo for finfo in zip_file.infolist() if finfo.filename.endswith(f\".{config.input_format}\")]\n for extract_file in files:\n zip_file.extract(extract_file, extracted_dir)\n return extracted_dir"
] | [
"0.670198",
"0.6678802",
"0.65714586",
"0.64862216",
"0.6424468",
"0.63759226",
"0.63185555",
"0.6291177",
"0.6253069",
"0.61296326",
"0.60893184",
"0.5970322",
"0.5967116",
"0.59316283",
"0.59183365",
"0.59079635",
"0.5901411",
"0.5886114",
"0.5879796",
"0.5841547",
"0.5818173",
"0.58044034",
"0.58002716",
"0.57348007",
"0.5725852",
"0.5715808",
"0.57084906",
"0.5698042",
"0.5646078",
"0.5631834"
] | 0.7003948 | 0 |
checks if the first file (an opened url) is more recent than the second file (path); f1: opened url; path: path name; returns: boolean | def _first_more_recent (f1, path) :
import datetime
import re
import time
s = str (f1.info ())
da = re.compile ("Last[-]Modified: (.+) GMT").search (s)
if da is None :
return True
da = da.groups () [0]
gr = re.compile ("[\w, ]* ([ \d]{2}) ([\w]{3}) ([\d]{4}) ([\d]{2}):([\d]{2}):([\d]{2})").search (da)
if gr is None : return True
gr = gr.groups ()
da = datetime.datetime ( int (gr [2]), sys.hal_log_values ["month_date"] [gr [1].lower ()], int (gr [0]),
int (gr [3]), int (gr [4]), int (gr [5]) )
p = time.ctime (os.path.getmtime (path))
gr = re.compile ("[\w, ]* ([\w]{3}) ([ \d]{2}) ([\d]{2}):([\d]{2}):([\d]{2}) ([\d]{4})").search (p)
if gr is None :
return True
gr = gr.groups ()
file = datetime.datetime ( int (gr [5]), sys.hal_log_values ["month_date"] [gr [0].lower ()], int (gr [1]),
int (gr [2]), int (gr [3]), int (gr [4]) )
return da > file | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_newer(filename1, filename2):\n return os.stat(filename1).st_mtime > os.stat(filename2).st_mtime",
"def IsFileNewer(name1, name2):\n\n\tif not os.path.exists(name1):\n\t\treturn 0\n\n\tif not os.path.exists(name2):\n\t\treturn 1\n\n\tmod_time1 = os.stat(name1)[stat.ST_MTIME]\n\tmod_time2 = os.stat(name2)[stat.ST_MTIME]\n\treturn (mod_time1 > mod_time2)",
"def cmp(f1, f2):\n with open(f1) as f1, open(f2) as f2:\n return f1.read() == f2.read()",
"def _compare_file(path1, path2):\n\n try:\n return _open_file(path1) == _open_file(path2)\n except OSError:\n return False",
"def _newer(a: str, b: str) -> bool:\n if not os.path.exists(a):\n return False\n if not os.path.exists(b):\n return True\n return os.path.getmtime(a) >= os.path.getmtime(b)",
"def fileCompare(a, b):\n if a[\"file_run\"] > b[\"file_run\"]:\n return 1\n elif a[\"file_run\"] == b[\"file_run\"]:\n if a[\"file_lumi\"] > b[\"file_lumi\"]:\n return 1\n elif a[\"file_lumi\"] == b[\"file_lumi\"]:\n if a[\"file_first_event\"] > b[\"file_first_event\"]:\n return 1\n if a[\"file_first_event\"] == b[\"file_first_event\"]:\n return 0\n\n return -1",
"def compare_files(fp1, fp2):\n\n line1 = fp1.readline()\n line2 = fp2.readline()\n\n while line1 and line2:\n if line1.startswith('#') and line2.startswith('#'):\n pass\n elif not line1 == line2:\n return False\n \n line1 = fp1.readline()\n line2 = fp2.readline()\n\n if line1 or line2:\n return False\n\n return True",
"def file_newer(check_file: str, base_file: str) -> bool:\n if os.path.isfile(check_file):\n cf_modtime_ts = os.path.getmtime(check_file)\n bf_modtime_ts = os.path.getmtime(base_file)\n else:\n return False\n\n return cf_modtime_ts > bf_modtime_ts",
"def is_more_rencent(filename: str, comparison_filename: str):\n return os.path.getmtime(filename) > os.path.getmtime(comparison_filename)",
"def is_newer(a, b):\n return os.stat(a).st_mtime >= os.stat(b).st_mtime",
"def _file_newer(cls, path, check_mtime):\n path_mtime = os.path.getmtime(path)\n return path_mtime > check_mtime",
"def existing_and_newer(fn0, fn):\n\n if not os.path.isfile(fn0):\n error(\"Dependency '{}' does not exist\".format(fn0))\n\n if not os.path.isfile(fn):\n return False\n\n if os.path.getmtime(fn0) <= os.path.getmtime(fn):\n return True\n else:\n return False",
"def check_duplicate(fp1, fp2):\n try:\n subprocess.check_output(['diff', fp1, fp2])\n return True\n except subprocess.CalledProcessError:\n return False",
"def newer (source, target):\r\n\r\n if not os.path.exists (target):\r\n return 1\r\n\r\n from stat import ST_MTIME\r\n mtime1 = os.stat(source)[ST_MTIME]\r\n mtime2 = os.stat(target)[ST_MTIME]\r\n\r\n return mtime1 > mtime2",
"def compare(file1, file2):\n\tfrom os.path import exists\n\tresult = False\n\t\n\tfile1 = adaptPath(file1)\n\tfile2 = adaptPath(file2)\n\t\n\t# If two files existing\n\tif exists(file1) and exists(file2):\n\t\t# If the date and size equal\n\t\tif getFileSize(file1) == getFileSize(file2):\n\t\t\ttry:\n\t\t\t\t# Read the content of first file\n\t\t\t\tcontent1 = open(file1, \"rb\").read()\n\t\t\t\ttry:\n\t\t\t\t\t# Read the content of second file\n\t\t\t\t\tcontent2 = open(file2, \"rb\").read()\n\t\t\t\t\t# If content differs\n\t\t\t\t\tif content1 == content2:\n\t\t\t\t\t\tresult = True\n\t\t\t\texcept IOError:\n\t\t\t\t\tpass\n\t\t\texcept IOError:\n\t\t\t\tpass\n\treturn result",
"def check(self, name, entry):\n\n f = file(name, self.mode)\n fs = fstat(f.fileno())\n ts1 = fs[-2]\n try:\n ts2 = entry._timestamp\n except AttributeError:\n ts2 = ts1-1\n\n if ts2<ts1:\n entry._timestamp=ts1\n return f\n else:\n return None",
"def exists_and_newer(targetfile, topicfile):\n try:\n if getmtime(targetfile) >= getmtime(topicfile):\n return True\n else:\n return False\n except IOError:\n return False",
"def _compare_files(self, first_file, second_file):\n\n self.log.info('-' * 80)\n self.log.info('Compare files')\n\n code, out = cmd_exec(['cmp', str(first_file), str(second_file)], shell=False, log=self.log)\n if code:\n self.log.warning('md5 checksum IS NOT SAME with ffmpeg sw decode')\n self.log.warning(out)\n return False\n\n self.log.info('md5 checksum IS SAME with ffmpeg sw decode')\n return True",
"def date_older_than_file_date(date_and_time: str, file: str) -> bool:\n # compare_date: 0 is monthday, 1 is monthname, 2 is year, 3 is time, 4 is GMT\n if file[1:] == \"index.html\":\n compare_date = HttpServer.INDEX_DATE_LAST_MOD.split()\n else:\n compare_date = HttpServer.SEA_DATE_LAST_MOD.split()\n\n # 0 is weekday, 1 is monthday, 2 is monthname, 3 is year, 4 is time, 5 is GMT\n split_date_and_time = date_and_time.split()\n\n if split_date_and_time[3] == compare_date[2]:\n if HttpServer.MONTHS.get(split_date_and_time[2]) == HttpServer.MONTHS.get(compare_date[1]):\n if split_date_and_time[1] == compare_date[0]:\n # 0 is hours, 1 is minutes, 2 is seconds\n split_time = split_date_and_time[4].split(\":\")\n split_compare_time = compare_date[3].split(\":\")\n\n if split_time[0] == split_compare_time[0]:\n if split_time[1] == split_compare_time[1]:\n if split_time[2] == split_compare_time[2]:\n return True\n elif split_time[2] < split_compare_time[2]:\n return True\n else:\n return False\n elif split_time[1] < split_compare_time[1]:\n return True\n else:\n return False\n elif split_time[0] < split_compare_time[0]:\n return True\n else:\n return False\n elif split_date_and_time[1] < compare_date[0]:\n return True\n else:\n return False\n elif HttpServer.MONTHS.get(split_date_and_time[2]) < HttpServer.MONTHS.get(compare_date[1]):\n return True\n else:\n return False\n elif split_date_and_time[3] < compare_date[2]:\n return True\n else:\n return False",
"def newer(source, target):\n if not os.path.exists(source):\n raise DistutilsFileError(\"file '%s' does not exist\" %\n os.path.abspath(source))\n if not os.path.exists(target):\n return True\n\n return os.stat(source).st_mtime > os.stat(target).st_mtime",
"def _is_remote_file_different(local_file, remote_file, ftp_connection, fatal_if_nonexistant=False, local_must_be_newer=False):\n # Check for an error, if the error is that the file does not exist. By default, if the remote file does not exist,\n # assume that means that it needs to be uploaded. However, if fatal_if_nonexistant is True, then raise an exception.\n try:\n remote_size, remote_mtime = _remote_file_size_modtime(ftp_connection, remote_file)\n except error_perm: # I'm assuming that error_perm is only raised if the file doesn't exist, which is probably incorrect, but I have no way to test if you don't have permission to access the file\n if not fatal_if_nonexistant:\n return False\n else:\n raise\n\n local_size, local_mtime = _local_file_size_modtime(local_file)\n # We need to remove the sub-second components of the local mtime, because it is not required of the FTP MDTM command\n # that we use to get the remote time that it include smaller time resolution than seconds.\n local_mtime = local_mtime.replace(microsecond=0)\n \n if local_must_be_newer:\n return local_mtime > remote_mtime or local_size != remote_size\n else:\n return local_mtime != remote_mtime or local_size != remote_size",
"def ClosestFileMatch(src, tgtfiles, existing):\n\n result = tgtfiles.get(\"path:\" + src.name)\n if result is not None:\n return result\n\n if not OPTIONS.target_info_dict.get(\"update_rename_support\", False):\n return None\n\n if src.size < 1000:\n return None\n\n result = tgtfiles.get(\"sha1:\" + src.sha1)\n if result is not None and existing.get(result.name) is None:\n return result\n result = tgtfiles.get(\"file:\" + src.name.split(\"/\")[-1])\n if result is not None and existing.get(result.name) is None:\n return result\n return None",
"def is_newer(self, time):\n with self.connection_pool.item() as sftpc:\n return (\n sftpc.stat(self.remote_path).st_mtime > time\n or sftpc.lstat(self.remote_path).st_mtime > time\n )",
"def file_exists(file_name, must_be_more_recent_than_file = None):\n\n if file_name is None or len(file_name) == 0: return False\n existe = os.path.exists(file_name)\n if not existe: return False\n if not must_be_more_recent_than_file or file_name == must_be_more_recent_than_file or \\\n not os.path.exists(must_be_more_recent_than_file): return True\n return os.path.getmtime(file_name) > os.path.getmtime(must_be_more_recent_than_file)",
"def exists_and_is_more_recent(cont, filename, mtime):\n if not os.path.exists(filename):\n return False\n\n mtimes_dir = cont.named_cache_dir(\"mtimes\")\n digest = os.path.join(mtimes_dir,\n hashlib.md5(filename.encode(\"utf-8\")).hexdigest())\n fetched_mtime = fetch_mtime_from(digest)\n\n if fetched_mtime > mtime:\n return True\n elif fetched_mtime == 0:\n # No mtime was stored on disk, get filesystem mtime and store\n # it for caching later. We don't usually use the filesystem\n # mtime since it isn't tar safe.\n store_file_mtime_in(filename, digest)\n if os.stat(filename).st_mtime > mtime:\n return True\n\n return False",
"def are_files_equal(file1, file2):\n input_file_1 = open(file1, \"r\")\n input_file_2 = open(file2, \"r\")\n\n file1 = input_file_1.read()\n file2 = input_file_2.read()\n print(type(file1), file1, type(file2), file2)\n\n result =False\n if file1 == file1:\n result = True\n\n input_file_1.close()\n input_file_2.close()\n return result",
"def file_is_modified(filename, lastupdate):\n now = datetime.datetime.utcnow()\n update = file_get_mdatetime(filename)\n return now >= update and update >= lastupdate",
"def fileCmp (working, ref, compare_content=0, verbose=0):\n\tif verbose and working and ref:\n\t\tprint \"fileCmp\\n\\t working: %s\\n\\tref: %s\" % (\n\t\t\tworking.path or \"no working path\", \n\t\t\tref.path or \"no reference path\")\n\t\t\n\tflag = \"UNASSIGNED\"\n\tdebugging = 0\n\t\n\tif ref and not working:\n\t\tflag = \"missing\"\n\t\n\telif not ref: # or not os.path.exists(ref.path):\n\t\tflag = \"new\"\n\t\t\n\telif isinstance (working, JloFile):\n\t\t# print \"ref: %s\" % ref.__class__.__name__\n\t\tif debugging:\n\t\t\tif not working.equals (ref):\n\t\t\t\tprint \"working file is different\"\n\t\t\t\t\n\t\t\tif not working.newerthan (ref):\n\t\t\t\tprint \"working file has same date as ref\"\n\t\t\n\t\t\tif working.modtime == ref.modtime:\n\t\t\t\tprint \"mods dates match\"\n\t\t\telse:\n\t\t\t\t# print \"wrk: %d ref: %d\" % (working.modtime,ref.modtime)\n\t\t\t\tprint \"wrk: %s ref: %s\" % \\\n\t\t\t\t\t(working.ppDate (working.modtime),\n\t\t\t\t\t working.ppDate (ref.modtime))\n\t\t\n\t\tif compare_content:\n\t\t\tif working.equals (ref):\n\t\t\t\tflag = \"\"\n\t\t\telse:\n\t\t\t\tflag = \"modified\"\n\t\t\t\t\n\t\telse:\n\t\t\tflag = \"\"\n\n\t\t\t\n\t\t\t\n\t\t\t# elif not working.newerthan (ref):\n\t\t\t# flag = \"obsolete-check\"\n\t\t# elif working.newerthan (ref) and not working.equals (ref):\n\t\t\t# flag = \"modified\"\n\t\t# elif not working.equals (ref):\n\t\t\t# print \"not modified\"\n\t\t\t# flag = \"different\"\n\t\t# elif working.newerthan (ref):\n\t\t\t# flag = \"modified\"\n\tif verbose and working:\n\t\tprint \"%s --> %s\" % (working.name, flag)\n\treturn flag",
"def checkFiles(self): \r\n mdate_filenames_list = []\r\n mdate_filenames_tuple = {}\r\n last24 = []\r\n now = datetime.datetime.now() \r\n noise,ft = file_type.split('.')\r\n ## note can do an entry bg color stoplight thing >24 hrs = red, 12-24 hrs = yellow < 12 = green nice little if loop\r\n for f in filenames_list:\r\n if os.path.isfile(f):\r\n lastmod_date = datetime.datetime.fromtimestamp(os.path.getmtime(f))\r\n mdate_filenames_tuple = lastmod_date, f\r\n mdate_filenames_list.append(mdate_filenames_tuple)\r\n \r\n if now - lastmod_date < file_age:\r\n \r\n #print (\"{} was last modified on {:%a %b %d %Y, %H:%M:%S, %Z}. Moving to 'destinaiton' transfer folder.\".format(f, lastmod_date))\r\n last24.append(f)\r\n shutil.copy2(f, destination)\r\n xferTime=time.time()\r\n \r\n fa = str(file_age) \r\n with sqlite3.connect('fileTransfer.db') as connection:\r\n c = connection.cursor()\r\n c.execute(\"INSERT INTO tbl_lastRun(col_timestamp, col_source, col_destination, col_file_type, col_file_age) VALUES (?,?,?,?,?)\",(xferTime, source, destination, ft, hrs))\r\n connection.commit()\r\n connection.close \r\n\r\n clear(self)\r\n ask_quit(self)",
"def cmp_lines(path_1, path_2):\n l1 = l2 = ' '\n with open(path_1, 'U') as f1:\n with open(path_2, 'U') as f2:\n while l1 != '' and l2 != '':\n l1 = f1.readline()\n l2 = f2.readline()\n if l1 != l2:\n return False\n return True"
] | [
"0.7023369",
"0.67877334",
"0.66630054",
"0.66364807",
"0.6428826",
"0.63028544",
"0.62502795",
"0.6238821",
"0.6196319",
"0.61771303",
"0.617126",
"0.6169872",
"0.6125706",
"0.61131996",
"0.609536",
"0.6085871",
"0.6009534",
"0.5816924",
"0.5813216",
"0.5807121",
"0.58070153",
"0.5761381",
"0.57526875",
"0.57416373",
"0.57350856",
"0.5733885",
"0.57175344",
"0.5673116",
"0.56453586",
"0.5618493"
] | 0.6840222 | 1 |
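The record above parses the Last-Modified header and the local modification time with hand-written regexes. A minimal stdlib-only sketch of the same comparison, shown here purely as an illustration (it is not part of the dataset, and the header value in the comment is a made-up example):

```python
import os
from datetime import datetime, timezone
from email.utils import parsedate_to_datetime

def remote_is_newer(last_modified_header, local_path):
    # Parse an RFC 2822 date such as "Wed, 21 Oct 2015 07:28:00 GMT".
    remote_dt = parsedate_to_datetime(last_modified_header)
    # Local modification time as an aware datetime in UTC.
    local_dt = datetime.fromtimestamp(os.path.getmtime(local_path), tz=timezone.utc)
    return remote_dt > local_dt
```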
return the interpreter path | def get_interpreter_path () :
if sys.platform.startswith("win") : return sys.executable.replace ("pythonw.exe", "python.exe")
else : return sys.executable | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_python_path():\n\n return get_executable_path('python')",
"def executable_path(self):\n prepend = self._active_environment(ActiveEnvironment).prepend\n return prepend.get(\"PATH\", \"\")",
"def get_path(self):\r\n path = [\"/bin\", \"/usr/bin\", \"/usr/local/bin\"]\r\n if \"PATH\" in os.environ:\r\n p = os.environ[\"PATH\"]\r\n if p:\r\n path = p.split(os.pathsep)\r\n return path",
"def _get_interpreter(python=None) -> Path:\n if not python:\n return Path(sys.executable)\n\n # Maybe the path is already supplied\n if Path(python).exists():\n return python\n\n # Guess path\n paths = [Path(path) for path in os.getenv('PATH', '').split(':')]\n\n # Assume that python is a version if it doesn't start with p\n python = f'python{python}' if not python.startswith('p') else python\n interpreters = [python]\n\n # Build potential interpreter paths\n interpreter_paths = [p / i for p in paths for i in interpreters]\n for path in interpreter_paths:\n if path.absolute().exists():\n # return the first one found\n return path\n raise InterpreterNotFound(version=python)",
"def get_exec_path():\n if hasattr(sys, \"frozen\"): # compiled by py2exe\n return os.path.dirname(sys.executable)\n else:\n return os.path.dirname(sys.path[0]) # should be path to /fpdb",
"def _executable(self) -> str:\n return sys.executable",
"def get_executable(self) -> str:\n ...",
"def GetLauncherPath(self):\n return os.path.dirname(__file__)",
"def GetLauncherPath(self):\n return os.path.dirname(__file__)",
"def location(self):\n\n p = os.path.abspath(__file__)\n pathSP = os.path.split(p)\n return pathSP",
"def module_path():\r\n if hasattr(sys, \"frozen\"):\r\n return os.path.dirname(sys.executable)\r\n return os.path.dirname(__file__)",
"def _spdr_engine_location():\n return os.path.realpath(__file__).rpartition('/')[0]",
"def get_python():\n return path.join(TaskCreator.bin_dir, \"python\")",
"def _get_R_script_path(self):\r\n return join(self._get_R_script_dir(), self._R_script)",
"def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))",
"def _get_R_script_path(self):\r\n return join(self._get_R_script_dir(), self._r_script)",
"def _get_R_script_path(self):\r\n return join(self._get_R_script_dir(), self._r_script)",
"def path(self):\n if not self._path:\n logger.spam(\"Checking for helper executable %s\", self.name)\n self._path = distutils.spawn.find_executable(self.name)\n if self._path:\n logger.debug(\"%s is at %s\", self.name, self.path)\n self._installed = True\n else:\n logger.debug(\"No path to %s found\", self.name)\n return self._path",
"def path():\n # Exclude path to this script from path.\n this_file = os.path.realpath(__file__)\n this_path = os.path.dirname(this_file)\n return os.pathsep.join(p for p in sys.path if p != this_path)",
"def get_interpreter_quintuplet(python: typing.Union[str, pathlib.Path]) -> str:\n return _get_command_output([os.fspath(python), \"-c\", _VENV_NAME_CODE])",
"def pyexec():\n return sys.executable",
"def getScriptPath():\n\treturn os.path.dirname(os.path.realpath(sys.argv[0]))",
"def module_path():\n return os.path.dirname(unicode(__file__, sys.getfilesystemencoding( )))",
"def get_gui_path():\n if frozen_project():\n return os.path.dirname(sys.executable)\n else:\n return os.path.dirname(__file__)",
"def path(self):\n if self._path:\n return self._path\n path = os.environ[\"PATH\"].split(os.pathsep)\n path = [os.path.expanduser(x) for x in path]\n path = [os.path.abspath(x) for x in path]\n path = [x for x in path if os.path.exists(x)]\n self._path = path\n return self._path",
"def launcher_path() -> Optional[str]:\n return u.resource(LAUNCHER_SCRIPT)",
"def module_path() -> Path:\n if hasattr(sys, \"frozen\"):\n return Path(sys.executable).resolve().parent\n else:\n return (Path(__file__) / \"..\").resolve().parent",
"def which():\n\n location = None\n if os.path.basename(_git_path) != _git_path:\n if os.path.isfile(_git_path):\n location = _git_path\n else:\n paths = [x for x in os.environ[\"PATH\"].split(os.pathsep) if not x.isspace()]\n for path in paths:\n exe = os.path.join(path, _git_path)\n if os.path.isfile(exe):\n location = exe\n break\n return location",
"def module_path(self):\n return self.config['cwd']",
"def _GetSystemPath():\n return encoding_util.GetEncodedValue(os.environ, \"PATH\")"
] | [
"0.7354625",
"0.72487605",
"0.70609933",
"0.70519686",
"0.69643044",
"0.69575727",
"0.6889702",
"0.68254983",
"0.68254983",
"0.67586106",
"0.67049986",
"0.6704595",
"0.66673666",
"0.6665867",
"0.66172516",
"0.66151",
"0.66151",
"0.655819",
"0.65428525",
"0.65145975",
"0.6498407",
"0.6465521",
"0.6462605",
"0.64526695",
"0.64253336",
"0.64247155",
"0.64108825",
"0.64062816",
"0.64036465",
"0.6374577"
] | 0.7817029 | 0 |
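A short usage sketch for the idea in the record above (illustrative, not part of the dataset): `sys.executable` already points at the running interpreter, and the `pythonw.exe` swap only matters for GUI processes on Windows.

```python
import subprocess
import sys

# Prefer the console interpreter when running from a GUI process on Windows.
interpreter = sys.executable.replace("pythonw.exe", "python.exe")
subprocess.run([interpreter, "-c", "print('hello from a child interpreter')"], check=True)
```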
splits a command line; cmd: command line; remove_quotes: True by default; returns: list | def split_cmp_command(cmd, remove_quotes = True) :
if isinstance (cmd, str) :
spl = cmd.split()
res = []
for s in spl :
if len(res) == 0 :
res.append(s)
elif res[-1].startswith('"') and not res[-1].endswith('"') :
res[-1] += " " + s
else :
res.append(s)
if remove_quotes :
nres = [ ]
for _ in res :
if _.startswith('"') and _.endswith('"'):
nres.append(_.strip('"'))
else:
nres.append(_)
return nres
else:
return res
else :
return cmd | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def split_and_honor_quotation_marks(cmdline):\n\n # See\n # http://msdn.microsoft.com/library/en-us/vccelng/htm/progs_12.asp\n\n # Step 1: Translate all literal quotes into QUOTE. Justify number\n # of backspaces before quotes.\n tokens = []\n bs_buf = \"\"\n QUOTE = 1 # \\\", literal quote\n for c in cmdline:\n if c == '\\\\':\n bs_buf += c\n elif c == '\"' and bs_buf:\n # A quote preceded by some number of backslashes.\n num_bs = len(bs_buf)\n tokens.extend([\"\\\\\"] * (num_bs//2))\n bs_buf = \"\"\n if num_bs % 2:\n # Odd. Quote should be placed literally in array\n tokens.append(QUOTE)\n else:\n # Even. This quote serves as a string delimiter\n tokens.append('\"')\n\n else:\n # Normal character (or quote without any preceding\n # backslashes)\n if bs_buf:\n # We have backspaces in buffer. Output these.\n tokens.extend(list(bs_buf))\n bs_buf = \"\"\n\n tokens.append(c)\n\n # Step 2: split into arguments\n result = [] # Array of strings\n quoted = False\n arg = [] # Current argument\n tokens.append(\" \")\n for c in tokens:\n if c == '\"':\n # Toggle quote status\n quoted = not quoted\n arg.append('\"')\n elif c == QUOTE:\n arg.append('\"')\n elif c in (' ', '\\t'):\n if quoted:\n arg.append(c)\n else:\n # End of argument. Output, if anything.\n if arg:\n result.append(''.join(arg))\n arg = []\n else:\n # Normal character\n arg.append(c)\n \n return result",
"def split_command(command): # pylint: disable = redefined-outer-name\n if not check(command):\n raise ValueError(\"Invalid command string %r\" % (command,))\n\n return [\n strip(arg[1:-1]) if arg.startswith('\"') else arg\n for arg in split(command)\n ]",
"def test_split_arguments_and_remove_quotes(self, unused_test_name,\n input_string, expected):\n result = shlex.split(input_string)\n self.assertEqual(result, expected)",
"def split_str(cmdline_str, has_options):\n return Splitter.split_list(shlex.split(cmdline_str), has_options)",
"def _make_split_command():\n argre = r'[^\"\\s]\\S*|\"[^\\\\\"]*(?:\\\\[\\\\\"][^\\\\\"]*)*\"'\n check = _re.compile(\n r'\\s*(?:%(arg)s)(?:\\s+(?:%(arg)s))*\\s*$' % dict(arg=argre)\n ).match\n split = _re.compile(argre).findall\n strip = _ft.partial(_re.compile(r'\\\\([\\\\\"])').sub, r'\\1')\n\n def split_command(command): # pylint: disable = redefined-outer-name\n \"\"\"\n Split generic commandline into single arguments\n\n The command splitter splits between tokens. Tokens are non-whitespace\n sequences or double quoted strings. Inside those double quotes can be\n escaped with a backslash. So have to be backslashes.\n\n Stolen from <http://opensource.perlig.de/svnmailer/>.\n\n :Return: Parser for generic commandlines\n :Rtype: callable\n \"\"\"\n if not check(command):\n raise ValueError(\"Invalid command string %r\" % (command,))\n\n return [\n strip(arg[1:-1]) if arg.startswith('\"') else arg\n for arg in split(command)\n ]\n\n return split_command",
"def SplitCommandLineIntoArgv(space_delimited_argv, posix=True):\n try:\n return map(lambda s: s.decode('utf-8'),\n shlex.split(space_delimited_argv.encode('utf-8'),\n comments=FLAGS.pyatdl_allow_command_line_comments,\n posix=posix))\n except ValueError as e:\n raise ShlexSyntaxError('Cannot parse command line. %s' % str(e))",
"def argv(self) -> List[str]:\n if self.command:\n rtn = [utils.strip_quotes(self.command)]\n for cur_token in self.arg_list:\n rtn.append(utils.strip_quotes(cur_token))\n else:\n rtn = []\n\n return rtn",
"def _parse_command(self, cmd):\n if isinstance(cmd, list):\n args = [str(x) for x in cmd]\n assert args\n else:\n args = shlex.split(cmd)\n return args",
"def split_args(args):\n words = []\n quoted_words = []\n\n quoted = re.compile('\"([^\"]+)\"')\n for value in quoted.findall(args):\n quoted_words.append(value)\n \n new_str = args\n\n for i in quoted_words:\n new_str = re.sub('\"[^\"]+\"', '', new_str)\n\n for i in new_str.split():\n words.append(i)\n \n words.extend(quoted_words)\n \n return words",
"def parse_commands(command_list: List[str]) -> List[str]:\n return [' '.join(x.split('-')) for x in command_list]",
"def parse_cmdline(self, command_line):\n components = shlex.split(\n io.StringIO(unicode(command_line)), posix=False\n )\n return components[0].strip('\"'), components[1:]",
"def clean_command_lines(cmd):\r\n cmd = ' '.join(cmd.split())\r\n return cmd",
"def list2cmdline(seq):\n\n result = []\n needquote = False\n for arg in seq:\n bs_buf = []\n\n # Add a space to separate this argument from the others\n if result:\n result.append(' ')\n\n needquote = (\" \" in arg) or (\"\\t\" in arg) or (not arg) or (\"(\" in arg) or (\")\" in arg)\n if needquote:\n result.append('\"')\n\n for c in arg:\n if c == '\\\\':\n # Don't know if we need to double yet.\n bs_buf.append(c)\n elif c == '\"':\n # Double backslashes.\n result.append('\\\\' * len(bs_buf) * 2)\n bs_buf = []\n result.append('\\\\\"')\n else:\n # Normal char\n if bs_buf:\n result.extend(bs_buf)\n bs_buf = []\n result.append(c)\n\n # Add remaining backslashes, if any.\n if bs_buf:\n result.extend(bs_buf)\n\n if needquote:\n result.extend(bs_buf)\n result.append('\"')\n\n return ''.join(result)",
"def split_args(args):\n double_dash_pos = [i for i, x in enumerate(args) if x == '--']\n if not double_dash_pos:\n return (args, [])\n else:\n double_dash_pos = double_dash_pos[0]\n return (args[:double_dash_pos], args[double_dash_pos+1:])",
"def splitcmdline(cmdline):\r\n\r\n # Replace tabs and newlines with spaces\r\n cmdline = cmdline.strip(' \\r\\n\\t').replace('\\t', ' ').replace('\\r', ' ').replace('\\n', ' ')\r\n\r\n # Handle special cases first\r\n if \" \" not in cmdline:\r\n # Nothing to parse if there is no space, it's filename only\r\n return cmdline, []\r\n elif \"\\\"\" not in cmdline:\r\n # There are spaces but no quotes\r\n # Handle special cases of long filename not enclosed in quotes\r\n if os.path.isfile(expand_win_path_variables(cmdline)):\r\n return cmdline, []\r\n else:\r\n # otherwise split it by spaces\r\n parts = cmdline.split(\" \")\r\n return parts[0], [part for part in parts[1:] if len(part) > 0]\r\n else:\r\n # Spaces and quotes are present so parse it carefully\r\n part = \"\"\r\n parts = []\r\n between_quotes = False\r\n\r\n for c in cmdline:\r\n if c == \"\\\"\":\r\n between_quotes = not between_quotes\r\n if not between_quotes:\r\n # Just ended quotes, append part\r\n parts.append(part)\r\n part = \"\"\r\n elif c in (\" \", \"\\t\", \"\\n\") and not between_quotes:\r\n if part:\r\n parts.append(part)\r\n part = \"\"\r\n else:\r\n part += c\r\n\r\n if part:\r\n parts.append(part)\r\n\r\n return parts[0], [part for part in parts[1:] if len(part) > 0]",
"def argumentsParser(args):\n\targuments = []\n\tif args.find('\"') > -1:\n\t\tt_arguments = args.split('\"')\n\t\tfor a in t_arguments:\n\t\t\tif a == '' or a == ' ':\n\t\t\t\tpass\n\t\t\telif a[-1] == ' ':\n\t\t\t\targuments.append(a[:-1])\n\t\t\telse:\n\t\t\t\targuments.append(a)\n\telif args.find(\"'\") > -1:\n\t\tt_arguments = args.split(\"'\")\n\t\tfor a in t_arguments:\n\t\t\tif a == '' or a == ' ':\n\t\t\t\tpass\n\t\t\telif a[-1] == ' ':\n\t\t\t\targuments.append(a[:-1])\n\t\t\telse:\n\t\t\t\targuments.append(a)\n\telif args == ' ':\n\t\tpass\n\telse:\n\t\targuments = args.split(' ')\n\treturn arguments",
"def expand_args(cmd_args):\n if isinstance(cmd_args, (tuple, list)):\n args_list = list(cmd_args)\n else:\n args_list = shlex.split(cmd_args)\n return args_list",
"def _get_parameter_list(self, raw_command): # pylint: disable=no-self-use\n contents = raw_command.split(' ')\n return [item for item in contents if item.startswith('-')]",
"def split_list(cmdline, has_options):\n token_list = []\n in_positional_params = False if has_options else True\n for token in cmdline:\n if in_positional_params or token == \"--\":\n token_list.append(token)\n in_positional_params = True\n elif token[0] != '-': # then it is a value\n token_list.append(token)\n elif token.startswith(\"--\"):\n token_list.extend(Splitter._handle_long_form(token))\n else:\n token_list.extend(Splitter._handle_short_form(token))\n return Stack(token_list)",
"def split_cmdline_filter_items(string):\n filter_items = string.split(',')\n return filter_items",
"def parse_command_list(config_str):\n return [command for command in config_str.splitlines() if command]",
"def command(self):\n with open(self.x, 'rt') as fi:\n line = next(fi) # the first line\n\n version, cmd_line = line.strip().split(';')\n version = version.split(' ')[2]\n cmd_line = re.sub('\"', '', cmd_line.strip())\n\n return [version, cmd_line]",
"def _merge_quote_args(self, args_list):\n\n if len(args_list) <= 1:\n return args_list\n \n index = 0\n while index < len(args_list):\n # if the current argument starts with a quote but does not end with a quote,\n # then the argument must have been wrongly split.\n if args_list[index].startswith(\"\\\"\"):\n while index+1 < len(args_list):\n if _ends_in_unescaped_quote(args_list[index].strip(\".\")):\n break\n args_list[index] += \", \" + args_list[index+1]\n args_list.pop(index+1)\n index += 1\n\n return args_list",
"def strip_variables(*args):\n return [arg.strip(\" '\\\"\") if arg is not None else arg for arg in args]",
"def line_split(self, line):\n\t\tline = re.sub(r\"`(.*?)'\", quote_replace, line)\n\t\tline = line.translate(None, '.:,()+*')\n\t\treturn line.split()",
"def _get_argv_after_doubledash(self):\n try:\n idx = sys.argv.index(\"--\")\n return sys.argv[idx+1:] # the list after '--'\n except ValueError as e: # '--' not in the list:\n return []",
"def cmd_list(args):",
"def shell_split(text):\n assert is_text_string(text) # in case a QString is passed...\n pattern = r'(\\s+|(?<!\\\\)\".*?(?<!\\\\)\"|(?<!\\\\)\\'.*?(?<!\\\\)\\')'\n out = []\n for token in re.split(pattern, text):\n if token.strip():\n out.append(token.strip('\"').strip(\"'\"))\n return out",
"def gen_command_line(self, testcase):\n found_double_at = False\n new_args = []\n\n for arg in self.target_cmdline:\n if arg == '@@':\n found_double_at = True\n new_args.append(testcase)\n else:\n new_args.append(arg)\n\n if found_double_at:\n stdin = None\n else:\n with open(testcase, 'rb') as inf:\n stdin = inf.read()\n\n return new_args, stdin",
"def splitLine(line):\n splitLine = shlex.split(line)\n return splitLine"
] | [
"0.7222235",
"0.68670875",
"0.68347687",
"0.68330204",
"0.6794882",
"0.66859853",
"0.6655126",
"0.66494465",
"0.66008824",
"0.6600495",
"0.6596832",
"0.6523082",
"0.64387125",
"0.625679",
"0.62543994",
"0.62076086",
"0.6084892",
"0.60701495",
"0.60104656",
"0.6010308",
"0.6009509",
"0.59494984",
"0.5903532",
"0.5885575",
"0.5864672",
"0.58596975",
"0.5856872",
"0.5848379",
"0.58306146",
"0.581803"
] | 0.7064252 | 1 |
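For reference (not part of the dataset), the standard library already provides quote-aware command splitting through `shlex.split`, which covers most of what the hand-rolled splitter above does:

```python
import shlex

cmd = 'python "my script.py" --name "John Doe"'
print(shlex.split(cmd))
# -> ['python', 'my script.py', '--name', 'John Doe']
```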
decode the output or the error after running a command line instruction; outerr: output or error; encoding: encoding; encerror: how to handle decoding errors; msg: message added to the error if decoding fails; returns: converted string | def decode_outerr(outerr, encoding, encerror, msg):
if not isinstance(outerr,bytes):
raise TypeError("only able to decode bytes, not " + str(type(outerr)))
try :
out = outerr.decode(encoding, errors=encerror)
return out
except UnicodeDecodeError as exu :
try :
out = outerr.decode("utf8" if encoding != "utf8" else "latin-1", errors=encerror)
return out
except Exception as e :
out = outerr.decode(encoding, errors='ignore')
raise Exception("issue with cmd (" + encoding +"):" + str(msg) + "\n" + str(exu) + "\n-----\n" + out) from e
raise Exception("complete issue with cmd:" + str(msg)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def error_to_text(ex):\n\tif isinstance(ex, FailedProcessError) and ex.args[0] == 'youtube-dl' and ex.exitcode == 1:\n\t\treturn 'Download error: {}'.format(ex.stderr)\n\treturn \"Internal error {}: {}\".format(type(ex).__name__, ex)",
"def handle_error(e, error_response_str):\n error_output = e.decode(encoding='UTF-8')\n print_error(error_response_str)\n print_error(error_output)",
"def process_error_data(error):\n lines = [\n '[Command failed] %s' % processwrappers.future_shlex_join(error.cmd),\n 'Returncode: %s' % error.returncode]\n if error.stderr:\n lines.append('___ Standard error ___')\n lines.extend(error.stderr.decode().splitlines())\n #\n if error.stdout:\n lines.append('___ Standard output ___')\n lines.extend(error.stdout.decode().splitlines())\n #\n return '\\n'.join(lines)",
"def get_encoded_msg():\n print(\"Enter text you would like to decode:\\n\")\n e_msg = input(\">\")\n return e_msg",
"def transformErr2Str(self,*args):\n error_code = c_int32(args[0])\n error_str = create_string_buffer(\"\\000\"*1024)\n status = self.__acqiris_QuantroDLL1.transformErr2Str(self.__instrumentID,error_code,error_str) \n return str(error_str)",
"def main(ctx, **kwargs):\n ctx.meta['decode-errors'] = kwargs['errors'] or 'strict'\n ctx.meta['output-logmsg'] = kwargs['logmsg'] or 'normal'",
"def processError(sMsg, sOutput):\n if sOutput == \"\":\n errorMsg(sMsg)\n else:\n errorMsg(sMsg + \":\\n \" + ' '.join(sOutput.splitlines(True)))",
"def error(err):\n\n return str(err) + '\\n'",
"def get_output_error(cmd, **kwargs):\n if not isinstance(cmd, list):\n cmd = [cmd]\n logging.debug(\"Running: %s\", ' '.join(map(quote, cmd)))\n try:\n result = Popen(cmd, stdout=PIPE, stderr=PIPE, **kwargs)\n except OSError as e:\n return -1, '', f'Failed to run {cmd!r}: {e!r}'\n so, se = result.communicate()\n # unicode:\n so = so.decode('utf8', 'replace')\n se = se.decode('utf8', 'replace')\n\n return result.returncode, so, se",
"def errormessage(self, msg) :\n\t\tif msg != self.__olderror :\n\t\t\tself.__stderr.write(\"%s\\n\" % msg)\n\t\t\tself.htmlmessage(msg)\n\t\tself.__olderror = msg[:]\n\t\treturn -1",
"def to_stdout(string, errors=\"replace\"):\n\n\tencoded = string.encode(sys.stdout.encoding, errors)\n\tdecoded = encoded.decode(sys.stdout.encoding)\n\treturn decoded",
"def print_toml_decodeerror(cls, excep_obj):\n print(f\"{cls.ERROR_PREFIX} {cls.TOML_DECODEERROR_MESSAGE}\")\n print(excep_obj)",
"def _process_error_response(self, toc, buf):\n\n\t\terrorSev = None\n\t\terrorMsg = None\n\t\terrorDet = None\n\n\t\tif toc != 'E' and toc != 'N':\n\t\t\treturn\n\n\t\tparts = buf.split(b'\\0')\n\n\t\tfor part in parts:\n\t\t\tpart = part.decode()\n\t\t\tif len(part) < 1:\n\t\t\t\tcontinue\n\t\t\t_type = part[0]\n\t\t\tif _type == 'M':\n\t\t\t\terrorMsg = part[1:]\n\t\t\telif _type == 'S':\n\t\t\t\terrorSev = part[1:]\n\t\t\telif _type == 'D':\n\t\t\t\terrorDet = part[1:]\n\t\t\n\t\tif not errorSev and not errorMsg:\n\t\t\treturn\n\n\t\tif toc != 'E':\t\t\t\t# This is not an error report it as debug\n\t\t\tif self.Pfdebug:\n\t\t\t\tself.Pfdebug.write(f'BACKEND {errorSev}: {errorMsg}\\n')\n\t\t\t\tif errorDet:\n\t\t\t\t\tself.Pfdebug.write(f'DETAIL: {errorDet}\\n')\n\t\telse:\n\t\t\tif errorDet:\n\t\t\t\tself.pcp_internal_error(f'{errorSev}: {errorMsg}\\nDETAIL: {errorDet}\\n')\n\t\t\telse:\n\t\t\t\tself.pcp_internal_error(f'{errorSev}: {errorMsg}\\n')\n\t\t\tself._setResultStatus(ResultStateType.BACKEND_ERROR)",
"def doDecode(self):\n raise CipherError(\"override this funct and return the decoded msg\")",
"def command_failed_error(cmd):\n\n output_1 = colored(' - Error: Failed to run command ', 'red')\n output_2 = command(cmd)\n return output_1 + output_2 + '\\n'",
"def decode_output(self, to_decode):\n if to_decode is not None:\n # return to_decode.decode(self.decode_type)\n return str(to_decode, self.decode_type)\n return False",
"def error(text, exitcode=1):\n\n # If we get passed something like an Exception, we can convert\n # it down to a string.\n text = str(text)\n\n # If the message starts with whitespace, assume that it should come\n # *before* the command-name prefix.\n text_nows = text.lstrip()\n ws = text[:len(text) - len(text_nows)]\n\n # This has to be a constant value as we can't reliably get our actual\n # program name on all platforms.\n emsg(ws + \"pkgfmt: \" + text_nows)\n\n if exitcode != None:\n sys.exit(exitcode)",
"def error_exit(self, msg):\n wrappedmsg = textwrap.fill(msg, 78)\n fullmsg = \"%s\\n%s\" % (wrappedmsg, self.get_usage_command())\n raise SBToolError(fullmsg, True)",
"def decode_error_code(err_code, s, d):\n\n config.logger.warn('Failure: %d %s %s', err_code, s, d)\n\n return {\n 0: 'Request completed successfully. No error',\n 1: 'Invalid API key',\n 2: 'Unknown Request',\n 3: 'Invalid arguements',\n 4: 'Invalid service',\n 5: 'Invalid session',\n 6: 'Insufficient bandwidth available',\n 7: 'No path between src and dst with that service type',\n 8: 'Internal VELOX error',\n 9: 'Nothing to modify',\n -1: 'Server comms error',\n }.get(err_code, 'Unknown error code')",
"def cmd_error_check(self, cmd_out):\n for err in self.err_strings:\n if re.search('\\\\b%s\\\\b' % (err), cmd_out, re.I):\n _log.info(cmd_out)\n _log.info(\n \"Cmd execution failed! with this Return Error: \\n%s\" % (\n cmd_out))\n return 0",
"async def convert_error(ctx, error):\n print(error)\n if isinstance(error, commands.UserInputError):\n await ctx.send(\"Invalid input.\")\n else:\n await ctx.send(\"Oops, something bad happened..\")",
"def _extract_openssl_error():\n\n error_num = libcrypto.ERR_get_error()\n buffer = buffer_from_bytes(120)\n libcrypto.ERR_error_string(error_num, buffer)\n\n # Since we are dealing with a string, it is NULL terminated\n error_string = byte_string_from_buffer(buffer)\n\n return _try_decode(error_string)",
"def __readStderr(self):\n if self.process is not None:\n self.errorGroup.show()\n s = str(self.process.readAllStandardError(),\n Preferences.getSystem(\"IOEncoding\"),\n 'replace')\n self.errors.insertPlainText(s)\n self.errors.ensureCursorVisible()",
"def result_of(cmd):\n cmd_list_arr = cmd.split(\" \")\n result = check_output(cmd_list_arr).decode(\"utf-8\")\n return result",
"def excecute_command(command):\n command = command.split(' ')\n process_respose = subprocess.Popen(command,\n stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n output, error = process_respose.communicate()\n\n if output:\n output = output.decode('utf-8')\n if error:\n error = error.decode('utf-8')\n\n return output, error",
"def write_err(self, text): # pragma: no cover\n # type: (str) -> None\n stderr = self.stderr\n if self.stderr.closed:\n stderr = sys.stderr\n stderr.write(decode_output(u\"\\r\", target_stream=stderr))\n stderr.write(decode_output(CLEAR_LINE, target_stream=stderr))\n if text is None:\n text = \"\"\n text = decode_output(u\"{0}\\n\".format(text), target_stream=stderr)\n self.stderr.write(text)\n self.out_buff.write(decode_output(text, target_stream=self.out_buff))",
"def get_cmd_output(cmd):\n try:\n data = subprocess.check_output(cmd, shell=True, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as ex:\n data = ex.output\n try:\n data = data.decode(\"utf-8\")\n except UnicodeDecodeError:\n data = data.decode(\"latin1\")\n return data",
"def decode(decode_format):\n return output_from_decode",
"def parsed_error_msg(self):\r\n return self.error_msg",
"def check_output_contains(context, text, err_msg):\n res = re.search(text, context.output.decode('utf-8'))\n if res is None:\n print(context.output.decode('utf-8'))\n raise Exception(err_msg)"
] | [
"0.64598864",
"0.62169755",
"0.6093039",
"0.6090095",
"0.6007613",
"0.5802997",
"0.5792216",
"0.57130677",
"0.5682947",
"0.5641948",
"0.5634072",
"0.56156194",
"0.55839014",
"0.5551853",
"0.5543839",
"0.55256593",
"0.5465533",
"0.5462396",
"0.545408",
"0.5436861",
"0.54050845",
"0.53662956",
"0.53603464",
"0.5327872",
"0.5320236",
"0.5316218",
"0.5312477",
"0.5305213",
"0.5296695",
"0.52883863"
] | 0.75364584 | 0 |
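A stripped-down sketch of the decode-with-fallback pattern used by the record above (illustration only; the byte string is a made-up example):

```python
raw = b"caf\xe9"  # not valid UTF-8
try:
    text = raw.decode("utf-8")
except UnicodeDecodeError:
    # Fall back to latin-1, replacing anything that still cannot be decoded.
    text = raw.decode("latin-1", errors="replace")
print(text)  # 'café'
```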
run a script; script: script to execute; l: other parameters | def run_script (script, *l) :
if not os.path.exists (script) :
raise PQHException ("file %s not found" % script)
py = get_interpreter_path ()
cmd = "%s %s" % (py, script)
if len (l) > 0 :
cmd += " " + " ".join ( [str (x) for x in l])
out,err = run_cmd (cmd)
return out,err | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run_script(self, params, config_no):\n raise NotImplementedError()",
"def runScript(self, script):\n c = self\n game = self.game\n app = self.game.app\n shell = self.shell\n sprite = self.sprite\n s = shell\n self = self.env\n exec(open(\"script/\" + script).read())",
"def run(self, script, *args, **kwargs):\n return self._run('run', script, *args, **kwargs)",
"def script(self):",
"def run_script(self):\n pass",
"def run_script():\n # pylint: disable=unsupported-assignment-operation\n script_source.data['script'] = [inp_script.value]",
"def runScript(self, script):\n data = FilePath(__file__).parent().child('data')\n sample_file = data.child('1.input.ofx')\n\n args = (script, [sample_file.path])\n log.msg('executing %r' % (args,))\n out, err, rc = yield utils.getProcessOutputAndValue(*args, env=None)\n log.msg('rc: %r' % (rc,))\n log.msg('out: %r' % (out,))\n log.msg('err: %r' % (err,))\n if rc != 0:\n self.fail(\"Failed: %s\\n\\n%s\" % (out, err))",
"def runscript(host, script, list_scripts, multi_host, hosts_filter):\n if list_scripts:\n pprint(menu_generator(cs.get_scripts()))\n if host:\n session = cs.init_session(host)\n response = cs.execute_active_responder_command(\"runscript\", f\"-CloudFile={script}\", session)\n pprint(response)\n if multi_host:\n batch_id = cs.new_batch_job(hosts_string=multi_host)\n response = cs.execute_batch_job(\"runscript\", batch_id, f\"-CloudFile={script}\")\n pprint(response)\n if hosts_filter:\n query_filter = hosts_filter.split(\":\")\n batch_id = cs.new_batch_job(filter_parameter=query_filter[0], filter_value=query_filter[1])\n response = cs.execute_batch_job(\"runscript\", batch_id, f\"-CloudFile={script}\")\n pprint(response)",
"def execute_script(self, action, *args):\n self.host.cmd(('./%s' + len(args) * ' %s') % (action, *args))",
"def call_script(name, args=[]):\n def fn(): lib.call([env.script(name)] + args)\n time_duration(fn)",
"def execute_script(self, script, enterpreter='/bin/sh'):\n destination = '/tmp/' + ''.join(\n random.choice(string.lowercase) for i in range(16))\n\n self.upload(script, destination)\n self.execute('%s %s' % (enterpreter, destination))\n self.execute('rm %s' % destination)",
"def script_run(ctx: click.Context, name, script_arguments):\n subcommand_script.cmd_run(ctx.obj, name, script_arguments)",
"def do_run_script(self, arg):\n try:\n with open(os.path.join(os.getcwd(), arg), 'r') as fin:\n script = fin.readlines()\n for line in script:\n self.onecmd(line)\n except (FileNotFoundError) as exc:\n print(exc)",
"def execute(self, args=\"\"):\r\n return super(PythonScript, self).execute(_EXECUTABLE, args)",
"def execute(self, task, script, **kwargs):\n locals().update(kwargs)\n exec(script)",
"def call_script_on_staging(name, args=[]):\n def fn(): lib.call([os.path.join(env.staging_path, \"scripts\", name)] + args)\n time_duration(fn)",
"def parse_script_cmd(self, line):\n line, _ = self.find_vars_in_str(line)\n words = line.split()\n words[1] = gen_parse.rm_quotation_marks(words[1])\n filepath = gen_io.get_abs_path(words[1])\n if len(words) == 2:\n self.exec_python_script(filepath)\n else:\n if words[2] == 'python':\n self.exec_python_script(filepath)\n elif words[2] == \"bash\":\n self.exec_bash_script(filepath)\n else:\n self.print_error(f\"'{words[2]}' scripts not yet suported\")",
"def run(self, script, **kwargs):\r\n # don't return a value from a script\r\n kwargs['nout'] = 0\r\n return self.call(script, **kwargs)",
"def run_script(extension_invocation_info):\n acm.RunModuleWithParameters(__name__, acm.GetDefaultContext())",
"def run_script(input_file, run_dir, script_name, interpreter='python'):\n from paver.runtime import sh\n from paver.path import path\n docdir = path(input_file).dirname()\n output_text = sh('cd %(docdir)s/%(run_dir)s;%(interpreter)s %(script_name)s 2>&1' % vars(),\n capture=True)\n response = '\\n::\\n\\n\\t$ %(interpreter)s %(script_name)s\\n\\t' % vars()\n response += '\\n\\t'.join(output_text.splitlines())\n while not response.endswith('\\n\\n'):\n response += '\\n'\n return response",
"def prepare_sub_script(i):\n\n run_cmd=''\n\n target_os_cfg=i['target_os_cfg']\n\n remote=False\n if target_os_cfg.get('remote','')=='yes': \n remote=True\n\n script_name=i['run_script']\n\n script_path=''\n if 'run_script_uoa' in i and i['run_script_uoa']!='':\n# cm_kernel.print_for_con('')\n# cm_kernel.print_for_con('Preparing path for OS script '+i['run_script_uoa']+' ...')\n\n ii={'cm_run_module_uoa':ini['cfg']['cm_modules']['os.script'],\n 'cm_action':'load',\n 'cm_data_uoa':i['run_script_uoa']}\n r=cm_kernel.access(ii)\n if r['cm_return']>0: return r\n\n script_cfg=r['cm_data_obj']['cfg']\n script_path=r['cm_path']\n\n if 'scripts' not in script_cfg or i['run_script'] not in script_cfg['scripts']:\n return {'cm_return':1, 'cm_error':'can\\'t find script in os.script configuration'}\n\n script_name=script_cfg['scripts'][script_name]\n\n script_name+=target_os_cfg['script_ext']\n\n run_name=script_name\n if script_path!='':\n run_name=os.path.join(script_path, run_name)\n elif 'exec_prefix' in target_os_cfg and target_os_cfg['exec_prefix']!='': \n run_name=target_os_cfg['exec_prefix']+run_name\n\n if target_os_cfg.get('set_executable','')!='':\n p=target_os_cfg['set_executable']+' '+run_name\n x=os.system(p)\n\n run_cmd=''\n if remote and target_os_cfg.get('no_script_execution','')=='yes':\n r=cm_kernel.load_array_from_file({'cm_filename':run_name})\n if r['cm_return']>0: return r\n a=r['cm_array']\n for x in a:\n xx=x.strip()\n if xx!='' and not xx.startswith(target_os_cfg['rem']):\n if run_cmd!='': run_cmd+=target_os_cfg['env_separator']+' '\n run_cmd+=xx\n run_name=''\n else:\n run_cmd=run_name\n\n if i.get('run_cmd','')!='': run_cmd+=' '+i['run_cmd']\n\n if i.get('run_cmd_out1','')!='': run_cmd+=' 1>'+i['run_cmd_out1']\n if i.get('run_cmd_out2','')!='': run_cmd+=' 2>'+i['run_cmd_out2']\n\n\n return {'cm_return':0, 'run_cmd':run_cmd}",
"def run_script(input_file, script_name, interpreter='python'):\r\n from paver.easy import sh\r\n from paver.path import path\r\n rundir = path(input_file).dirname()\r\n output_text = sh('cd %(rundir)s && %(interpreter)s %(script_name)s 2>&1' % vars(), capture=True)\r\n response = '\\n::\\n\\n\\t$ %(interpreter)s %(script_name)s\\n\\t' % vars()\r\n response += '\\n\\t'.join(output_text.splitlines())\r\n while not response.endswith('\\n\\n'):\r\n response += '\\n'\r\n return response",
"def run_script(script_id, params=None):\n # I p1 script id\n # I p2 number of parameters (0-10)\n ## (optional) extension ##\n # I[] params\n\n if params is not None:\n msg = \"\"\n for p in params:\n msg += struct.pack(\"I\", p)\n nump = len(params)\n extents = [msg]\n else:\n nump = 0\n extents = []\n\n return _u2i(_pigpio_command_ext(\n _control, _PI_CMD_PROCR, script_id, nump, extents))",
"def run_this(self, script):\n for line in script.strip().split(\"\\n\"):\n # TODO Interpret lines more than just calling functions\n if line.startswith(\"#\"):\n # Skip lines that start with #\n continue\n retval = self.call_function(line.strip())\n #print retval",
"def script_cmd(cmd, cnt, args):\n if cnt > 1:\n cmd_file = args[1]\n start_script(cmd_file)\n else:\n stop_scripting()",
"def execute():",
"def Non_VASP_Script(my_project):\n\n WORKFLOWS = my_project['Workflow']\n Workflow_Params = WORKFLOWS['Steps'][2]\n Workflow_name = Workflow_Params['NAME']\n job_dir = my_project['NAME'] + Workflow_Params['NAME']\n chkpt = job_dir + '.json'\n prev_filter = Workflow_Params['Continue']['Filter']\n prev_chkpt = Workflow_Params['Continue']['Source']\n Script = Workflow_Params['Script']\n executable = Script['Executable']\n non_arg_inputs = Script['NonArgInput']\n arg_inputs = Script['ArgInput']\n\n rerun_paths = continue_job_inputs(chkpt_files= prev_chkpt,\\\n user_filters=prev_filter)\n\n # Run the script now at the rerun_paths\n for r in rerun_paths:\n if inputs:\n shutil.copy(inputs, r)\n os.chdir(r)\n print ('Running {0} in {1}'.format(executable, r))\n script_output = sp.run([executable]+ arg_inputs, stdout=sp.PIPE).stdout.decode('utf-8')\n \n\n return None",
"def call_script(self, script):\n filename, callable = script.rsplit(':', 1)\n filename = os.path.abspath(filename)\n module = imp.load_source('script', filename)\n script = getattr(module, callable.strip())\n\n try:\n script(self.options, self.buildout, self.augmented_environment())\n except TypeError:\n # BBB: Support hook scripts that do not take the environment as\n # the third parameter\n script(self.options, self.buildout)",
"def sendjob(self,bashscript):",
"def exec_script(self, script):\n filename = os.path.join(self.script_dir, script + \".sh\")\n # http://docs.python.org/library/os.html#os.X_OK\n if os.access(filename, os.X_OK):\n with open(filename):\n subprocess.call(filename)\n self.vibrate(0.1)"
] | [
"0.70747274",
"0.6891864",
"0.67501086",
"0.6722625",
"0.6709817",
"0.6659978",
"0.6600384",
"0.6596219",
"0.6582011",
"0.6568014",
"0.65097755",
"0.6352416",
"0.63355607",
"0.6309203",
"0.62741786",
"0.62620753",
"0.6250904",
"0.6215831",
"0.61932105",
"0.6190089",
"0.61530215",
"0.6131735",
"0.61043733",
"0.6101243",
"0.60980976",
"0.6091777",
"0.60871875",
"0.60618323",
"0.60407805",
"0.60204864"
] | 0.83252746 | 0 |
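A stdlib-only equivalent of the record above, sketched for illustration (the script name and arguments are placeholders): `subprocess.run` with the current interpreter captures stdout and stderr directly.

```python
import subprocess
import sys

result = subprocess.run(
    [sys.executable, "some_script.py", "arg1", "arg2"],  # placeholder script and args
    capture_output=True,
    text=True,
)
print(result.returncode, result.stdout, result.stderr)
```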
return a prefix for a file based on time | def get_prefix () :
t = datetime.datetime (2010,1,1).now ()
t = str(t).replace (":", "_").replace ("/", "_").replace (" ", "_")
t += "_" + str (random.randint (0,1000)) + "_"
return os.path.join (GetPath (), "temp_" + t) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def prefix_from_filename(input_file):\n prefix = \"\"\n \n if str(input_file).find(\"medication_bner_\") != -1:\n prefix = \"drugbank\"\n elif str(input_file).find(\"_bc5cdr\") != -1:\n prefix = \"bc5cdr\"\n elif str(input_file).find(\"_bionlp13cg\") != -1:\n prefix = \"bionlp13cg\"\n \n return prefix",
"def makeTimeFilename(prefix, ext): \n suffix = time.strftime(\"%b%d_%H%M\") + ext\n return prefix + suffix",
"def time_stamping(file):\n time_stamp = datetime.now().date()\n\n # 1st remove path like /home/\n path_file = file.split(\"/\")\n # 2nd removes file formats\n file_ = path_file[len(path_file)-1].split(\".\", 1)\n path_file.pop()\n # 3rd add time_stamp\n file_[0] = str(file_[0])+\"_\"+str(time_stamp)\n # 4th all is back together\n file = '.'.join(map(str, file_))\n\n path_file.append(file)\n file = '/'.join(map(str, path_file))\n print(file)\n return file",
"def _unique_path(prefix):\n suffix = ''.join([\n random.choice(string.ascii_letters) for i in range(8)\n ])\n return '%s/%r.%s' % (prefix, time.time(), suffix)",
"def get_prefix() :\n \n import glob\n filenames = glob.glob(picture_folder+'*.jpg')\n max_prefix = 0\n for filename in filenames :\n parts = filenames.split('_')\n if len(parts) > 1 :\n possible_prefix = parse_int(parts[0])\n if possible_prefix > max_prefix :\n max_prefix = possible_prefix\n \n max_prefix += 1\n return '%04d' % max_prefix",
"def time_key(file_name):\n splits = file_name.split('/')\n [date] = re.findall(r'(\\d{4}_\\d{2}_\\d{2})', splits[-2])\n date_id = [int(token) for token in date.split('_')]\n recording_id = natural_key(splits[-1])\n session_id = session_key(splits[-2])\n \n return date_id + session_id + recording_id",
"def get_starttime(self):\n filetime = datetime.datetime.strptime(self.filenametime,\n \"%Y%m%d_%H%M%S\")\n if self.ldat_type != 'acc':\n starttime = filetime\n else:\n starttime = filetime - datetime.timedelta(seconds=512)\n return starttime",
"def find_newest_matching_prefix(path, prefix):\n entries = os.listdir(path)\n result = None\n for entry in entries:\n if prefix.match(entry):\n fq_entry = os.path.join(path, entry)\n if result is None:\n result = fq_entry\n else:\n result_mtime = os.path.getmtime(result)\n entry_mtime = os.path.getmtime(fq_entry)\n if entry_mtime > result_mtime:\n result = fq_entry\n\n return result",
"def find_newest_matching_prefix(path, prefix):\n entries = os.listdir(path)\n result = None\n for entry in entries:\n if prefix.match(entry):\n fq_entry = os.path.join(path, entry)\n if result is None:\n result = fq_entry\n else:\n result_mtime = os.path.getmtime(result)\n entry_mtime = os.path.getmtime(fq_entry)\n if entry_mtime > result_mtime:\n result = fq_entry\n\n return result",
"def get_prefix(log_type, query_time):\n time_prefix = query_time.strftime('year=%Y/month=%m/date=%d/')\n return '%s/%s' % (log_type, time_prefix)",
"def format_prefix(meta):\n ts = meta.time.strftime('%H:%M:%S.%f')[:-3]\n if meta.comm and meta.pid:\n return \"%s %s[%d]: \" % (ts, meta.comm, meta.pid)\n else:\n return ts + \": \"",
"def prefix(files):\n\tfrom os import sep\n\t\n\t# Initializes counters\n\tcounters = []\n\t\n\t# For all files\n\tfor file in files:\n\t\tfile = normalizePath(file)\n\t\t\n\t\t# Split the file name into a piece\n\t\tpaths = file.split(sep)\n\t\t\n\t\t# For each piece\n\t\tfor i in range(0,len(paths)):\n\t\t\ttry:\n\t\t\t\ttry:\n\t\t\t\t\t# Test if counters exist\n\t\t\t\t\tcounters[i][paths[i]] += 1\n\t\t\t\texcept:\n\t\t\t\t\t# Creates a path counters\n\t\t\t\t\tcounters[i][paths[i]] = 1\n\t\t\texcept:\n\t\t\t\t# Adds a new level of depth\n\t\t\t\tcounters.append({paths[i] : 1})\n\t\n\t# Constructs the prefix of the list of files\n\ttry:\n\t\tresult = \"\"\n\t\tamount = list(counters[0].values())[0]\n\t\tfor counter in counters:\n\t\t\tif len(counter.keys()) == 1 and list(counter.values())[0] == amount:\n\t\t\t\tresult += list(counter.keys())[0] + sep\n\t\t\telse:\n\t\t\t\treturn result [:-1]\n\t\t\t\tbreak\n\t\treturn result\n\texcept IndexError:\n\t\treturn \"\"",
"def datetime_filename(prefix='output_',extension='.txt'):\n outputname = prefix + '{:%Y%m%d%H%M%S}utc{}'.format(\n datetime.datetime.utcnow(),extension)\n return outputname",
"def getPrefix(self):\n return( self.id.split('.')[0] )",
"def getTimeToFileName(self):\n return self.sNow.strftime(\"%d-%m-%Y_%H-%M-%S\")",
"def get_default_prefix(path):\n if path in prefixes_dict.keys():\n return prefixes_dict[path]\n else:\n return ''",
"def get_pids(self, file_path, pid):\n if 'cuhk03' in file_path: \n prefix = 'cuhk'\n else: \n prefix = file_path.split('/')[1]\n return prefix + '_' + str(pid)",
"def getPrefix(self):\n return \"20gig\"",
"def gettempprefix():\n\tpass",
"def get_timestamp_from_path(file_path):\n return int(file_path.split('_')[1].split('.')[0])",
"def network_prefix(path):\n\n from glob import glob\n dir_path, file_name = os.path.split(path)\n path = os.path.join(os.path.abspath(dir_path), file_name)\n\n for extension in ['index', 'meta', 'data*']:\n file_name = '%s.%s' % (path, extension)\n\n # use glob instead of os because we need to expand the wildcard\n if len(glob(file_name)) == 0:\n raise IOError('File %s does not exist.' % file_name)\n\n return path",
"def fileTimeRange(padFileName):\n padFileName = split(padFileName, '/')[-1] # throw away path \n padFileName = split(padFileName, '.header')[0] # throw away '.header'\n sensor = split(padFileName, '.')[-1]\n padFileName = join(split(padFileName, '.')[:-1], '.')\n pair = split(padFileName, '-')\n if len(pair) == 1:\n pair = split(padFileName, '+')\n joiner = '-'\n else:\n joiner = '+'\n return stringTimeToUnix(pair[0]), joiner, stringTimeToUnix(pair[1])",
"def getNamePrefix(name):\n try:\n location = len(name) - \"\".join(reversed(name)).index(\".\")\n # index is never used, but this line ensures that the index is an int.\n index = int(name[location:])\n prefix = name[:location-1]\n except Exception:\n prefix = name\n return prefix",
"def get_file_name(image_dir, image_name_prefix, current_count):\n if imageNumOn:\n # you could also use os.path.join to construct image path file_path\n file_path = image_dir+ \"/\"+image_name_prefix+str(current_count)+\".jpg\"\n else:\n right_now = datetime.datetime.now()\n file_path = (\"%s/%s%04d%02d%02d-%02d%02d%02d.jpg\"\n % (image_dir, image_name_prefix,\n right_now.year, right_now.month, right_now.day,\n right_now.hour, right_now.minute, right_now.second))\n return file_path",
"def standardized_name(path, filename):\n path_file = os.path.join(path, filename)\n stat = os.stat(path_file)\n extension = path_file.split('.')[-1]\n creation_time = datetime.fromtimestamp(stat.st_mtime).strftime('%m-%d-%Y_%H:%M:%S')\n return '{}.{}'.format(creation_time, extension)",
"def getFIRSTPrefix(modelfile):\n\n if not modelfile.endswith('first.vtk'):\n raise ValueError('Not a first vtk file: {}'.format(modelfile))\n\n modelfile = op.basename(modelfile)\n prefix = modelfile.split('-')\n prefix = '-'.join(prefix[:-1])\n\n return prefix",
"def prefixer(self,sn):\n\n\t\t#---\"spot\" is a tuple of spotname and the part name\n\t\t#---namer takes the spotname (called spot in the yaml defn of namer) and the simulation name\n\t\t#---we include the partname when accessing self.spots\n\t\ttry: \n\t\t\tspot = spotname,partname = (self.spotname_lookup(sn),self.trajectory_format)\n\t\t\tprefix = self.spots[spot]['namer'](spotname,sn)\n\t\texcept: raise Exception('[ERROR] prefixer failure on simulation \"%s\" (check your namer)'%sn)\n\t\treturn prefix",
"def get_pids(self, file_path, pid):\n if 'cuhk03' in file_path:\n prefix = 'cuhk'\n else:\n prefix = file_path.split('/')[1]\n return prefix + '_' + str(pid)",
"def prefix_file(filename, prefix):\n path, file_or_dir = os.path.split(filename)\n new_filename = os.path.join(path, prefix + file_or_dir)\n os.rename(filename, new_filename)",
"def _get_time_name(self, base_name):\n return base_name + '_' + datetime.utcnow().strftime('%Y%m%d_%H%M%S')"
] | [
"0.6537689",
"0.6446945",
"0.634372",
"0.62493694",
"0.6221671",
"0.6185408",
"0.61169964",
"0.60952646",
"0.60952646",
"0.60697746",
"0.60191506",
"0.5899234",
"0.587953",
"0.5868552",
"0.58580756",
"0.5856218",
"0.57993394",
"0.5782173",
"0.5760891",
"0.5745738",
"0.5724817",
"0.5715584",
"0.56993204",
"0.5670337",
"0.5664074",
"0.5662471",
"0.56570244",
"0.56500435",
"0.5640504",
"0.5620828"
] | 0.7512885 | 0 |
remove all files and folders in a folder. folder: the folder to empty; silent: silent mode or not; returns the list of files or folders that could not be removed (see the usage sketch after this record) | def removedirs (folder, silent = False) :
file, rep = [], []
for r, d, f in os.walk (folder) :
for a in d :
rep.append (os.path.join (r, a))
for a in f :
file.append (os.path.join (r, a))
impos = []
file.sort ()
rep.sort (reverse = True)
for f in file :
try :
if os.path.exists (f):
os.remove (f)
except Exception as e :
fLOG ("unable to remove file", f, " --- ", str(e).replace("\n", " "))
if silent : impos.append (f)
else : raise
for f in rep :
try :
if os.path.exists (f):
os.removedirs (f)
except Exception as e :
fLOG ("unable to remove folder", f, " --- ", str(e).replace("\n", " "))
if silent : impos.append (f)
else : raise
if os.path.exists (folder) :
try :
os.rmdir(folder)
except Exception as e:
impos.append(folder)
return impos | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clean_all_folder():\n LOGGER.warning('removal of old files has been temporarily disabled')\n # paths_to_clean = CFG.remove_files\n # if paths_to_clean: # pylint: disable=using-constant-test\n # for remove_config in paths_to_clean: # pylint: disable=not-an-iterable\n # name = tuple(remove_config.keys())[0]\n # LOGGER.info(f'processing: {name}')\n # remove_config = remove_config[name]\n # if 'folder' not in remove_config.keys():\n # LOGGER.error(f'missing \"folder\" in {name}')\n # return\n # if 'age' not in remove_config.keys():\n # LOGGER.error(f'missing \"age\" in {name}')\n # return\n # if not os.path.exists(remove_config['folder']):\n # LOGGER.error(f'path does not exist: {remove_config[\"folder\"]}')\n # return\n # _remove_old_files_from_folder(**remove_config)\n # else:\n # LOGGER.debug('no folder to clean')",
"def clean():\n folders = ['utils_dfn/temp', 'utils_dfn/img', 'utils_dfn/mask', 'utils_dfn/output']\n for folder in folders:\n for item in os.listdir(folder):\n item_path = os.path.join(folder, item)\n if os.path.isdir(item_path):\n shutil.rmtree(item_path)\n elif os.path.isfile(item_path):\n os.remove(item_path)",
"def clear_directory(folder_path):\n for the_file in os.listdir(folder_path):\n try:\n file_path = os.path.join(folder_path, the_file)\n if os.path.isfile(file_path) \\\n and the_file != RefreshListScript \\\n and not the_file.endswith(('.txt', 'py')):\n os.unlink(file_path)\n except Exception as e:\n print(e)",
"def remove(args):\n files = []\n for path in args.files:\n if os.path.isdir(path):\n ft = filetree(path)\n files.extend(ft.filelist())\n else:\n files.append(path)\n for path in files:\n relpath = os.path.normpath(os.path.relpath(path, args.base))\n if relpath in args.cache:\n del args.cache[args.cache.index(relpath)]\n if args.delete and os.path.exists(path):\n os.remove(path)\n args.update = True\n return",
"def _rm(folder):\n import os\n import shutil\n for the_file in os.listdir(folder):\n file_path = os.path.join(folder, the_file)\n try:\n if os.path.isfile(file_path):\n os.unlink(file_path)\n elif os.path.isdir(file_path):\n shutil.rmtree(file_path)\n except Exception as e:\n print(e)\n return",
"def remove_files(file_list):\n###############################################################################\n for fpath in file_list:\n if os.path.exists(fpath):\n os.remove(fpath)\n # End if\n # End for",
"def __removeFiles(self, pathToRemove, verbose=False):\n if os.path.isdir(pathToRemove):\n if not verbose:\n shutil.rmtree(pathToRemove)\n else:\n for root, dirs, files in os.walk(pathToRemove, topdown=False):\n for name in files:\n fileToRemove = os.path.join(root, name)\n print('Removing file ', fileToRemove)\n os.remove(fileToRemove)\n for name in dirs:\n dirToRemove = os.path.join(root, name)\n print('Removing directory ', dirToRemove)\n os.rmdir(dirToRemove)\n print('Removing directory ', pathToRemove)\n os.rmdir(pathToRemove)\n else:\n print('Removing File ', pathToRemove)\n os.remove(pathToRemove)",
"def _clean_files(self, in_subdirectory=False):\n files = self._file_explorer.ls()\n if not in_subdirectory:\n LOG.info(f\"Cleaning {len(files)} file(s) on the device\")\n for file_ in files:\n try:\n self._file_explorer.rm(file_)\n except Exception as e:\n # Try to explore subdirectory\n LOG.info(f\"Attempting to clean directory {file_}\")\n self._file_explorer.cd(file_)\n self._clean_files(in_subdirectory=True)\n if in_subdirectory:\n self._file_explorer.cd('..')\n else:\n LOG.info(\"Done cleaning FS\")",
"def _safe_clear_dirflow(path):\n print(\"Clearing {}...\".format(path))\n assert os.path.isdir(path), \"Didn't pass a folder to be cleaned\"\n list_dir = [f for f in os.listdir(path) if not f.startswith('.')]\n for folder in list_dir:\n cat_folder = os.path.join(path, folder)\n assert os.path.isdir(cat_folder), \\\n \"Dir contains Non-Folder File!\"\n cat_folder_item = [f for f in os.listdir(cat_folder)\n if not f.startswith('.')]\n for file in cat_folder_item:\n # For every file, confirm is PNG or error.\n # DONT DELETE YET, IN CASE OF ERRORS!\n assert \".png\" in file, \"Folder has Non PNG Contents!\"\n # If we got though that with no error, then now we can delete!\n # for folder in os.listdir(the_path):\n # cat_folder = os.path.join(the_path, folder)\n # for file in os.listdir(cat_folder):\n # os.remove(os.path.join(cat_folder, file))\n # os.rmdir(cat_folder)\n # os.rmdir(the_path)\n return True",
"def clearRunDirectory(self):\n for root, dirs, files in os.walk(self.run_dir, topdown=False):\n for name in files:\n if name.lower().endswith(('.cps', '.txt', '.sbml', '.csv')):\n os.remove(os.path.join(root, name))\n for name in dirs:\n if len(os.listdir(os.path.join(root, name)))==0:\n os.rmdir(os.path.join(root, name))",
"def _unstage_folder(dir_path):\n for dir_item in os.listdir(dir_path):\n full_path = os.path.join(dir_path, dir_item)\n if os.path.isfile(full_path) and dir_item != 'load.go':\n os.remove(full_path)",
"def _remove_files_dirs(self):\n if self.remove_remote_files_dirs:\n self._remove_remote_files_dirs()",
"def delete_playlists_in(path):\n\n for f in [f for f in os.listdir(path) if f.endswith('.m3u')]:\n os.remove(os.path.join(path, f))",
"def clear_debug_files(root_path_):\n\n ext_file = [\n \".sdf\",\n \".VC.db\",\n \".idb\",\n \".exp\",\n \".aps\",\n \".pdb\",\n \".obj\",\n \".res\",\n \".log\",\n \".tlog\",\n \".manifest\",\n \".lastbuildstate\",\n \".pch\",\n \".ipch\",\n \".cache\",\n \".ilk\",\n \".ipdb\",\n \".iobj\",\n \".aps\",\n ]\n\n ext_dir = [\n \"ipch\",\n\n ]\n if os.path.exists(root_path_):\n for root, dirs, files in os.walk(root_path_, topdown=True):\n for file in files:\n filename = os.path.join(root, file)\n delete_file(filename, ext_file)\n \n for dir in dirs:\n dir_path = os.path.join(root, dir)\n if dir.lower() in ext_dir:\n print(dir_path);\n shutil.rmtree(dir_path)\n\n for a_dir in ext_dir:\n path = os.path.join(root_path_, a_dir)\n if os.path.exists(path):\n shutil.rmtree(path)",
"def remove_unused_files(self):\n\n response_list = self.client.api_call(\n f'files.list?'\n f'count=1000&'\n )\n assert response_list['ok']\n\n for file in [\n f for f in response_list['files']\n if not f['channels'] and not f['groups'] and not f['ims']\n ]:\n response_delete = self.client.api_call(\n f'files.delete?'\n f'file={file[\"id\"]}'\n )\n assert response_delete['ok']",
"def clean_data():\n for clean_file in clean_files:\n file_list = [f for f in os.listdir(\".\") if f.endswith(clean_file)]\n for f in file_list:\n os.remove(f)",
"def clean():\n for dirpath, dirnames, filenames in os.walk('.'):\n for filename in filenames:\n if filename.endswith('.pyc') or filename.endswith('.pyo'):\n full_pathname = os.path.join(dirpath, filename)\n click.echo('Removing {}'.format(full_pathname))\n os.remove(full_pathname)",
"def process_delete_mp3_output_files(stand_alone_flag):\n\n if stand_alone_flag == 1:\n print(\"Deleting mp3 and output file. Value of stand_alone_flag : \", str(stand_alone_flag))\n mp3_files = glob.glob('*.mp3')\n output_files = glob.glob('*_Output.txt')\n for files in mp3_files:\n try:\n os.remove(files)\n except OSError:\n print(\"Cannot able delete the old mp3 files.\")\n\n for files in output_files:\n try:\n os.remove(files)\n except OSError:\n print(\"Cannot able delete the old output text files.\")",
"def clean():\n for f in [f for f in os.listdir() if f.endswith(\".part\")]:\n os.remove(f)",
"def cleanup(folder):\n os.system('rm -rf %s/*' % folder)",
"def clean_files(ftype, remove=False):\n import os\n files = os.listdir()\n found_files = [f for f in files if ftype in f]\n if remove:\n for ff in found_files:\n os.remove(ff)\n print(\"Removed {}\".format(ff))\n else:\n return found_files",
"def clean_retrosheet_files(self):\n # Get zipped and unzipped folder names\n zippedFileFolder = Filepath.get_retrosheet_folder(folder='zipped')\n unzippedFileFolder = Filepath.get_retrosheet_folder(folder='unzipped')\n\n # Clean out all files in both folders\n for folder in (zippedFileFolder, unzippedFileFolder):\n os.chdir(folder)\n for file in os.listdir(os.getcwd()): \n if os.path.isdir(file): \n shutil.rmtree(file)\n else: \n os.remove(file)",
"def _rm_glob(self, path):\r\n for path in glob.glob(path):\r\n shutil.rmtree(path)",
"def remove_files(files):\n for file in files:\n if os.path.exists(file):\n if file.startswith(\"./\") or file.startswith(\".\\\\\"):\n file = file[2:]\n if os.path.isdir(file):\n rmtree(file)\n else:\n os.unlink(file)",
"def cleanFiles(a_file_list):\n for entry in a_file_list:\n cmd = 'sudo rm ' + entry\n os.system(cmd)",
"def remove_temp_folders(self) -> None:\n if self.args.deletefolders:\n time.sleep(2)\n for f in self.created_folders:\n shutil.rmtree(path.join(self.args.output, f))\n print(f\"{self.args.output}/{f} was deleted\")",
"def _clear_audio_files(self):\n try:\n shutil.rmtree(self.audio_file_folder)\n except:\n print('Failure to clear audio files in {self.audio_file_folder}')",
"def clean_list(path):\n # Remove directories \n clean_file_list = [f for f in os.listdir(path)\n if os.path.isfile(os.path.join(path, f))]\n\n # List files to ignore\n bad_files = ['desktop.ini',\n os.path.basename(__file__)]\n # TODO: Ignore hidden files & self when compiled\n\n # Loop through bad files and remove from list\n for found_file in bad_files:\n if found_file in clean_file_list:\n clean_file_list.remove(found_file)\n return clean_file_list",
"def clean():\n clean_files()",
"def clean_filesystem(files=[]):\n remove_files(files + find_cache_files())"
] | [
"0.68569785",
"0.6794698",
"0.6593606",
"0.64101917",
"0.6376012",
"0.630781",
"0.6273378",
"0.6262631",
"0.6248296",
"0.62475854",
"0.6217406",
"0.61763805",
"0.6172198",
"0.61609787",
"0.6159008",
"0.615758",
"0.6146862",
"0.6136629",
"0.6123533",
"0.6111762",
"0.6106777",
"0.6099111",
"0.6096412",
"0.60846704",
"0.6051706",
"0.604957",
"0.60409963",
"0.60328585",
"0.5998901",
"0.5980306"
] | 0.68708473 | 0 |
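A minimal usage sketch for the removedirs function in the record above, assuming it is importable from its original module; the scratch path below is a made-up example.

# empty a scratch folder in silent mode; failures are collected instead of raised
leftovers = removedirs("c:/temp/scratch", silent=True)  # hypothetical path
if leftovers:
    print("could not remove:", leftovers)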
guess the type of a value. x: value; none: if True and all values are empty, return None; returns: the guessed type. If an integer starts with a zero, then it is a string (see the usage sketch after this record) | def guess_type_value (x, none = None) :
try :
int (x)
if x [0] == '0' and len (x) > 1 : return str
else : return int if len (x) < 9 else str
except :
try :
x = float (x)
return float
except :
if none :
if x is None : return None
try :
if len (x) > 0 : return str
else : return None
except :
return None
else :
return str | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def guess_type_value_type (none = True) :\n return [ None, str, int, float ] if none else [ str, int, float ]",
"def get_default_value_type (ty, none = True) :\n if ty is None and none : return None\n elif ty == str : return \"\"\n elif ty == int : return 0\n elif ty == decimal.Decimal : return decimal.Decimal(0)\n elif ty == float : return 0.0\n else :\n raise PQHException (\"type expected in \" + str (guess_type_value_type ()))",
"def convert_or_none(value, type_):\n try:\n return type_(value)\n except Exception:\n return None",
"def data_type(value):\n if type(value) == type(None):\n return 'no value'\n elif type(value) == list:\n if len(value) >= 3:\n return value[2]\n else:\n return None\n elif type(value) == bool:\n return value\n elif type(value) == int:\n if value < 100:\n return 'less than 100'\n elif value > 100:\n return 'more than 100'\n else:\n return 'equal to 100'\n elif type(value) == str:\n return len(value)\n else:\n return value",
"def _proper_type_return(val):\n if len(val) == 0:\n return None\n elif len(val) == 1:\n return list(val.values())[0]\n else:\n return val",
"def infer_value_type(self, value):\n if isinstance(value, str):\n if self.TIMESTAMP_MATCHER.match(value):\n return 'TIMESTAMP'\n elif self.DATE_MATCHER.match(value):\n return 'DATE'\n elif self.TIME_MATCHER.match(value):\n return 'TIME'\n elif not self.quoted_values_are_strings:\n # Implement the same type inference algorithm as 'bq load' for\n # quoted values that look like ints, floats or bools.\n if self.INTEGER_MATCHER.match(value):\n if (int(value) < self.INTEGER_MIN_VALUE\n or self.INTEGER_MAX_VALUE < int(value)):\n return 'QFLOAT' # quoted float\n else:\n return 'QINTEGER' # quoted integer\n elif self.FLOAT_MATCHER.match(value):\n return 'QFLOAT' # quoted float\n elif value.lower() in ['true', 'false']:\n return 'QBOOLEAN' # quoted boolean\n else:\n return 'STRING'\n else:\n return 'STRING'\n # Python 'bool' is a subclass of 'int' so we must check it first\n elif isinstance(value, bool):\n return 'BOOLEAN'\n elif isinstance(value, int):\n if value < self.INTEGER_MIN_VALUE or self.INTEGER_MAX_VALUE < value:\n return 'FLOAT'\n else:\n return 'INTEGER'\n elif isinstance(value, float):\n return 'FLOAT'\n elif value is None:\n return '__null__'\n elif isinstance(value, dict):\n if value:\n return 'RECORD'\n else:\n return '__empty_record__'\n elif isinstance(value, list):\n if value:\n return '__array__'\n else:\n return '__empty_array__'\n else:\n raise Exception(\n f'Unsupported node type: {type(value)} (should not happen)'\n )",
"def _get_value_type(self, value):\n\n value = value.strip()\n\n if value == 'True':\n return True\n elif value == 'False':\n return False\n else:\n try:\n return_value = int(value)\n except ValueError:\n try:\n return_value = float(value)\n except ValueError:\n return value\n\n return return_value",
"def resolve_type(value: t.Any) -> t.Any:\n value = str(value).strip()\n if value.lower() == \"true\":\n return True\n elif value.lower() == \"false\":\n return False\n elif value.lower() == \"none\":\n return None\n else:\n # attempt to cast\n try:\n return int(value)\n except:\n pass\n try:\n return float(value)\n except:\n pass\n # attempt to parse\n try:\n return literal_eval(value)\n except ValueError:\n pass\n except SyntaxError: # happens with single topics starting with '/'\n pass\n # unparseable, return as str\n return value",
"def try_int_or_float(value: Any) -> Union[int, float, None]:\n return try_int(str(value)) or try_float(str(value))",
"def smart_coerce(value: str) -> ValueType:\n try:\n return int(value)\n except ValueError:\n pass\n try:\n return float(value)\n except ValueError:\n pass\n if value.lower() in ('null', 'none', ):\n return None\n elif value.lower() in ('true', ):\n return True\n elif value.lower() in ('false', ):\n return False\n else:\n return value",
"def noneType(value):\r\n return ''",
"def NoneOrType(type_):\n def coercer(value):\n if value is None:\n return value\n else:\n return type_(value)\n return coercer",
"def get_type(value):\n\n if isinstance(value, str) or value is None:\n return Type.STRING\n elif isinstance(value, bool):\n return Type.BOOLEAN\n elif isinstance(value, (int, float)):\n return Type.NUMBER\n\n raise exceptions.Error(\"Value of unknown type: {value}\".format(value=value))",
"def _deduceValueType(value):\n\n\tif value.lower() == 'null':\n\t\treturn None\n\n\tif value.startswith(\"0x\"):\n\t\treturn bytes.fromhex(value[2:])\n\n\t# If value can be an int, float() will not raise\n\t# exception too\n\tresult = value\n\ttry:\n\t\tresult = float(value)\n\t\tresult = int(value)\n\texcept:\n\t\tpass\n\n\treturn result",
"def test_string_or_number():\n assert is_string_or_number(None) is None\n assert is_string_or_number(1) is None\n assert is_string_or_number(1.1) is None\n assert is_string_or_number('1.1') is None\n assert is_string_or_number([])",
"def GuessDataType(value, column_id=None):\n stripped_value = value.strip().replace('\"', '')\n\n if re.search('^-?[0-9]+$', stripped_value):\n if column_id == 'year':\n return 'date'\n else:\n return 'integer'\n elif re.search('^-?[0-9]+\\.[0-9]+$', stripped_value):\n return 'float'\n elif re.search('^[0-9]+(/|-)[0-9]+((/|-)[0-9]+){0,1}$', stripped_value):\n return 'date'\n else:\n return 'string'",
"def infer_type_and_convert(value:str) -> Any:\n if value.lower() == 'true':\n return True\n elif value.lower() == 'false':\n return False\n elif value.isdigit():\n return int(value)\n elif is_float(value):\n return float(value)\n else:\n return value",
"def check_type(val):\n\n try:\n a = float(val)\n return type(a)\n except ValueError:\n pass\n\n try:\n a = int(val)\n return type(a)\n except ValueError:\n pass\n\n try:\n a = dt.datetime.strptime(val, '%Y-%m-%dT%H:%M:%SZ')\n return type(a)\n except ValueError:\n pass\n\n return type(val)",
"def value_or_none(value):\n if value or value == 0:\n return value\n return None",
"def type_(type_):\n\n try:\n type_ = float(type_)\n if type_.is_integer():\n return int\n if not type_.is_integer():\n return float\n except ValueError:\n return str",
"def __type_checker(value: object) -> str:\n if type(value) == str:\n return value\n if type(value) == list:\n return value[0]",
"def data_type(arg):\n if isinstance(arg, str):\n return len(arg)\n elif isinstance(arg, bool):\n return arg\n elif isinstance(arg, list):\n return arg[2] if len(arg) >= 3 else None\n elif isinstance(arg, int):\n if arg == 100:\n return \"equal to 100\"\n else:\n if arg < 100:\n return \"less than 100\"\n else:\n return \"greater than 100\"\n else:\n return \"no value\"",
"def process_value(self, value: str) -> Any:\n\n if not value:\n if self.data_required:\n raise ValueError('A value is required')\n return None\n return self.data_type(value)",
"def valueOrDefault(x):\n if isNumber(x):\n return x\n else:\n return x.valueOrDefault()",
"def get_type_string(data):\r\n data_type = type(data)\r\n\r\n if data_type in (int, long):\r\n return 'integer'\r\n elif data_type == float:\r\n return 'float'\r\n elif data_type == bool:\r\n return 'boolean'\r\n elif data_type in (list, tuple):\r\n return 'list'\r\n elif data_type == dict:\r\n return 'hash'\r\n elif data is None:\r\n return 'null'\r\n elif isinstance(data, basestring):\r\n return 'string'",
"def get_default_value_of_type(self, primitive_type):\n if primitive_type == primitives.FUZZABLE_STRING:\n return 'fuzzstring'\n elif primitive_type == primitives.FUZZABLE_INT:\n return '0'\n elif primitive_type == primitives.FUZZABLE_BOOL:\n return 'false'\n elif primitive_type == primitives.FUZZABLE_OBJECT:\n return '{ \"fuzz\" : false }'\n else:\n logger.raw_network_logging(f'Unknown type {primitive_type} for default')\n return 'null'",
"def guessDataType(value):\n try: # see if the element is a float()\n if \".\" in value: # if no decimal point, prefer to save as a int.\n return(float(value))\n else:\n raise ValueError\n except ValueError:\n try: # see if it's actually an int?\n return(int(value))\n except ValueError:\n try: # see if I can cooerce it into a location:\n return(location(loc=value))\n except (TypeError, IndexError, AttributeError, AssertionError, ValueError): # this is not working, just store it as a string\n return(str(value))",
"def valueOrDefault(x):\n\tif isNumber(x): return x\n\telse: return x.valueOrDefault()",
"def null_value_handler(datatype, value, null_format):\n if DataType.Name(datatype) == \"STRING\":\n if NullValues.STRING == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"DOUBLE\":\n if math.isnan(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"FLOAT\":\n if math.isnan(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"INT32\":\n if NullValues.INT32 == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"INT64\":\n if NullValues.INT64 == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"DURATION\":\n if NullValues.DURATION.equals(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"TIMESTAMP\":\n if NullValues.TIMESTAMP.equals(value):\n return null_format\n return value\n else:\n return value",
"def null_value_handler(datatype, value, null_format):\n if DataType.Name(datatype) == \"STRING\":\n if NullValues.STRING == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"DOUBLE\":\n if math.isnan(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"FLOAT\":\n if math.isnan(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"INT32\":\n if NullValues.INT32 == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"INT64\":\n if NullValues.INT64 == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"DURATION\":\n if NullValues.DURATION.equals(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"TIMESTAMP\":\n if NullValues.TIMESTAMP.equals(value):\n return null_format\n return value\n else:\n return value"
] | [
"0.7983696",
"0.7474755",
"0.723659",
"0.71754414",
"0.6950673",
"0.69060194",
"0.6875436",
"0.68705696",
"0.6865456",
"0.6744836",
"0.6703542",
"0.66924196",
"0.6653156",
"0.6632004",
"0.6625986",
"0.66196996",
"0.6605846",
"0.65747064",
"0.6553645",
"0.6553337",
"0.65027356",
"0.6497354",
"0.64488715",
"0.64241827",
"0.6406543",
"0.6363119",
"0.6348604",
"0.6342304",
"0.63385403",
"0.63385403"
] | 0.87734115 | 0 |
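A minimal usage sketch for the guess_type_value function in the record above; the expected results are read off its logic, assuming the function is available as defined.

assert guess_type_value("042") is str           # leading zero -> keep as str
assert guess_type_value("42") is int            # short integer -> int
assert guess_type_value("123456789") is str     # 9+ digits are kept as str
assert guess_type_value("3.14") is float
assert guess_type_value("", none=True) is None  # empty value with none=True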
none: if True and all values are empty, return None; returns: the list of types recognized by guess_type_value (see the usage sketch after this record) | def guess_type_value_type (none = True) :
return [ None, str, int, float ] if none else [ str, int, float ] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _get_datatypes(input_dict):\n return set(filter(None.__ne__, set(input_dict.keys())))",
"def _proper_type_return(val):\n if len(val) == 0:\n return None\n elif len(val) == 1:\n return list(val.values())[0]\n else:\n return val",
"def _get_type_to_one_of():\n\n return {\n 'primitive': Settings._is_in_prim,\n 'list': Settings._is_sublist_in_one_of_lists,\n 'dict': Settings._is_dict_in_one_of_dicts\n }",
"def _compute_out_types(fields, type_list):\n if fields == 'all':\n return type_list\n return [type_list[i] for i in fields]",
"def get_check_types():",
"def get_object_types(default_val=False):\n result = []\n if g.user:\n if default_val:\n result.append((\"1\", \"Please Select\"))\n\n for name, data in entity.Entity.__dict__.items():\n if not isinstance(data, str):\n continue\n if \"__\" in name[:2]:\n continue\n result.append((name, name))\n\n result.sort()\n\n return result",
"def noneType(value):\r\n return ''",
"def ntypes(self): # -> list[None]:\n ...",
"def etypes(self): # -> list[None]:\n ...",
"def get_types(self) -> ColumnTypes:\n if self._types is None:\n return {}\n return {\n key: Types.STRING if value == Types.NONE else value\n for key, value in self._types.items()\n }",
"def getTypesList():\n return Gw2Spidy._request('types')['results']",
"def values(self):\n return [entry.value for entry in self.table if entry.value is not None]",
"def _or_types(field):\n return '|'.join(field.get('type', {}).get('names', []))",
"def null_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetBulletNullValueArgs']]]]:\n return pulumi.get(self, \"null_values\")",
"def get_valid_mi_types():\n valid_types = []\n for ltype in ww.type_system.registered_types:\n if \"category\" in ltype.standard_tags:\n valid_types.append(ltype)\n elif \"numeric\" in ltype.standard_tags:\n valid_types.append(ltype)\n elif (\n ltype == ww.logical_types.Datetime\n or ltype == ww.logical_types.Boolean\n or ltype == ww.logical_types.BooleanNullable\n ):\n valid_types.append(ltype)\n\n return valid_types",
"def getImmediatelyAddableTypes(self, context=None):\n return self.getLocallyAllowedTypes()",
"def availableValues(self):\n return [x.name for x in self._field.enum_type.values]",
"def parse_if_substitutions(\n value: StrSomeValueType\n) -> NormalizedValueType:\n data_types = set()\n\n def _parse_if(value):\n if isinstance(value, str):\n output = parse_substitution(value)\n if len(output) == 1 and isinstance(output[0], TextSubstitution):\n data_types.add(str)\n return output[0].text\n return output\n data_types.add(type(value))\n return value\n if isinstance(value, list):\n output = [_parse_if(x) for x in value]\n else:\n output = _parse_if(value)\n if len(data_types) > 1:\n raise ValueError('The result is a non-uniform list')\n return output",
"def value_type_check(table_rows):\n types = table_rows[0].types\n rst = True\n lst = []\n row_num = 0\n for row in table_rows:\n for i in range(0, len(row.values)):\n data_type = types[i].strip().upper()\n value = row.values[i].strip()\n if(data_type == \"INT\"):\n if(value != \"\" and _is_int(value) == False):\n rst = False\n lst.append(\"(col:{0},row:{1},value:{2})\".format(\n i, row_num, row.values[i]\n ))\n\n elif(data_type == \"FLOAT\"):\n if(value != \"\" and _is_float(value) == False):\n rst = False\n lst.append(\"(col:{0},row:{1},value:{2})\".format(\n i, row_num, row.values[i]\n ))\n row_num += 1\n return rst,\",\".join(lst)",
"def get_types(self) :\n\n return list(self.types)[1:]",
"def get_string_values(param_values, full_signature):\n strings, complex_types = [], []\n for i, value in enumerate(param_values):\n # If value is string, append value to strings List\n if COMPLEX_TYPES[\"STRING\"] in full_signature[i]:\n strings.append(value)\n\n # Else if unknown type, append to complex types List\n elif (\n not any(\n full_signature[i].startswith(val)\n for val in COMPLEX_TYPES.values()\n )\n and full_signature[i] not in SIMPLE_TYPES.keys()\n ):\n complex_types.append(full_signature[i])\n strings.append(value)\n\n return strings, list(set(complex_types))",
"def get_fuzzable_values(self, tag, primitive_type):\n # initialize\n fuzzable_values = []\n\n # add examples values\n if self.use_examples and self.get_examples_values:\n fuzzable_values += self.get_examples_values(tag)\n\n # add response values\n if self.use_response and self.get_response_values:\n fuzzable_values += self.get_response_values(tag)\n\n # add wordbook values\n if self.use_wordbook and self.get_wordbook_values:\n fuzzable_values += self.get_wordbook_values(primitive_type)\n\n # add the default value\n if self.use_embedded:\n fuzzable_values += [\n self.get_default_value_of_type(primitive_type)]\n\n return fuzzable_values",
"def null_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetPyNullValueArgs']]]]:\n return pulumi.get(self, \"null_values\")",
"def get_default_value_type (ty, none = True) :\n if ty is None and none : return None\n elif ty == str : return \"\"\n elif ty == int : return 0\n elif ty == decimal.Decimal : return decimal.Decimal(0)\n elif ty == float : return 0.0\n else :\n raise PQHException (\"type expected in \" + str (guess_type_value_type ()))",
"def initTypes(self):\n self.types = [ty.NoneType]*self.numcols()\n for k,row in enumerate(self.data):\n for i in range(self.numcols()):\n val = row[i]\n typ = self.types[i]\n if not val is None:\n if typ in [ty.NoneType,ty.IntType]:\n if val.isdigit():\n row[i] = int(val)\n if val.startswith('-') and val[1:].isdigit():\n row[i] = -int(val[1:])\n self.types[i] = ty.IntType\n continue\n if typ in [ty.NoneType,ty.IntType,ty.FloatType]:\n try:\n row[i] = float(val)\n if not typ == ty.FloatType:\n self.types[i] = ty.FloatType\n # Convert already existing values\n for j in range(k):\n elt = self.data[j][i]\n self.data[j][i] = None if elt is None else float(elt)\n continue\n except ValueError:\n pass\n if typ in [ty.NoneType,utils.Date]:\n try:\n row[i] = utils.Date(val)\n self.types[i] = utils.Date\n continue\n except ValueError:\n pass\n row[i] = unicode(val)\n if not typ == ty.UnicodeType:\n self.types[i] = ty.UnicodeType\n # Convert already existing values\n for j in range(k):\n elt = self.data[j][i]\n self.data[j][i] = None if elt is None else unicode(elt)",
"def missing_types():\n\n return ...",
"def _variable_single_types(self):\n return [\n 'Binary',\n 'KeyValue',\n 'String',\n 'TCEntity',\n 'TCEnhancedEntity',\n ]",
"def convert_or_none(value, type_):\n try:\n return type_(value)\n except Exception:\n return None",
"def _check_types(variables):\n for var in variables:\n if not isinstance(var, (str, type(None))):\n raise ValueError(\"You supplied a value of type %s, where a \"\n \"string or None was expected.\" % type(var))",
"def null_values(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['OneDashboardPageWidgetJsonNullValueArgs']]]]:\n return pulumi.get(self, \"null_values\")"
] | [
"0.6287529",
"0.6126239",
"0.58844453",
"0.57816195",
"0.57365364",
"0.57071155",
"0.5659363",
"0.5649177",
"0.5534212",
"0.55004627",
"0.54905343",
"0.54832405",
"0.54824364",
"0.5471524",
"0.5426873",
"0.5413267",
"0.54029876",
"0.5387826",
"0.53806627",
"0.53801656",
"0.5379198",
"0.53785026",
"0.53500783",
"0.5349826",
"0.53399473",
"0.5307495",
"0.53047335",
"0.5297348",
"0.5288031",
"0.52850974"
] | 0.812747 | 0 |
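A minimal usage sketch for the guess_type_value_type function in the record above, assuming it is available as defined.

assert guess_type_value_type() == [None, str, int, float]      # none=True keeps the None placeholder
assert guess_type_value_type(none=False) == [str, int, float]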
ty: type in guess_type_value_type; none: if True and all values are empty, return None; returns: a default value for this type (see the usage sketch after this record) | def get_default_value_type (ty, none = True) :
if ty is None and none : return None
elif ty == str : return ""
elif ty == int : return 0
elif ty == decimal.Decimal : return decimal.Decimal(0)
elif ty == float : return 0.0
else :
raise PQHException ("type expected in " + str (guess_type_value_type ())) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def guess_type_value_type (none = True) :\n return [ None, str, int, float ] if none else [ str, int, float ]",
"def _get_default_value(type_name, is_simple, is_iterative, is_required):\n # Iterables: convert via pre-defined mappings.\n if is_iterative:\n if is_required:\n return _get_iterative_default_value()\n else:\n return _get_iterative_null_value()\n # Simple types: convert via pre-defined mappings.\n elif is_simple:\n if is_required:\n return _get_simple_default_value(type_name)\n else:\n return _get_simple_null_value(type_name)\n # Complex types: convert via pre-defined mappings.\n else:\n if is_required:\n return _get_complex_default_value(type_name)\n else:\n return _get_complex_null_value(type_name)",
"def convert_or_none(value, type_):\n try:\n return type_(value)\n except Exception:\n return None",
"def get_default_value_of_type(self, primitive_type):\n if primitive_type == primitives.FUZZABLE_STRING:\n return 'fuzzstring'\n elif primitive_type == primitives.FUZZABLE_INT:\n return '0'\n elif primitive_type == primitives.FUZZABLE_BOOL:\n return 'false'\n elif primitive_type == primitives.FUZZABLE_OBJECT:\n return '{ \"fuzz\" : false }'\n else:\n logger.raw_network_logging(f'Unknown type {primitive_type} for default')\n return 'null'",
"def NoneOrType(type_):\n def coercer(value):\n if value is None:\n return value\n else:\n return type_(value)\n return coercer",
"def _proper_type_return(val):\n if len(val) == 0:\n return None\n elif len(val) == 1:\n return list(val.values())[0]\n else:\n return val",
"def if_none(value: Any, default: Any):\n return value if value is not None else default",
"def _infer_default_value_type(default_value):\n if default_value is Missing:\n return DefaultValue.missing\n elif default_value is Self:\n return DefaultValue.object\n elif isinstance(default_value, TraitListObject):\n return DefaultValue.trait_list_object\n elif isinstance(default_value, TraitDictObject):\n return DefaultValue.trait_dict_object\n elif isinstance(default_value, TraitSetObject):\n return DefaultValue.trait_set_object\n elif isinstance(default_value, list):\n return DefaultValue.list_copy\n elif isinstance(default_value, dict):\n return DefaultValue.dict_copy\n else:\n return DefaultValue.constant",
"def guess_type_value (x, none = None) :\n try :\n int (x)\n if x [0] == '0' and len (x) > 1 : return str\n else : return int if len (x) < 9 else str\n except :\n try :\n x = float (x)\n return float\n except :\n if none :\n if x is None : return None\n try :\n if len (x) > 0 : return str\n else : return None\n except :\n return None\n else :\n return str",
"def noneType(value):\r\n return ''",
"def get_default_value(self, tag, primitive_type, hint=None):\n # initialize\n default_value = self.get_default_value_of_type(primitive_type)\n\n # use example value as default (if exist)\n if self.use_examples_for_default and self.get_examples_values:\n examples_values = self.get_examples_values(tag)\n if examples_values:\n default_value = list(examples_values)[0]\n\n # use response value as default (if exist)\n if self.use_response_for_default and self.get_response_values:\n response_values = self.get_response_values(tag, hint)\n if response_values:\n default_value = response_values[0]\n\n return default_value",
"def empty_value(self, context):\n if self._default is NoValueSet:\n return None\n return self._default",
"def _default_value(self):\n return None",
"def _defaulted(cls, value, default):\n return default if value is None else value",
"def get_default_value(self):\n dv = self.default_value\n dvt = self.default_value_type\n if dvt < 0:\n dvt = _infer_default_value_type(dv)\n self.default_value_type = dvt\n\n return (dvt, dv)",
"def _default_field_value(field):\n return field.default or ([field.value_cls()] if field.is_list else field.value_cls())",
"def _maybe_use_hardcoded_type(self, value, name):\n if value is not UNRESOLVED_VALUE and not isinstance(value, MultiValuedValue):\n return value\n\n try:\n typ = self.config.NAMES_OF_KNOWN_TYPE[name]\n except KeyError:\n return value\n else:\n return TypedValue(typ)",
"def value_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"value_type\")",
"def null_value_handler(datatype, value, null_format):\n if DataType.Name(datatype) == \"STRING\":\n if NullValues.STRING == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"DOUBLE\":\n if math.isnan(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"FLOAT\":\n if math.isnan(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"INT32\":\n if NullValues.INT32 == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"INT64\":\n if NullValues.INT64 == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"DURATION\":\n if NullValues.DURATION.equals(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"TIMESTAMP\":\n if NullValues.TIMESTAMP.equals(value):\n return null_format\n return value\n else:\n return value",
"def null_value_handler(datatype, value, null_format):\n if DataType.Name(datatype) == \"STRING\":\n if NullValues.STRING == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"DOUBLE\":\n if math.isnan(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"FLOAT\":\n if math.isnan(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"INT32\":\n if NullValues.INT32 == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"INT64\":\n if NullValues.INT64 == value:\n return null_format\n return value\n elif DataType.Name(datatype) == \"DURATION\":\n if NullValues.DURATION.equals(value):\n return null_format\n return value\n elif DataType.Name(datatype) == \"TIMESTAMP\":\n if NullValues.TIMESTAMP.equals(value):\n return null_format\n return value\n else:\n return value",
"def _get_default_column_value(column_type):\n type_schema = {\n 'datetime': None,\n 'big_integer': 0,\n 'integer': 0,\n 'string': ''\n }\n\n if isinstance(column_type, sa_sql.type_api.Variant):\n return _get_default_column_value(column_type.impl)\n\n return type_schema[column_type.__visit_name__]",
"def value_or_none(value):\n if value or value == 0:\n return value\n return None",
"def _getDefaultValue(self):\n value = self._getDefaultValue()\n return value.getData() if value else None",
"def valueOrDefault(x):\n\tif isNumber(x): return x\n\telse: return x.valueOrDefault()",
"def default_value_scalar(source=None):\n if not default:\n return None\n if not source:\n return default\n else:\n return source",
"def infer_value_type(self, value):\n if isinstance(value, str):\n if self.TIMESTAMP_MATCHER.match(value):\n return 'TIMESTAMP'\n elif self.DATE_MATCHER.match(value):\n return 'DATE'\n elif self.TIME_MATCHER.match(value):\n return 'TIME'\n elif not self.quoted_values_are_strings:\n # Implement the same type inference algorithm as 'bq load' for\n # quoted values that look like ints, floats or bools.\n if self.INTEGER_MATCHER.match(value):\n if (int(value) < self.INTEGER_MIN_VALUE\n or self.INTEGER_MAX_VALUE < int(value)):\n return 'QFLOAT' # quoted float\n else:\n return 'QINTEGER' # quoted integer\n elif self.FLOAT_MATCHER.match(value):\n return 'QFLOAT' # quoted float\n elif value.lower() in ['true', 'false']:\n return 'QBOOLEAN' # quoted boolean\n else:\n return 'STRING'\n else:\n return 'STRING'\n # Python 'bool' is a subclass of 'int' so we must check it first\n elif isinstance(value, bool):\n return 'BOOLEAN'\n elif isinstance(value, int):\n if value < self.INTEGER_MIN_VALUE or self.INTEGER_MAX_VALUE < value:\n return 'FLOAT'\n else:\n return 'INTEGER'\n elif isinstance(value, float):\n return 'FLOAT'\n elif value is None:\n return '__null__'\n elif isinstance(value, dict):\n if value:\n return 'RECORD'\n else:\n return '__empty_record__'\n elif isinstance(value, list):\n if value:\n return '__array__'\n else:\n return '__empty_array__'\n else:\n raise Exception(\n f'Unsupported node type: {type(value)} (should not happen)'\n )",
"def valueOrDefault(x):\n if isNumber(x):\n return x\n else:\n return x.valueOrDefault()",
"def getDefaultValue(self) -> Optional[int]:\n try:\n return int(self.placeholderText())\n except ValueError:\n return None",
"def _get_scalar_default_value(dtype, default_value):\n if dtype == tf.string:\n return default_value or \"\"\n elif default_value is None:\n return 0\n elif isinstance(default_value, int) or isinstance(default_value, float):\n return default_value\n elif (isinstance(default_value, list) or\n isinstance(default_value, tuple)) and len(default_value) == 1:\n return default_value[0]\n else:\n raise ValueError(\"Only scalar or equivalent is allowed in default_value.\")",
"def parse_defaults(self, stmt):\r\n spec_type = stmt['spec_type']\r\n if spec_type in self._defaults:\r\n raise ValueError('More than one default for {}'.format(stmt['spec_type']))\r\n self._defaults[spec_type] = Default(spec_type, stmt)\r\n return None"
] | [
"0.76320404",
"0.7274249",
"0.72691476",
"0.69537956",
"0.6808499",
"0.6762997",
"0.6752358",
"0.67162085",
"0.6660357",
"0.6356065",
"0.6338377",
"0.628572",
"0.6268206",
"0.61693776",
"0.6162946",
"0.61595625",
"0.6150493",
"0.6132494",
"0.60981387",
"0.60981387",
"0.60578257",
"0.60542774",
"0.6030484",
"0.59175545",
"0.5898351",
"0.5891468",
"0.5859868",
"0.585513",
"0.58466923",
"0.5791176"
] | 0.7841762 | 0 |
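A minimal usage sketch for the get_default_value_type function in the record above, assuming it and its PQHException class are importable from the original module.

assert get_default_value_type(int) == 0
assert get_default_value_type(float) == 0.0
assert get_default_value_type(str) == ""
assert get_default_value_type(None) is None    # only valid while none=True
# any other type raises PQHException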
guess the type of a list. l: list; tolerance: let m be the frequency of the most representative type and m2 the second one; if m2 > m * tolerance, the result falls back to str; none: if True and all values are empty, return None; returns: type, length (order of preference: int, float, str); the length has a meaning only for the str result (see the usage sketch after this record) | def guess_type_list (l, tolerance = 0.01, none = True) :
defa = None if none else str
length = 0
if l in [str, float, int, None, decimal.Decimal] :
raise PQHException ("this case is unexpected %s" % str (l))
if len (l) == 0 :
res = defa
elif len (l) == 1 :
res = guess_type_value (l[0], none)
if res == str :
length = len (l [0])
else :
count = { }
for x in l :
t = guess_type_value (x, none)
length = max (length, len (x))
if t in count : count [t] += 1
else : count [t] = 1
val = [ (v,k) for k,v in count.items() ]
val.sort (reverse = True)
if len (val) == 1 :
res = val [0][1]
elif val [0][0] * tolerance < val [1][0] :
res = str
else :
res = val [0][1]
if res != str :
olength = 0
else :
if length > 0 :
x = math.log (length) / math.log (2) + 0.99999
x = int (x)
olength = math.exp ( x * math.log (2)) + 0.9999
olength = int (olength)*2
else :
olength = length
return res, olength | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_list_of_equal_len():\n\n @type_checked\n def _run_test(something:[str, int, bool]):\n assert isinstance(something[0], str)\n assert isinstance(something[1], int)\n assert isinstance(something[2], bool)\n\n _run_test(something=[None, \"12\", 1])",
"def lungime_multimi(multime_1, multime_2):\n if len(multime_1) < len(multime_2):\n return len(multime_1)\n else:\n return len(multime_2)",
"def verifyLengths( options, data ):\n types = [ 'maf', 'maf1e2', 'maf1e3', 'maf1e4',\n 'maf1e5', 'maf1e6', 'maf1e7', 'mafCpl1e2', \n 'mafCpl1e3', 'mafCpl1e4', 'mafCpl1e5', \n 'mafCpl1e6', 'mafCpl1e7', 'mafCtg1e2', \n 'mafCtg1e3', 'mafCtg1e4', 'mafCtg1e5', \n 'mafCtg1e6', 'mafCtg1e7', 'mafSpl1e2', \n 'mafSpl1e3', 'mafSpl1e4', 'mafSpl1e5', \n 'mafSpl1e6', 'mafSpl1e7', 'xAxis',\n 'mafCpEdgeCount', 'mafCpErrorCount', \n 'mafCpScafGapCount', 'blockEdgeCount' ]\n if len( data.chrNames ) != len( data.mafWigDict ): \n sys.stderr.write('the expected length of the data wig '\n 'dictionary is %d (i.e. number of chromosomes), but actual is %d\\n' \n % ( len( data.chrNames ), len( data.mafWigDict )))\n sys.exit( 1 )\n for c in data.chrNames:\n if len( types ) + 5 != len( data.mafWigDict[c] ): # extra 5 are from the *Max records\n sys.stderr.write('the expected length of the data wig '\n 'dictionary for %s is %d, but actual is %d\\n' \n % ( c, len( types ) + 5, len( data.mafWigDict[c] )))\n sys.stderr.write( '%s\\n' % str( data.mafWigDict[ c ].keys() ))\n sys.exit( 1 )\n sys.stderr.write('Verify number of records in data structure = %d, OK.\\n' % (len(types) + 4))\n for c in data.chrNames:\n for i in xrange(0, len( types ) - 1):\n if len( data.mafWigDict[c][ types[i] ] ) != len( data.mafWigDict[c][ types[i+1] ]):\n sys.stderr.write('the lengths of all vectors must the '\n 'same for a given chromosome. %s, %s (%d) != %s (%d)\\n' \n % ( c, types[i], len(data.mafWigDict[c][types[i]]), \n types[i+1], len(data.mafWigDict[c][types[i+1]]) ))\n sys.exit( 1 )\n sys.stderr.write('Verify length of records in data structure for chr %s are all %d, OK.\\n' \n % ( c, len(data.mafWigDict[c][ types[0] ])))\n sys.stderr.write('Verify lengths of arrays inside data structure, OK.\\n')",
"def _verify_data_types(self, min_elements=5) -> None:\n\t\t# Cancel if typestring ends with ellipsis, already run\n\t\tif self._data_types[-1] == \"...\":\n\t\t\treturn\n\n\t\t# Check special cases for the _data_types list\n\t\tdata_len = len(self._data_types)\n\t\t# Check if both floats and ints present\n\t\tif \"Int\" in self._data_types and \"Float\" in self._data_types:\n\t\t\t# Replace all \"Int\" types with \"Float\"\n\t\t\tfor index in range(data_len):\n\t\t\t\tif self._data_types[index] == \"Int\":\n\t\t\t\t\tself._data_types[index] = \"Float\"\n\n\t\t# Check if list at least min_elements and repeats, shorten with ellipsis if so.\n\t\tif data_len >= min_elements:\n\t\t\t# Check if sequence repeats, up to 2 elemtents for repeat check.\n\t\t\trepeat_num = len(set(self._data_types))\n\t\t\t# Get the sequence to check for repeat checks\n\t\t\tif (data_len / 2) >= repeat_num and not data_len % repeat_num:\n\t\t\t\trepeat_item = self._data_types[:repeat_num]\n\t\t\t\trepeat_list = []\n\t\t\t\t# Repeat the character sequence to match the length of the current list\n\t\t\t\tfor i in range(int(data_len / repeat_num)):\n\t\t\t\t\trepeat_list.extend(repeat_item)\n\t\t\t\t# Check equivilance\n\t\t\t\tif self._data_types == repeat_list:\n\t\t\t\t\t# Set to two iterations of the item, then stop\n\t\t\t\t\tself._data_types = self._data_types[:(repeat_num * 2)]\n\t\t\t\t\t# Add an ellipsis to the end\n\t\t\t\t\tself._data_types.append(\"...\")",
"def pick_length(self, ak_spec: Union[str, BKT]) -> Tuple[Optional[List[Hedron]], Optional[BKT]]:\n ...",
"def dict_judge1(_str1):\n\tglobal final_output\n\tif _str1==\"\":\n\t\treturn 'Finished.'\n\t_list0=dict_check34(_str1)\n\t#Judge1: Longest\n\t_list=[]\n\t_list1=[]\n\tfor i in range(len(_list0)):\n\t\tn=0\n\t\tfor j in range(3):\n\t\t\tn+=len(_list0[i][j])\n\t\t_list.append(n)\n\n\t_max=max(_list)\n\tfor i in range(len(_list0)):\n\t\tif _list[i]==_max:\n\t\t\twhile '' in _list0[i]:\n\t\t\t\t_list0[i].remove('')\n\t\t\tif not _list0[i] in _list1:\n\t\t\t\t_list1.append(_list0[i])\n\n\t#Judge2: Max Average Length\n\tif len(_list1)==1:\n\t\t_list2=_list1\n\telse:\n\t\t_list=[]\n\t\t_list2=[]\n\t\tfor i in range(len(_list1)):\n\t\t\tn=0\n\t\t\tfor j in range(len(_list1[i])):\n\t\t\t\tn+=len(_list1[i][j])\n\t\t\t_list.append(n/len(_list1[i]))\n\n\t\t_max=max(_list)\n\t\tfor i in range(len(_list1)):\n\t\t\tif _list[i]==_max:\n\t\t\t\t_list2.append(_list1[i])\n\n\t#Judge3: Take Variance for guarantee they're same patern\n\tif len(_list2)==1:\n\t\t_list3=_list2\n\telse:\n\t\t_list=[]\n\t\t_list3=[]\n\t\tfor i in range(len(_list2)):\n\t\t\tn=0\n\t\t\tfor j in range(len(_list2[i])):\n\t\t\t\tn+=len(_list2[i][j])**2\n\t\t\t_list.append(n/len(_list2[i]))\n\n\t\t_max=max(_list)\n\t\tfor i in range(len(_list2)):\n\t\t\tif _list[i]==_max:\n\t\t\t\t_list3.append(_list2[i])\n\n\t#Judge4: Single Word Frequency\n\tif len(_list3)==1:\n\t\t_list4=_list3\n\telse:\n\t\t_min=4\n\t\tfor i in range(len(_list3)):\n\t\t\tfor j in range(len(_list3[i])):\n\t\t\t\tif len(_list3[i][j])<_min:\n\t\t\t\t\t_min=len(_list3[i][j])\n\t\t_list=[]\n\t\t_list4=[]\n\t\tfor i in range(len(_list3)):\n\t\t\tn=0\n\t\t\tfor j in range(len(_list3[i])):\n\t\t\t\tif len(_list3[i][j])==_min:\n\t\t\t\t\tn+=_dict_ori[_list3[i][j]]\n\t\t\t_list.append(n)\n\n\t\t_max=max(_list)\n\t\tfor i in range(len(_list3)):\n\t\t\tif _list[i]==_max:\n\t\t\t\t_list4.append(_list3[i])\n\n\t#Output\n\tif len(_list4)!=1:\n\t\t_list4=_list4[0]\n\tif len(''.join(_list4[0]))==len(_str1):\n\t\tfinal_output=final_output+(' '.join(_list4[0]))\n\telse:\n\t\tfinal_output=final_output+_list4[0][0]+' '\n\t\tdict_judge1(_str1[len(_list4[0][0]):])",
"def longest_ORF_unit_tests():\n data_list = [\n [\"ATGCGAATGTAGCATCAAA\", \"ATGCTACATTCGCAT\"],\n [\"ATG\", \"ATG\"],\n [\"CAT\", \"ATG\"],\n [\"CATCAT\", \"ATGATG\"],\n [\"CATGGGCAT\", \"ATGCCCATG\"],\n [\"CATGCAGTGCACGGATGGCAT\", \"ATGCCATCCGTGCACTGCATG\"],\n# CATGCAGTGCACGGATGGCAT\n# CAT GCA GTG CAC GGA TGG CAT\n# ATG CAG TGC ACG GAT GGC AT\n# TGC AGT GCA CGG ATG GCA T\n# ATGCCATCCGTGCACTGCATG\n# ATG CCA TCC GTG CAC TGC ATG\n# TGC CAT CCG TGC ACT GCA TG\n# GCC ATC CGT GCA CTG CAT G\n [\"CATATGTAG\", \"ATG\"],\n [\"ATGGGGCATCATTAG\", \"ATGGGGCATCAT\"],\n# ATGGGGCATCATTAG\n# ATG GGG CAT CAT TAG\n# TGG GGC ATC ATT AG\n# GGG GCA TCA TTA G\n# CTAATGATGCCCCAT\n# CTA ATG ATG CCC CAT\n# TAA TGA TGC CCC AT\n# AAT GAT GCC CCA T\n ]\n for data in data_list:\n if len(data) == 2:\n print \"input: \" + str(data[0]) + \",\",\n print \"expected output: \" + str(data[1]) + \",\",\n o = longest_ORF(data[0])\n print \"actual output: \" + str(o)\n if o != data[1]:\n print \"## Test Fail Here!\"",
"def create_length_comparer_function(check_equal):\n equal = lambda x,y: len(x) == len (y)\n inequal = lambda x,y: not equal(x,y) \n return equal if check_equal else inequal",
"def check_format(score):\n try:\n for i in score:\n pass\n length = len(score)\n return length\n except TypeError:\n return 1",
"def test_within_length(self):\r\n\r\n flow1 = Flowgram(\"0 1.2 2.1 3.4 0.02 0.01 1.02 0.08\") # len 7\r\n flow2 = Flowgram('0.5 1.0 4.1 0.0 0.0 1.23 0.0 3.1') # len 10\r\n\r\n self.assertTrue(within_length(flow1, 0, 10))\r\n self.assertFalse(within_length(flow1, 10, 20))\r\n self.assertFalse(within_length(flow2, 0, 5))\r\n self.assertTrue(within_length(flow2, 5, 20))\r\n self.assertTrue(within_length(flow2, 5, 11))",
"def _common_length_of(l1, l2=None, l3=None):\n args = [];\n if l1 != None: args.append(l1)\n if l2 != None: args.append(l2)\n if l3 != None: args.append(l3)\n\n length = None\n num = 0\n for l in args:\n for i in l:\n num += 1\n length_i = len(i)\n if length!=None and length_i!=length:\n raise ValueError, \"Argument lengths differ!\"\n length = length_i\n\n return num, length",
"def num_accepts(self, max_len: int, bound: Sequence[Text] = ()) -> Tuple[int, int, int]:\n lt1: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n lt2: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n eq1: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n eq2: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n gt1: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n gt2: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n eq1[frozenset(self.start_nodes)] = 1\n num_accepted_le = int(self.accepts(\"\"))\n num_accepted_gt = 0\n for c in itertools.islice(itertools.chain(bound, itertools.repeat(None)), 0, max_len):\n for nodes, count in lt1.items():\n for element in self.possible_transitions(nodes):\n next_nodes = frozenset(self.next_nodes(nodes, element))\n lt2[next_nodes] += count\n for nodes, count in eq1.items():\n for element in self.possible_transitions(nodes):\n next_nodes = frozenset(self.next_nodes(nodes, element))\n if c is None or (element is not None and element > c):\n gt2[next_nodes] += count\n elif element == c:\n eq2[next_nodes] += count\n else:\n lt2[next_nodes] += count\n for nodes, count in gt1.items():\n for element in self.possible_transitions(nodes):\n next_nodes = frozenset(self.next_nodes(nodes, element))\n gt2[next_nodes] += count\n num_accepted_le += self._sum_tables(eq2)\n num_accepted_le += self._sum_tables(lt2)\n num_accepted_gt += self._sum_tables(gt2)\n if not lt2 and not eq2 and not gt2:\n break # Exit early if we know this regex cannot accept anymore strings.\n lt1, lt2 = lt2, collections.defaultdict(int)\n eq1, eq2 = eq2, collections.defaultdict(int)\n gt1, gt2 = gt2, collections.defaultdict(int)\n num_accepted_eq = int(len(bound) <= max_len and self.accepts(bound))\n return num_accepted_le - num_accepted_eq, num_accepted_eq, num_accepted_gt",
"def longest_ORF(dna):\n both_strings=find_all_ORFs_both_strands(dna)\n L=max(both_strings,key=len)\n Q=len(L)\n return Q\n\n #save out put of find all orfboth string to some variable",
"def longest_ORF_unit_tests():\n\n # YOUR IMPLEMENTATION HERE",
"def longest_ORF_unit_tests():\n\n # YOUR IMPLEMENTATION HERE",
"def test_get_length(t_list):\n if not get_length(t_list) == 10:\n raise ValueError(\"Wrong number of transactions\")",
"def length(memoryManager, paramsList):\n handleEmpty(paramsList, \"cannot get length of\")\n head = paramsList[0]\n\n if not validateList(head):\n raise Exception('Tried to get length of non-list')\n # if type(head) == float:\n # return [1.0]\n\n return [float(len(head))]",
"def find_note_type(self,words):\r\n\t\trates = []\r\n\t\tfixed_rate = {'Fixed Rate':{'distance': 5,\r\n\t\t\t\t\t\t\t\t\t'regex': 'Applicable',\r\n\t\t\t\t\t\t\t\t\t'search_direction': 'right'}\r\n\t\t\t\t\t }\r\n\t\tfr, fr_pos, fr_kw = self.min_dist_solution(words,fixed_rate)\r\n\t\tif fr_pos == None:\r\n\t\t\trates.append(None)\r\n\t\telif words[fr_pos-1] == 'Not':\r\n\t\t\trates.append('not_fixed')\r\n\t\telse:\r\n\t\t\trates.append('fixed')\r\n\t\tfloating_rate = {'Floating Rate':{'distance': 5,\r\n\t\t\t\t\t\t\t\t\t 'regex': 'Applicable',\r\n\t\t\t\t\t\t\t\t\t 'search_direction': 'right'}\r\n\t\t\t\t\t }\r\n\t\tflr, flr_pos, flr_kw = self.min_dist_solution(words,floating_rate)\r\n\t\tif flr_pos == None:\r\n\t\t\trates.append(None)\r\n\t\telif words[flr_pos-1] == 'Not':\r\n\t\t\trates.append('not_floating')\r\n\t\telse:\r\n\t\t\trates.append('floating')\r\n\t\tfixed_to_floating = {'Fixed to Floating':{'distance': 5,\r\n\t\t\t\t\t\t\t\t\t \t\t 'regex': 'Applicable',\r\n\t\t\t\t\t\t\t\t\t \t\t 'search_direction': 'right'}\r\n\t\t\t\t\t\t\t}\r\n\t\tftflr, ftflr_pos, ftflr_kw = self.min_dist_solution(words,fixed_to_floating)\r\n\t\tif ftflr_pos == None:\r\n\t\t\trates.append(None)\r\n\t\telif words[ftflr_pos-1] == 'Not':\r\n\t\t\trates.append('not_fixed_to_floating')\r\n\t\telse:\r\n\t\t\trates.append('fixed_to_floating')\r\n\t\tzero_coupon = {'Zero Coupon':{'distance': 5,\r\n\t\t\t\t\t\t\t\t\t 'regex': 'Applicable',\r\n\t\t\t\t\t\t\t\t\t 'search_direction': 'right'}\r\n\r\n\t\t\t\t\t }\r\n\t\tzc, zc_pos, zc_kw = self.min_dist_solution(words,zero_coupon)\r\n\t\tif zc_pos == None:\r\n\t\t\trates.append(None)\r\n\t\telif words[zc_pos-1] == 'Not':\r\n\t\t\trates.append('not_zero_coupon')\r\n\t\telse:\r\n\t\t\trates.append('zero_coupon')\r\n\t\t# Find 'Structured' tokens not following 'Hybrid' token\r\n\t\tstruc_indexes = [i for i,w in enumerate(words) if w == 'Structured' and words[i-1] != 'Hybrid']\r\n\t\tstruc_words = []\r\n\t\t# Extract 10 tokens after each 'Structured' token\r\n\t\tfor i in struc_indexes:\r\n\t\t\tstruc_words += words[i:i+10]\r\n\t\tstructured_note = {'Structured':{'distance': 5,\r\n\t\t\t\t\t\t\t\t\t 'regex': 'Applicable',\r\n\t\t\t\t\t\t\t\t\t 'search_direction': 'right'},\r\n\t\t\t\t\t\t }\r\n\t\tsn, sn_pos, sn_kw = self.min_dist_solution(struc_words,structured_note)\r\n\t\tif sn_pos == None:\r\n\t\t\trates.append(None)\r\n\t\telif words[sn_pos-1] == 'Not':\r\n\t\t\trates.append('not_structured')\r\n\t\telse:\r\n\t\t\trates.append('structured')\r\n\r\n\t\treturn rates",
"def longest_word_length(words):",
"def assert_len_eq(lists):\n # Sanity check\n max_len = max(len(p) for p in lists)\n for i, p in enumerate(lists):\n assert len(\n p\n ) == max_len, \"Length check failed!\\nl[{}] has {} elements != {} ({!r})\\n{!r}\".format(\n i, len(p), max_len, p, lists\n )",
"def len2(x):\n \n if hasattr(x, '__len__'):\n \n length = len(x)\n \n elif isinstance(x, (int,float,long,complex)):\n \n length = 1\n \n return length",
"def minimum_length(char_list, length):\n return len(char_list)**length + length - 1",
"def get_best_guess(self, lst):\n maxlen = 0\n pass\n #for elem in lst:",
"def validate_result(result, reference, t_start, t_stop):\n\n import numpy as np\n\n t_ref = reference[reference.dtype.names[0]]\n t_res = result[result.dtype.names[0]]\n\n # at least two samples are required\n if result.size < 2:\n return 'The result must have at least two samples'\n\n # check if stop time has been reached\n if t_res[0] > t_start:\n return 'The result starts at %g after the start time (%g s)' % (t_res[0], t_start)\n\n # check if stop time has been reached\n if t_res[-1] < t_stop:\n return 'The result ends at %g s before the stop time (%g s)' % (t_res[-1], t_stop)\n\n # check if all reference signals are contained in the result\n for name in reference.dtype.names[1:]:\n if name not in result.dtype.names:\n return 'Variable \"%s\" is missing' % name\n\n # find the signal with the most outliers\n for name in result.dtype.names[1:]:\n\n if name not in reference.dtype.names:\n continue\n\n y_ref = reference[name]\n y_res = result[name]\n _, _, _, outliers = validate_signal(t=t_res, y=y_res, t_ref=t_ref, y_ref=y_ref, t_start=t_start, t_stop=t_stop)\n\n # calculate the relative number of outliers\n rel_out = np.sum(outliers) / float(len(outliers))\n\n if rel_out > 0.1:\n return 'More than 10%% of the samples outside epsilon band for variable \"%s\"' % name\n\n return None",
"def test_len(self):\n\t\t# for 2 sample lists, I test that the len of the list is the len\n\t\t# of the LinkedList that is constructed with the list.\n\t\tl1 = [1]\n\t\tself.assertEqual(len(from_list_(l1).print()), len(l1))\n\t\tl2 = [1, 2, 3, 4, 5, 6, 7, 8, 9, 0]\n\t\tself.assertEqual(len(from_list_(l2).print()), len(l2))",
"def _get_length_const(self, length, constraints, name):\n # Check length and resolve wildcard\n lengths = [num for num, code in constraints]\n wilds = lengths.count(WILDCARD)\n assert wilds <= 1, \"Too many wildcards in sequence %s\" % name\n if wilds == 0: # no wildcards\n seq_lengths = sum(lengths)\n if length:\n assert length == seq_lengths, \"Length mismatch for sequence %s (%r != %r)\" % (name, length, seq_lengths)\n else: # If length was not specified (None), we set it\n length = seq_lengths\n else: # one wildcard\n if length == None: raise WildError(\"Sequence %s has a ?. but no length specified\" % name)\n check_length = sum([x for x in lengths if x != WILDCARD])\n wild_length = length - check_length # Wildcard is set so that total length is right\n assert wild_length >= 0, \"Sequence %s too short (%r > %r)\" % (name, length, check_length)\n i = lengths.index(WILDCARD)\n constraints[i] = (wild_length, constraints[i][1])\n \n const = \"\"\n for (num, base) in constraints:\n const += base * num # We represent constriants in long-form\n \n return length, const",
"def check_lengths(self, length: Expr) -> bool:\n for point1 in self.points:\n for point2 in self.points - {point1}:\n if abs(point2 - point1) == length:\n print(f'Length {length} found between points: {point1} and {point2}')\n return True\n return False",
"def classify(cls, i):\r\n # tweet_length \r\n if i[3] == None:\r\n return 1\r\n elif (float(i[3])) <= 14.5:\r\n return 1\r\n else:\r\n return 1",
"def _getLongestLength(self, listOfLists):\n\t\tmax = -1\n\t\tfor list in listOfLists:\n\t\t\tif len(list) > max:\n\t\t\t\tmax = len(list)\n\t\treturn max",
"def test_length(self):\n geometric = [x for x in iterators.GeometricIterator(limit=10, ratio=2)]\n self.assertEqual(len(geometric), 10)"
] | [
"0.57079786",
"0.56396145",
"0.5636325",
"0.55957866",
"0.55709916",
"0.54808486",
"0.5406149",
"0.5359623",
"0.5297381",
"0.52449346",
"0.5231122",
"0.5199308",
"0.51951873",
"0.5139221",
"0.5139221",
"0.5138084",
"0.51315254",
"0.51269794",
"0.51135665",
"0.510849",
"0.50960946",
"0.50889945",
"0.5055281",
"0.5053795",
"0.50461733",
"0.5031898",
"0.502622",
"0.5016553",
"0.5011472",
"0.50110614"
] | 0.7832862 | 0 |
determine many parameters on this machine: machine name, user name, domain, ... | def guess_machine_parameter () :
val = [ "COMPUTERNAME", "NUMBER_OF_PROCESSORS", "OS",
"PATH", "USERDOMAIN", "USERNAME", "USERPROFILE",
"windir", "TEMP" ]
res = { }
for v in val :
if v == "PATH" :
x = os.getenv (v)
x = x.split (";")
res [v] = x
else : res [v] = os.getenv (v)
if not sys.platform.startswith("win") :
if "TEMP" not in res or res["TEMP"] is None :
res["TEMP"] = "/tmp"
return res | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def getHostInfo():",
"def domainparams(self):\n\t\traise Exception(NotImplemented)",
"def test_parameters(self):\n # Try to create a machine without an image.\n status = self.proxy.server.create(PROVIDER_ID)\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine without a flavor.\n status = self.proxy.server.create(PROVIDER_ID, IMAGE)\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with wrong image format.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size\"], \"flavor\"\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with wrong flavor format.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], \"flavor\"\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with wrong machine_numbers.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], [\"flavor=flavor\"], -1\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with wrong userdata.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], [\"name=flavor\"], 1,\n \"availability_zone\", \"key_name\", \"security_group\",\n \"private_network\", \"public_network\", \"userdata\"\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with wrong scheduler_hints.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], [\"name=flavor\"], 1,\n \"availability_zone\", \"key_name\", \"security_group\",\n \"private_network\", \"public_network\", \"setup.py\", \"swap\",\n \"scheduler_hints\"\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with wrong meta.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], [\"name=flavor\"], 1,\n \"availability_zone\", \"key_name\", \"security_group\",\n \"private_network\", \"public_network\", \"setup.py\", \"swap\",\n [\"name=scheduler_hints\"], [\"meta\"]\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Try to create a machine with reserved meta.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], [\"name=flavor\"], 1,\n \"availability_zone\", \"key_name\", \"security_group\",\n \"private_network\", \"public_network\", \"setup.py\", \"swap\",\n [\"name=scheduler_hints\"], [\"mysql-fabric=True\"]\n )\n self.check_xmlrpc_command_result(status, has_error=True)\n\n # Create a machine.\n status = self.proxy.server.create(\n PROVIDER_ID, [\"name=image\", \"size=20\"], [\"name=flavor\"], 1,\n \"availability_zone\", \"key_name\", \"security_group\",\n \"private_network\", \"public_network\", \"setup.py\", \"swap\",\n [\"name=scheduler_hints\"], [\"name=meta\"]\n )\n self.check_xmlrpc_command_result(status)\n\n # TODO: Test other parameters that were included with database.",
"def do_machines(self, args):\n lex_parser = shlex.shlex(args)\n lex_parser.whitespace += \",\"\n lex_parser.wordchars += \"-\"\n\n machine_list = [m for m in lex_parser]\n #TODO check if the hostname has space in it, if yes, invalid.\n if not machine_list:\n print(\"Invalid. See the help.\")\n return False\n\n self.hostname_list = machine_list\n print(f\"Set to run in {len(self.hostname_list)} machines.\")",
"def par_domain(self):",
"def fp_meta(self):\n for server in self.machines:\n s = self.machines[server]\n print \"%s: %s (%s)\" % (s.id, s.adminPass, s)",
"def get_machine_info(**kwargs):\n # Set machine names, using local info if connected to localhost\n if kwargs[\"conn_machine_name\"] == \"localhost\":\n local_uname = os.uname()\n # If --machine-name passed in, override pymapd con value\n if kwargs[\"machine_name\"]:\n run_machine_name = kwargs[\"machine_name\"]\n else:\n if kwargs[\"conn_machine_name\"] == \"localhost\":\n run_machine_name = local_uname.nodename.split(\".\")[0]\n else:\n run_machine_name = kwargs[\"conn_machine_name\"]\n # If --machine-uname passed in, override pymapd con value\n if kwargs[\"machine_uname\"]:\n run_machine_uname = kwargs[\"machine_uname\"]\n else:\n if kwargs[\"conn_machine_name\"] == \"localhost\":\n run_machine_uname = \" \".join(local_uname)\n else:\n run_machine_uname = \"\"\n machine_info = {\n \"run_machine_name\": run_machine_name,\n \"run_machine_uname\": run_machine_uname,\n }\n return machine_info",
"def getRequestHostname():",
"def get_site_parameters(self, site):\n parms = {}\n parms[\"converter\"] = self.supported_sites[site].converter\n parms[\"decoder\"] = self.supported_sites[site].decoder\n parms[\"hudbgcolor\"] = self.supported_sites[site].hudbgcolor\n parms[\"hudfgcolor\"] = self.supported_sites[site].hudfgcolor\n parms[\"hudopacity\"] = self.supported_sites[site].hudopacity\n parms[\"screen_name\"] = self.supported_sites[site].screen_name\n parms[\"site_path\"] = self.supported_sites[site].site_path\n parms[\"table_finder\"] = self.supported_sites[site].table_finder\n parms[\"HH_path\"] = self.supported_sites[site].HH_path\n parms[\"site_name\"] = self.supported_sites[site].site_name\n parms[\"aux_window\"] = self.supported_sites[site].aux_window\n parms[\"font\"] = self.supported_sites[site].font\n parms[\"font_size\"] = self.supported_sites[site].font_size\n parms[\"enabled\"] = self.supported_sites[site].enabled\n parms[\"xpad\"] = self.supported_sites[site].xpad\n parms[\"ypad\"] = self.supported_sites[site].ypad\n return parms",
"def getHost():",
"def getHost():",
"def check_params_set():\n critical = {'machineinfo' : MACHINEID, \n 'error_serverinfo' : ERROR_SERVER, \n 'serverinfo' : SERVER}\n for i, val in critical.iteritems():\n if not val:\n print \"ERROR: Set value for \\\"%s\\\" in baseconfig.cfg file first\\n\" % i\n sys.exit(1)",
"def extractAuthGWInfo(self,dn):\n \n# dn = request.get(self.jid_auth_header, '')\n dn = transfer_codec(dn)\n userName,idNumber = split_idNumber(dn)\n loginid = idNumber\n# loginid = transfer_codec(loginid) \n# creds['remote_host'] = request.get('REMOTE_HOST', '')\n return loginid,userName,idNumber",
"def parse_user_selections(self):\n if \"model2\" in sys.argv:\n self.model_choice = \"model2\"\n else:\n self.model_choice = \"model1\"\n\n if \"Virginia\" in sys.argv:\n self.region = \"Virginia\"\n self.region_name = 'us-east-1'\n elif \"California\" in sys.argv:\n self.region = \"California\"\n self.region_name = 'us-west-1'\n else:\n self.region = \"Oregon\"\n self.region_name = 'us-west-2'\n\n if self.verbose_mode:\n print \"** will run the Machine Learning %s\" % self.model_choice\n print \"\\n** Running on %s Elastic Map Reduce server\" % self.region",
"def __getLocalAndRemoteMachineNames(self):\n hostNameMapping = {}\n ## collect the qualified hostnames for each remote node\n for nodeId in list(set(self.runInfoDict['Nodes'])):\n hostNameMapping[nodeId.strip()] = socket.gethostbyname(nodeId.strip())\n self.raiseADebug('Host \"'+nodeId.strip()+'\" identified with IP: ', hostNameMapping[nodeId.strip()])\n\n return hostNameMapping",
"def parameters(self):",
"def get_parameters(self):\r\n raise Exception(\"Not implemented (server-side parameter initialization)\")",
"def get_details():\n if not hasattr(env, \"site_name\"):\n env.site_name = prompt(\"Enter site domain name:\")\n env.site_is_secure = confirm(\"Do you need SSL? (Yes/No)\", default=False)\n env.app_server = prompt(\"Enter app server you wish to use (apache/uwsgi/gunicorn):\")\n if env.site_is_secure:\n env.ip_address = prompt(\"Enter server IP address:\")\n else:\n env.ip_address = \"0.0.0.0\"\n\n # Find out project name\n project_name = env.site_name.split('.')\n try:\n if project_name[1] == 'com':\n # Sample case - abc.com\n env.project_name = project_name[0]\n else:\n # Sample case - shop.abc.com\n env.project_name = project_name[1]\n except IndexError:\n env.project_name = env.site_name",
"def get_arguments(self, local_machine):\n\n parser = argparse.ArgumentParser()\n\n if local_machine == \"client\":\n parser.add_argument(\"host\", help=\"target machine's host\")\n parser.add_argument(\"port\", help=\"target machine's port\", type=int)\n\n all_requests = parser.add_subparsers(help='all commands for server', dest='request', required=True)\n put_request = all_requests.add_parser('put', help='puts the specified file onto server')\n get_request = all_requests.add_parser('get', help='retrieves the specified file from server')\n all_requests.add_parser('list', help='lists the server directory')\n\n for request in put_request, get_request:\n request_help = \"file to transfer to server\" if request == put_request else \"file to retrieve from server\"\n request.add_argument('filename', help=request_help)\n\n elif local_machine == \"server\":\n parser.add_argument(\"port\", help=\"target port for listening to connections\", type=int)\n\n args = parser.parse_args()\n\n if args.port < 0 or args.port > 65535:\n raise parser.error(StatusCode.code[2002])\n self.port = args.port\n\n if local_machine == \"client\":\n self.host = args.host\n self.request = args.request\n if self.request != \"list\":\n self.file = args.filename",
"def known_domain_data(known_uid, known_verbose_name, known_os_type):\n return {\n 'id': known_uid,\n 'verbose_name': known_verbose_name,\n 'os_type': known_os_type\n }",
"def _domain(self):\n return [self.args[0] >= 0, self.args[1] >= 0]",
"def sysArgs(arguments):\n\n # if no args print usage\n if not arguments:\n print 'usage: [--auto] [--manual user_ID server_IP server_Port]'\n sys.exit()\n\n # --auto flag\n if arguments[0] == '--auto':\n return (USER_NAME, SERVER_HOST, SERVER_PORT)\n\n # --manual flag\n if arguments[0] == '--manual':\n return (arguments[1], arguments[2], int(arguments[3]))",
"def test_arguments_parser(self):\n self.assertEqual('monitoring-dc.app.corp',\n self.plugin.options.hostname)",
"def create_user_domain():\n for host in online_hosts:\n with settings(warn_only=True):\n\n result1 = sudo(\"racadm -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgUserDomain -o cfgUserDomainName <Domain Name> -i 1\")\n if result1.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for UserDomainName failed \")\n\n result2 = sudo(\"racadm -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgActiveDirectory -o cfgADDomainController1 <Domain Name>\")\n if result2.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for DomainController1 failed \")\n\n result3 = sudo(\"racadm -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgActiveDirectory -o cfgADGlobalCatalog1 <Domain Name>\")\n if result3.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for GlobalCatalog1 failed \")",
"def __init__(self, host, username, passwd):\n self.host = host\n self.username = username\n self.passwd = passwd\n self.engines = {}",
"def get_device_data():\n user = input('Username: ')\n password = input('Password: ')\n device_name = input('FQDN or IP of device: ')\n\n return user, password, device_name",
"def get_host_variables(self, host):\n vars = {}\n for i in self.parsers:\n vars.update(i.get_host_variables(host))\n return vars",
"def getdefaultpara(self):\n self.Result_DB = str(ft_utils.get_db_url())\n self.masterusername = str(ft_constants.ONOSBENCH_USERNAME)\n self.masterpassword = str(ft_constants.ONOSBENCH_PASSWORD)\n self.agentusername = str(ft_constants.ONOSCLI_USERNAME)\n self.agentpassword = str(ft_constants.ONOSCLI_PASSWORD)\n self.runtimeout = ft_constants.ONOS_RUNTIMEOUT\n self.OCT = str(ft_constants.ONOS_OCT)\n self.OC1 = str(ft_constants.ONOS_OC1)\n self.OC2 = str(ft_constants.ONOS_OC2)\n self.OC3 = str(ft_constants.ONOS_OC3)\n self.OCN = str(ft_constants.ONOS_OCN)\n self.OCN2 = str(ft_constants.ONOS_OCN2)\n self.installer_master = str(ft_constants.ONOS_INSTALLER_MASTER)\n self.installer_master_username = \\\n str(ft_constants.ONOS_INSTALLER_MASTER_USERNAME)\n self.installer_master_password = \\\n ft_constants.ONOS_INSTALLER_MASTER_PASSWORD\n self.hosts = [self.OC1, self.OCN, self.OCN2]\n self.localhost = self.OCT",
"def get_user_data():\n parser = argparse.ArgumentParser(description='Type your data with whitespace in following format')\n parser.add_argument('depart_iata', type=lambda x: x.upper(), help='AAA')\n parser.add_argument('dest_iata', type=lambda x: x.upper(), help='AAA')\n parser.add_argument('depart_date', type=lambda x: datetime.strptime(x, \"%Y-%m-%d\"), help='YYYY-MM-DD')\n parser.add_argument('return_date', nargs='?', default='', help='YYYY-MM-DD - optional')\n args = parser.parse_args()\n validate_iata(args.depart_iata, args.dest_iata)\n validate_date(args.depart_date.date(), args.return_date)\n return args.depart_iata, args.dest_iata, args.depart_date.date(), args.return_date",
"def parseArgs ( args ) :\n assert len ( args ) == 5\n loginInfo = []\n for s in args :\n loginInfo.append ( s )\n loginInfo.pop ( 0 )\n assert len ( loginInfo ) == 4\n return loginInfo"
] | [
"0.55696225",
"0.55040246",
"0.5490704",
"0.5457476",
"0.53774184",
"0.52768856",
"0.5258111",
"0.51880157",
"0.5074766",
"0.5068662",
"0.5068662",
"0.5053315",
"0.5036607",
"0.50341004",
"0.5030835",
"0.49431947",
"0.49221584",
"0.49181613",
"0.49145168",
"0.4904791",
"0.4892164",
"0.48818937",
"0.48511556",
"0.48335862",
"0.4831428",
"0.48085403",
"0.48060772",
"0.47891447",
"0.47832885",
"0.47703364"
] | 0.60301954 | 0 |
empty string or not? s: any string (str, None); returns bool, is it empty or not; raises PQHException when a type is unexpected | def IsEmptyString (s) :
if s is None : return True
elif isinstance (s, str) :
return len (s) == 0
else :
raise PQHException ("the type is unexpected %s" % str (type (s))) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_typeerror_in_case_of_string(self):\n eq_(None,grepit(\"\",\"\",\"\"))",
"def is_str_null(msg):\n\n if None == msg or \"\" == msg:\n return True\n return False",
"def is_str_none_or_empty(val):\n if val is None:\n return True\n if isinstance(val, string_types):\n val = val.strip()\n if not val:\n return True\n return False",
"def noneType(value):\r\n return ''",
"def _is_bumf(value):\n if type(value) in (unicode, str):\n return value.strip() == ''\n return value is None",
"def not_set(string):\n if string is None:\n return True\n elif string == '':\n return True\n return False",
"def non_empty(val):\n return val is not None and val != \"\"",
"def _is_string(arg):\n return isinstance(arg, types.StringTypes)",
"def is_string(value):\n return isinstance(value, basestring)",
"def is_empty(val):\n if val is None or isinstance(val, Sized) and len(val) == 0: # Empty string is also Sized of len 0\n return True\n return False",
"def query_is_empty(input_string):\n if re.match(r'\\A\\s*\\Z', input_string) is None:\n return True\n else:\n return False",
"def check_integrity(dict):\n if (dict['type'] == 'string') and (dict['value'] == None or dict['value'] == ''):\n dict['value'] = '\"\"'",
"def _validate_str(s):\n if not isinstance(s, str):\n raise TypeError(\"Expected string, got {}\".format(type(s)))\n if len(s) == 0:\n raise ValueError(\"Empty variant string.\")\n return",
"def check_str(val, name, allow_none=False, allow_empty=False):\n\n if val is None:\n if not allow_none:\n raise ValueError(name + ' of value ' + str(val) + ' should not be None.')\n else:\n\n if not isinstance(val, str) and not isinstance(val, unicode):\n raise TypeError(name + ' of value ' + str(val) + ' should be a string.' + ' but is of type ' + type(val).__name__)\n\n elif len(val.strip()) == 0 and not allow_empty:\n raise ValueError(name + ' of value ' + str(val) + ' should not empty string.')",
"def is_empty_str(val):\n s = str(val)\n if not isinstance(s, str):\n return False\n if not s.strip():\n return True\n else:\n return False",
"def _str_validator(arg):\n if arg is None or arg is '' or type(arg) != str:\n raise ValueError('Incorrect value: input should be a string')",
"def is_null_or_empty(string_val):\n if string_val and string_val.strip():\n return False\n return True",
"def is_string(value):\n return isinstance(value, (str, bytes))",
"def validate_str(val, allow_none=False, allow_empty=False):\n\n if val is None:\n if not allow_none:\n return False\n else:\n\n if not isinstance(val, str) and not isinstance(val, unicode):\n return False\n\n elif len(val.strip()) == 0 and not allow_empty:\n return False\n\n return True",
"def test_string_or_number():\n assert is_string_or_number(None) is None\n assert is_string_or_number(1) is None\n assert is_string_or_number(1.1) is None\n assert is_string_or_number('1.1') is None\n assert is_string_or_number([])",
"def isString(s):\r\n if not isinstance(s, util.AtomicString):\r\n return isinstance(s, basestring)\r\n return False",
"def is_string(val):\n return (\n isinstance(val, unicode) or \\\n isinstance(val, str) \n )",
"def isString(s):\r\n try:\r\n return isinstance(s, unicode) or isinstance(s, basestring)\r\n except NameError:\r\n return isinstance(s, str)",
"def is_str ( self, s ):\r\n\t\treturn isinstance ( s, type( str () ) )",
"def isString(x):\n if type(x) == str:\n return True\n else:\n return False",
"def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode",
"def s2s(s):\n if (s is None): return \"\"\n else: return s",
"def is_str(value):\n if not type(value) is str:\n return False\n else:\n return True",
"def check_type(s: pd.Series) -> Tuple[bool, str]:\n\n error_string = (\n \"should be TextSeries: the input Series should consist only of strings in every cell.\"\n \" See help(hero.HeroTypes) for more information.\"\n )\n\n try:\n first_non_nan_value = s.loc[s.first_valid_index()]\n if not isinstance(first_non_nan_value, str):\n return False, error_string\n except KeyError: # Only NaNs in Series -> same warning applies\n return False, error_string\n\n return True, \"\"",
"def is_field_empty(*args):\n for field in args:\n if field == \"\" or field is None:\n return True\n return False\n return \"NONDETERMINISTIC\""
] | [
"0.74072754",
"0.72107166",
"0.7186775",
"0.70678073",
"0.70505446",
"0.6966357",
"0.6778749",
"0.6723711",
"0.67125064",
"0.66950375",
"0.6687345",
"0.66824704",
"0.6678631",
"0.6661755",
"0.6641918",
"0.6618534",
"0.6617177",
"0.6611896",
"0.6606371",
"0.660463",
"0.65938205",
"0.6584578",
"0.65823674",
"0.65764034",
"0.65748334",
"0.65550214",
"0.65490735",
"0.653724",
"0.6536117",
"0.6522754"
] | 0.81841487 | 0 |
try different encodings to load a file (tries utf8, latin1 and None); takes filename, returns a couple (content, encoding) | def load_content_file_with_encoding (filename) :
error = None
for enc in [ "utf8", "latin1", None ] :
try :
with open(filename, "r", encoding = enc) as f : content = f.read()
return content, enc
except Exception as e :
error = e
raise error | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_file_encoding(file_path):\n with open(file_path, 'r', encoding='utf-8') as f:\n try:\n for line in f:\n pass\n except UnicodeDecodeError:\n return 'ISO-8859-1'\n else:\n return 'utf-8'",
"def detect_encoding(filename, default_to_utf8 = True, **kwargs):\n # Read some of the file\n import os.path\n filename = from_posix(filename)\n file_len = os.path.getsize(filename)\n read_len = min(_READ_CHUNK_SIZE, file_len)\n\n # ... unless we're supposed to!\n if kwargs.get('read_all', False):\n read_len = file_len\n\n # Read the first read_len bytes raw, so we can detect the encoding\n with open(filename, 'rb') as raw_handle:\n raw = raw_handle.read(read_len)\n\n # Detect the encoding the file specfies, if any.\n import codecs\n if raw.startswith(codecs.BOM_UTF8):\n encoding = 'utf-8-sig'\n else:\n # Detect encoding using the best detector available\n try:\n # First try ICU. ICU will report ASCII in the first 32 Bytes as\n # ISO-8859-1, which isn't exactly wrong, but maybe optimistic.\n import icu\n encoding = icu.CharsetDetector(raw).detect().getName().lower()\n except ImportError: # pragma: nocover\n # If that doesn't work, try chardet - it's not got native components,\n # which is a bonus in some environments, but it's not as precise.\n import chardet\n encoding = chardet.detect(raw)['encoding'].lower()\n\n # Chardet is more brutal in that it reports ASCII if none of the first\n # Bytes contain high bits. To emulate ICU, we just bump up the detected\n # encoding.\n if encoding == 'ascii':\n encoding = 'iso-8859-1'\n\n # Both chardet and ICU may detect ISO-8859-x, which may not be possible\n # to decode as UTF-8. So whatever they report, we'll try decoding as\n # UTF-8 before reporting it.\n if default_to_utf8 and encoding in ('ascii', 'iso-8859-1', 'windows-1252'):\n # Try decoding as utf-8\n try:\n raw.decode('utf-8')\n # If this worked... well there's no guarantee it's utf-8, to be\n # honest.\n encoding = 'utf-8'\n except UnicodeDecodeError:\n # Decoding as utf-8 failed, so we can't default to it.\n pass\n\n return encoding",
"def get_encoding_file(fname):\n with io.open(fname, 'rt', encoding='ascii', errors='ignore') as f:\n for unused in range(2):\n line = f.readline()\n match = get_encoding_re.search(line)\n if match:\n return match.group(1)\n return 'ascii'",
"def get_file_encoding(filename):\n\n with open(filename, 'rb') as f:\n # UTF16 or latin1\n f.seek(0)\n if f.read(2) == b'\\xff\\xfe':\n encoding = 'utf16'\n else:\n encoding = 'latin1'\n return encoding",
"def _read(filename, encodings=['ascii', 'utf-8', 'utf-16', 'latin-1']):\n text = None\n\n for encoding in encodings:\n try:\n f = open(filename, encoding=encoding)\n text = f.read()\n f.close()\n except UnicodeDecodeError:\n f.close()\n except UnicodeError:\n f.close()\n except FileNotFoundError:\n raise FileNotFoundError(\"Could not open file.\")\n\n if not text:\n raise UnicodeError(filename)\n\n return text",
"def determineEncoding(self, filepath):\n with open(self.filepath,\"r\",encoding='utf-16') as reader: \n try:\n line = reader.readline()\n return \"utf-16\"\n except:\n return \"utf-8\"",
"def test_file_read_utf8_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_utf8()",
"def check_file_encoding(self, input_file_path):\n self.log([u\"Checking encoding of file '%s'\", input_file_path])\n self.result = ValidatorResult()\n if self._are_safety_checks_disabled(u\"check_file_encoding\"):\n return self.result\n if not gf.file_can_be_read(input_file_path):\n self._failed(u\"File '%s' cannot be read.\" % (input_file_path))\n return self.result\n with io.open(input_file_path, \"rb\") as file_object:\n bstring = file_object.read()\n self._check_utf8_encoding(bstring)\n return self.result",
"def test_load_verify_unicode_cafile(self, tmpfile):\n self._load_verify_cafile(\n tmpfile.decode(getfilesystemencoding()) + NON_ASCII\n )",
"def _test_this_file_encoding(\n fname, test_file,\n unicode_whitelist=unicode_whitelist,\n unicode_strict_whitelist=unicode_strict_whitelist):\n has_unicode = False\n\n is_in_whitelist = False\n is_in_strict_whitelist = False\n for patt in unicode_whitelist:\n if fnmatch.fnmatch(fname, patt):\n is_in_whitelist = True\n break\n for patt in unicode_strict_whitelist:\n if fnmatch.fnmatch(fname, patt):\n is_in_strict_whitelist = True\n is_in_whitelist = True\n break\n\n if is_in_whitelist:\n for idx, line in enumerate(test_file):\n try:\n line.encode(encoding='ascii')\n except (UnicodeEncodeError, UnicodeDecodeError):\n has_unicode = True\n\n if not has_unicode and not is_in_strict_whitelist:\n assert False, message_unicode_D % fname\n\n else:\n for idx, line in enumerate(test_file):\n try:\n line.encode(encoding='ascii')\n except (UnicodeEncodeError, UnicodeDecodeError):\n assert False, message_unicode_B % (fname, idx + 1)",
"def load_text_file(file_name: str) -> str:\r\n try:\r\n with open(file_name, encoding='windows-1251') as file_object:\r\n return file_object.read()\r\n except FileNotFoundError as err:\r\n print(f\"{err}\\n\"\r\n f\"Please make sure the file you are trying to open exists!\")\r\n quit()",
"def open_file(filename, mode = 'r'):\n return open(filename, mode, encoding='latin-1',errors='ignore')",
"def _is_utf8(filename: str) -> bool:\n import codecs\n\n try:\n f = codecs.open(filename, encoding=\"utf-8\", errors=\"strict\")\n for _ in f:\n pass\n return True\n except UnicodeDecodeError:\n return False",
"def get_encoding(fname):\n file = open(fname, 'rb')\n encoding = chardet.detect(file.read())['encoding']\n return encoding",
"def __define_encoding(self, file_name):\n with open(file_name, 'rb') as f:\n raw_data = b''.join([f.readline() for _ in range(self.__num_lines)])\n return self.__chardet.detect(raw_data)['encoding']",
"def test_file_gzip_utf8_readwrite_explicit_decode(self):\n if state.py2:\n FileWriter(self.unicode_path).gzip(self.unicode_string)\n gzip_contents = FileReader(self.unicode_path + \".gz\").read_gzip(\"utf-8\") # when read with explicit utf-8 decoding, strings should match\n self.assertEqual(gzip_contents, self.unicode_string)\n elif state.py3:\n FileWriter(self.unicode_path).gzip(bytes(self.unicode_string, 'utf-8'))\n gzip_contents = FileReader(self.unicode_path + \".gz\").read_gzip(\"utf-8\") # when read with explicit utf-8 decoding, strings should match\n self.assertEqual(gzip_contents, self.unicode_string)",
"def test_file_readlines_utf8_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).readlines_utf8()",
"def _loadf(fname):\n with open(fname, encoding=\"ISO-8859-1\") as f:\n return json.load(f)",
"def guess_encoding(data):\n successful_encoding = None\n # we make 'utf-8' the first encoding\n encodings = ['utf-8']\n #\n # next we add anything we can learn from the locale\n try:\n encodings.append(locale.nl_langinfo(locale.CODESET))\n except AttributeError:\n pass\n try:\n encodings.append(locale.getlocale()[1])\n except (AttributeError, IndexError):\n pass\n try: \n encodings.append(locale.getdefaultlocale()[1])\n except (AttributeError, IndexError):\n pass\n #\n # we try 'latin-1' last\n encodings.append('latin-1')\n for enc in encodings:\n # some of the locale calls \n # may have returned None\n if not enc:\n continue\n try:\n decoded = unicode(data, enc)\n successful_encoding = enc\n\n except (UnicodeError, LookupError):\n pass\n else:\n break\n if not successful_encoding:\n raise UnicodeError(\n 'Unable to decode input data. Tried the following encodings: %s.'\n % ', '.join([repr(enc) for enc in encodings if enc]))\n else:\n return (decoded, successful_encoding)",
"def detect_encoding(data):\n enc_list = ['UTF-8', 'LATIN-1', 'iso8859-1', 'iso8859-2',\n 'UTF-16', 'CP720']\n code = locale.getpreferredencoding(False)\n if code not in enc_list:\n enc_list.insert(0, code)\n for c in enc_list:\n try:\n for line in data:\n line.decode(c)\n except (UnicodeDecodeError, UnicodeError):\n continue\n return c\n print(\"Encoding not detected. Please pass encoding value manually\")",
"def fs_read(file_path):\n try:\n with open(str(file_path), 'r') as f:\n return f.read()\n except UnicodeDecodeError:\n with open(str(file_path), 'r', encoding='latin-1') as f:\n return f.read()\n except IOError as e:\n raise e",
"def test_file_readas_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_as(\"utf-8\")",
"def read(filename, encoding='utf-8'):\r\n text, encoding = decode( file(filename, 'rb').read() )\r\n return text, encoding",
"def _get_encoding_type(file: str) -> Encoding:\n try:\n with open(file, 'r') as f:\n f.read()\n except UnicodeDecodeError:\n encoding = Encoding.DER\n else:\n encoding = Encoding.PEM\n return encoding",
"def test_file_utf8_readas_writeas(self):\n FileWriter(self.unicode2_path).write_as(self.unicode_string, \"utf-8\")\n unicode_text = FileReader(self.unicode2_path).read_as(\"utf-8\")\n self.assertEqual(unicode_text, self.unicode_string)",
"def test_file_utf8_readwrite_noraise_unicodeerror(self):\n FileWriter(self.unicode_path).write(self.unicode_string)\n unicode_text = FileReader(self.unicode_path).read()\n self.assertEqual(self.unicode_string, unicode_text)",
"def read_doc(f):\n \"\"\"XXX Not sure how best to handle all types of encodings. So:\"\"\"\n try:\n document = f.read().decode('utf-8').encode('ascii', 'ignore')\n except UnicodeDecodeError:\n try:\n document = f.read().decode('latin1').encode('ascii', 'ignore')\n except:\n # print 'Figure out next encoding.'\n raise\n return document\n # XXX: Just gonna convert the whole thing to unicode\n # return unicode(f.read())",
"def test_file_utf8_readwrite(self):\n FileWriter(self.unicode_path).write_utf8(self.unicode_string)\n unicode_text = FileReader(self.unicode_path).read_utf8()\n self.assertEqual(unicode_text, self.unicode_string)",
"def unicode_open(filename, *args, **kwargs):\n kwargs['encoding'] = \"utf-8\"\n if PY3:\n return open(filename, *args, **kwargs)\n return codecs.open(filename, *args, **kwargs)",
"def read_file(name):\n with open(name, 'r') as my_file:\n return my_file.read().encode('utf-8')"
] | [
"0.7207956",
"0.7092595",
"0.67391837",
"0.6677408",
"0.66623574",
"0.6612854",
"0.6486865",
"0.64271533",
"0.6374884",
"0.635114",
"0.63398594",
"0.6299602",
"0.6258015",
"0.62392825",
"0.6193841",
"0.6187641",
"0.61355126",
"0.6065543",
"0.6036637",
"0.6028947",
"0.5985771",
"0.5969637",
"0.59411937",
"0.593267",
"0.59129405",
"0.5911703",
"0.59033614",
"0.5895739",
"0.5884986",
"0.5847024"
] | 0.78901494 | 0 |
Convert TheMovieDB movies to fresh tomatoes format | def _convert_to_movies(the_movie_db, raw_movies):
movies = list()
for movie in raw_movies:
poster = ""
if 'poster_path' in movie:
poster = constants.POSTER_URL + movie['poster_path']
success, movie_trailer_url = the_movie_db.get_movie_trailer(movie['id'])
if not success:
print "something went wrong with movie: {}".format(movie['id'])
movies.append(
media.Movie(movie['title'], poster, movie_trailer_url)
)
return movies | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _convert_to_Movie(self, MovieDB):\n return Movie(MovieDB['original_title'].encode('utf-8'),\n MovieDB['overview'].encode('utf-8'),\n self._get_image_url(MovieDB['poster_path']),\n self._get_trailer_url(MovieDB['id']))",
"def __movies_to_titles(mlens_to_tmdb, moviesFile='ml-latest/movies.csv'):\n rdr = csv.reader(open(moviesFile))\n next(rdr, None)\n\n moviesToName = {}\n\n for row in rdr:\n movieLensId = row[0]; movieName=row[1]\n tmdb_id = mlens_to_tmdb[movieLensId]\n moviesToName[tmdb_id] = movieName\n\n return moviesToName",
"def create_movielist():\n # Create the list of movies - let's pick 6\n movielist = []\n # title, box_art, url\n movielist.append(MovieMetadata(\"Toy Story\", \\\n \"I'm from Mattel. Well, I'm not really from Mattel, I'm actually \" \\\n \"from a smaller company that was purchased by Mattel in a leveraged \" \\\n \"buyout.\", \\\n \"http://ia.media-imdb.com/images/M/MV5BMTgwMjI4MzU5N15BMl5BanBnXkFtZ\" \\\n \"TcwMTMyNTk3OA@@._V1_SY317_CR12,0,214,317_AL_.jpg\", \\\n 'https://www.youtube.com/watch?v=KYz2wyBy3kc'))\n movielist.append(MovieMetadata(\"Avatar\", \\\n \"I was hoping for some kind of tactical plan that didn't involve \" \\\n \"martyrdom\", \\\n 'http://ia.media-imdb.com/images/M/MV5BMTYwOTEwNjAzMl5BMl5BanBnXk' \\\n 'FtZTcwODc5MTUwMw@@._V1_SY317_CR0,0,214,317_AL_.jpg', \\\n 'https://www.youtube.com/watch?v=cRdxXPV9GNQ'))\n movielist.append(MovieMetadata(\"The Princess Bride\", \\\n \"When I was your age, television was called books. And this is a \" \\\n \"special book. It was the book my father used to read to me when I \" \\\n \"was sick, and I used to read it to your father. And today I'm gonna\" \\\n \" read it to you.\", \\\n 'http://ia.media-imdb.com/images/M/MV5BMTkzMDgyNjQwM15BMl5BanBnXkFtZ' \\\n 'TgwNTg2Mjc1MDE@._V1_SY317_CR0,0,214,317_AL_.jpg', \\\n 'https://www.youtube.com/watch?v=GNvy61LOqY0'))\n movielist.append(MovieMetadata(\"Serenity\", \\\n \"Shiny. Let's be bad guys.\", \\\n 'http://ia.media-imdb.com/images/M/MV5BMTI0NTY1MzY4NV5BMl5BanBnXkFtZ' \\\n 'TcwNTczODAzMQ@@._V1_SY317_CR0,0,214,317_AL_.jpg', \\\n 'https://www.youtube.com/watch?v=JY3u7bB7dZk'))\n movielist.append(MovieMetadata(\"The Wizard of Speed and Time\", \\\n \"Miss Belair, if you feel compelled to grab part of my body and \" \\\n \"shake it before you can even be friendly, you've got far worse \" \\\n \"problems than you think I have.\", \\\n 'http://ia.media-imdb.com/images/M/MV5BODc3MzA3MDQyN15BMl5BanBnXkFtZ' \\\n 'TYwMzE2MTk5._V1_SX214_AL_.jpg', \\\n 'https://www.youtube.com/watch?v=3ldOTw60Ozg'))\n movielist.append(MovieMetadata(\"Inside Out\", \\\n \"Take her to the moon for me. Okay?\", \\\n 'http://ia.media-imdb.com/images/M/MV5BOTgxMDQwMDk0OF5BMl5BanBnXkFtZ' \\\n 'TgwNjU5OTg2NDE@._V1_SX214_AL_.jpg', \\\n 'https://www.youtube.com/watch?v=yRUAzGQ3nSY'))\n\n return movielist",
"def ready_movies():\n troy = Movie(movie_title=\"Troy\",\n poster_image=\"https://upload.wikimedia.org/wikipedia/en/b/b8/Troy2004Poster.jpg\",\n trailer_youtube=\"https://www.youtube.com/watch?v=znTLzRJimeY\")\n\n kingdom_of_heaven = Movie(movie_title=\"Kingdom of Heaven\",\n poster_image=\"https://upload.wikimedia.org/wikipedia/en/9/9e/KoHposter.jpg\",\n trailer_youtube=\"https://www.youtube.com/watch?v=moNH4N44D28\")\n\n warrior = Movie(movie_title=\"Warrior\",\n poster_image=\"https://upload.wikimedia.org/wikipedia/en/e/e3/Warrior_Poster.jpg\",\n trailer_youtube=\"https://www.youtube.com/watch?v=I5kzcwcQA1Q\")\n\n pulp_fiction = Movie(movie_title=\"Pulp Fiction\",\n poster_image=\"https://upload.wikimedia.org/wikipedia/en/3/3b/Pulp_Fiction_%281994%29_poster.jpg\",\n trailer_youtube=\"https://www.youtube.com/watch?v=s7EdQ4FqbhY\")\n\n fight_club = Movie(movie_title=\"Fight Club\",\n poster_image=\"https://upload.wikimedia.org/wikipedia/en/f/fc/Fight_Club_poster.jpg\",\n trailer_youtube=\"https://www.youtube.com/watch?v=SUXWAEX2jlg\")\n\n the_matrix = Movie(movie_title=\"The Matrix\",\n poster_image=\"https://upload.wikimedia.org/wikipedia/en/c/c1/The_Matrix_Poster.jpg\",\n trailer_youtube=\"https://www.youtube.com/watch?v=vKQi3bBA1y8\")\n\n the_dark_knight = Movie(movie_title=\"The Dark Knight\",\n poster_image=\"https://upload.wikimedia.org/wikipedia/en/8/8a/Dark_Knight.jpg\",\n trailer_youtube=\"https://www.youtube.com/watch?v=vKQi3bBA1y8\")\n\n whiplash = Movie(movie_title=\"Whiplash\",\n poster_image=\"https://upload.wikimedia.org/wikipedia/en/0/01/Whiplash_poster.jpg\",\n trailer_youtube=\"https://www.youtube.com/watch?v=7d_jQycdQGo\")\n\n dredd = Movie(movie_title=\"Dredd\",\n poster_image=\"https://upload.wikimedia.org/wikipedia/en/1/16/Dredd2012Poster.jpg\",\n trailer_youtube=\"https://www.youtube.com/watch?v=qv-6dNqqnMA\")\n\n ai_artificial_intelligence = Movie(movie_title=\"A.I. Artificial Intelligence\",\n poster_image=\"https://upload.wikimedia.org/wikipedia/en/e/e6/AI_Poster.jpg\",\n trailer_youtube=\"https://www.youtube.com/watch?v=_19pRsZRiz4\")\n\n watchmen = Movie(movie_title=\"Watchmen\",\n poster_image=\"https://upload.wikimedia.org/wikipedia/en/b/bc/Watchmen_film_poster.jpg\",\n trailer_youtube=\"https://www.youtube.com/watch?v=PVjA0y78_EQ\")\n\n the_rock = Movie(movie_title=\"The Rock\",\n poster_image=\"https://upload.wikimedia.org/wikipedia/en/8/82/The_Rock_%28movie%29.jpg\",\n trailer_youtube=\"https://www.youtube.com/watch?v=jGVJx5mOtL8\")\n\n movies = [troy, kingdom_of_heaven, warrior, pulp_fiction, fight_club, the_matrix,\n the_dark_knight, whiplash, dredd, ai_artificial_intelligence,\n watchmen, the_rock]\n\n return movies",
"def data_one_movie_to_db(config_db, title: str) -> None:\n try:\n with UseDatabase(config_db) as cursor:\n url = \"http://www.omdbapi.com/?i=tt3896198&apikey=6b513db6&t=\" + title\n headers = {\"Accept\": \"application/json\"}\n req = requests.get(url, headers=headers)\n api_content = json.loads(req.content.decode('utf-8'))\n # Because of no BoxOffice key in API for movie 'Ben Hur' (ID 68 in db):\n api_content.setdefault('BoxOffice', 'N/A')\n json_keys = (api_content['Year'], api_content['Runtime'], api_content['Genre'], api_content['Director'],\n api_content['Actors'], api_content['Writer'], api_content['Language'], api_content['Country'],\n api_content['Awards'], api_content['imdbRating'], api_content['imdbVotes'],\n re.sub(r'[^0-9]', '', api_content['BoxOffice']), title)\n _SQL = \"\"\"UPDATE MOVIES SET YEAR=?, RUNTIME=?, GENRE=?, DIRECTOR=?, ACTORS=?, WRITER=?, LANGUAGE=?,\n COUNTRY=?, AWARDS=?, IMDb_Rating=?, IMDb_votes=?, BOX_OFFICE=? WHERE TITLE=?\"\"\"\n cursor.execute(_SQL, json_keys)\n except KeyError:\n print(\"No API data about this movie.\")\n except Exception as err:\n print(\"Something went wrong:\", str(err))",
"def save_movies(self, filename):\n out_file = open('{}'.format(filename), 'w')\n for movie in self.movies:\n out_file.write(\"{}\\n\".format(movie))\n out_file.close()",
"def movie(response):\n\n response = response.json()\n\n if response.get(\"Error\"):\n raise NotFoundError(response[\"Error\"])\n\n if response[\"Type\"] != \"movie\":\n raise NotFoundError(\"Type is {}, should be movie\".format(response[\"Type\"]))\n\n return [OrderedDict([(\"Title\", response[\"Title\"]),\n (\"ID\", response[\"imdbID\"]),\n (\"Rating\", response[\"imdbRating\"]),\n (\"Year\", response[\"Year\"].split(u\"\\u2013\")[0])])]",
"def __tags_to_movies(mlens_to_tmdb, movies_to_titles, tags_to_names,\n genomeFile='ml-latest/genome-scores.csv'):\n rdr = csv.reader(open(genomeFile))\n next(rdr, None)\n\n tagsToMovies = {}\n\n for row in rdr:\n movieLensId = row[0]; tagId = row[1]; score=row[2]\n tmdb_id = mlens_to_tmdb[movieLensId]\n tagName = tags_to_names[tagId]\n if tagName not in tagsToMovies:\n tagsToMovies[tagName] = []\n\n title = movies_to_titles[tmdb_id]\n tagsToMovies[tagName].append( (tmdb_id, title, score) )\n\n # Sort each by score\n for tagId, scoredMovies in tagsToMovies.items():\n scoredMovies.sort(key=lambda x: x[2], reverse=True)\n\n return tagsToMovies",
"def convert_to_movie(api_movie_obj):\n # Sometimes the api passes back null movies. Weird, I know. - Matt M\n if api_movie_obj is None:\n logger.warn('Blank movie encountered')\n return Movie()\n logger.info('Converting to movie: %s (%s)' % (api_movie_obj['title'], api_movie_obj['id']))\n movie = Movie()\n movie.m_id = api_movie_obj['id']\n movie.title = api_movie_obj['title']\n if 'poster_path' in api_movie_obj.keys() and api_movie_obj['poster_path']:\n # w185 indicates api request for the 185px-width image\n movie.poster_path = '%sw185%s' % (tmdb.get_base_url(), api_movie_obj['poster_path'])\n else:\n movie.poster_path = '/static/img/placeholder-poster.jpg'\n movie_keys = api_movie_obj.keys()\n movie.release_date = api_movie_obj['release_date'] if ('id' in movie_keys) else None\n movie.overview = api_movie_obj['overview'] if ('overview' in movie_keys) else None\n movie.budget = api_movie_obj['budget'] if ('budget' in movie_keys) else None\n movie.revenue = api_movie_obj['revenue'] if ('revenue' in movie_keys) else None\n logger.info('Conversion successful')\n return movie",
"def main():\n the_movie_db = TheMovieDB(API_KEY)\n success, movies = the_movie_db.discover_movies()\n if not success:\n print \"something went wrong with the api, please check\"\n exit(1)\n\n fresh_potatoes = _convert_to_movies(the_movie_db, movies)\n fresh_tomatoes.open_movies_page(fresh_potatoes)",
"def parse_movie_and_add_to_database(movie, db_helper):\n\n def parse(x): return re.sub('\\(.*\\)', '', \" \".join(urlparse(x).path.split('/')[-1].split('_')))\n\n abstract = movie.abstract\n director = parse(movie.director)\n starrings = [parse(starring) for starring in movie.starrings.split('\\n')]\n title = parse(movie.title)\n # Insert movie\n movie_id = db_helper.add_entry(\"movie\", (title, 1, 1, abstract))\n # Insert actors\n for starring in starrings:\n celebrity_id = db_helper.add_entry(\"celebrity\", (starring))\n db_helper.add_entry(\"relationship\", (movie_id, celebrity_id, db_helper.played_relationship_type_id))\n\n # Insert director\n celebrity_id = db_helper.add_entry(\"celebrity\", (director))\n db_helper.add_entry(\"relationship\", (movie_id, celebrity_id, db_helper.directed_relationship_type_id))",
"def load_movies():\n filepath = \"./seed_data/u.item\"\n movies = open(filepath)\n\n for movie in movies:\n movie = movie.rstrip().split('|')\n title = movie[1][:-7]\n title = title.decode(\"latin-1\")\n if movie[2]:\n date = datetime.strptime(movie[2], '%d-%b-%Y')\n else:\n date = None\n db_movie = Movie(\n movie_id = movie[0], title = title, \n released_at = date, imdb_url = movie[4])\n db.session.add(db_movie)\n\n db.session.commit()",
"def transform_data(titles, people):\n \n movies = list()\n\n for t in titles:\n title = t['title']\n year = t['year']\n director = random.choice(people)\n producer = random.choice(people)\n actor = random.choice(people)\n castcrew = {'director' : director, 'producer': producer, 'actor': actor}\n movies.append((title, year, json.dumps(castcrew)))\n\n return movies",
"def convert_to_json(self, rows):\n\t\tjson_list = []\n\t\tfor row in rows:\n\t\t\tjson_record = {}\n\t\t\tjson_record[\"movie_id\"] = row[0]\n\t\t\tjson_record[\"title\"] = change_title(row[1])\n\t\t\tjson_record[\"genres\"] = row[2][:5]\n\t\t\tjson_record[\"imdb_id\"] = row[3]\n\t\t\tjson_record[\"tmdb_id\"] = row[4]\n\t\t\tjson_record[\"rating\"] = row[5]\n\t\t\tjson_record[\"number_of_ratings\"] = row[6]\n\t\t\tjson_record[\"weighted_rating\"] = row[7]\n\t\t\tjson_record[\"release_year\"] = row[8]\n\t\t\tjson_record[\"img_path\"] = row[9]\n\t\t\tjson_record[\"description\"] = row[10]\n\t\t\tjson_record[\"director\"] = row[11]\n\t\t\tjson_record[\"length\"] = row[12]\n\t\t\tjson_list.append(json_record)\n\t\treturn json.dumps(json_list, indent = 4)",
"def create_movie_tiles_content(movies):\n\n content = ''\n for movie in movies:\n # Extract the youtube ID from the url\n youtube_id_match = re.search(r'(?<=v=)[^&#]+', movie.trailer_url)\n youtube_id_match = youtube_id_match or re.search(\n r'(?<=be/)[^&#]+', movie.trailer_url)\n trailer_youtube_id = youtube_id_match.group(\n 0) if youtube_id_match else None\n\n # Append the details for the movie with its content filled in\n content += movie_tile_content.format(\n movie_title=movie.title + \"(\" + movie.releaseYear + \")\",\n poster_url=movie.poster_url,\n trailer_youtube_id=trailer_youtube_id,\n movie_story_Line=movie.story_line,\n movie_director=movie.director,\n movie_cast=movie.cast,\n media_id=movie.media_id,\n media_avg_rating=movie.viewer_rating.average_rating,\n media_pg_rating=movie.pg_rating,\n media_duration=movie.duration,\n media_genres=movie.genres.replace(\"|\", \",\"),\n total_reviews=movie.viewer_rating.total_rating - 1,\n media_reviews=create_movie_reviews_content(movie.viewer_rating)\n )\n return content",
"def unite_imdb_profiles(verbose):\n if verbose:\n print(\"Uniting IMDB movie profiles to one csv file...\")\n if not os.path.exists(_IMDB_DIR_PATH):\n print(\"No IMDB profiles to unite!\")\n return\n profiles = []\n profile_files = os.listdir(_IMDB_DIR_PATH)\n if verbose:\n profile_files = tqdm(profile_files)\n for profile_file in profile_files:\n if verbose:\n profile_files.set_description('Reading {}'.format(profile_file))\n file_path = os.path.join(_IMDB_DIR_PATH, profile_file)\n _, ext = os.path.splitext(file_path)\n if ext == '.json':\n with open(file_path, 'r') as json_file:\n profiles.append(json.load(json_file))\n df = pd.DataFrame(profiles)\n df = _decompose_dict_column(df, 'avg_rating_per_demo', _DEMOGRAPHICS)\n df = _decompose_dict_column(df, 'votes_per_demo', _DEMOGRAPHICS)\n df = _decompose_dict_column(\n df, 'rating_freq', [str(i) for i in range(1, 11)])\n df = _dummy_list_column(df, 'genres')\n unison_fpath = os.path.join(\n _get_dataset_dir_path(), 'imdb_dataset.csv')\n df.to_csv(unison_fpath, index=False)",
"def main():\n\n toy_story = media.Movie(\"Toy Story\",\n \"http://www.gstatic.com/tv/thumb/movieposters/17420/p17420_p_v8_ab.jpg\", # NOQA\n \"November 22, 1995\",\n \"1h 21m\",\n \"Woody, a good-hearted cowboy doll who belongs to \"\n \"a young boy named Andy, sees his position as \"\n \"Andy's favorite toy jeopardized when his parents \"\n \"buy him a Buzz Lightyear action figure.\",\n \"https://youtu.be/4KPTXpQehio\")\n\n avatar = media.Movie(\"Avatar\",\n \"http://t0.gstatic.com/images?q=tbn:ANd9GcQCfmvrE4fMo2cd8esc7mDZPtFSJThAujddMPkRtti1_ij6u-jp\", # NOQA\n \"December 18, 2009\",\n \"2h 42m\",\n \"On the lush alien world of Pandora live the Na'vi, \"\n \"beings who appear primitive but are highly evolved.\",\n \"https://youtu.be/cRdxXPV9GNQ\")\n\n deadpool = media.Movie(\"Deadpool\",\n \"http://t2.gstatic.com/images?q=tbn:ANd9GcTvrIHJfasS6poy34esN1O5hZonXaiqfEZb4WbnbAa9qJCIL8_9\", # NOQA\n \"February 12, 2016\",\n \"1h 48m\",\n \"Wade Wilson is a former Special Forces operative \"\n \"who now works as a mercenary.\",\n \"https://youtu.be/gtTfd6tISfw\")\n\n it_movie = media.Movie(\"IT\",\n \"http://t1.gstatic.com/images?q=tbn:ANd9GcTALjGaaCwNAfgH2Fa0jVpp2mEOhGRRw1v0lkRrHlUtXyKW0buX\", # NOQA\n \"September 8, 2017\",\n \"2h 15m\",\n \"Seven young outcasts in Derry, Maine, are about \"\n \"to face their worst nightmare -- an ancient, \"\n \"shape-shifting evil that emerges from the sewer \"\n \"every 27 years to prey on the town's children.\",\n \"https://youtu.be/xKJmEC5ieOk\")\n\n star_wars_tfa = media.Movie(\"Star Wars: The Force Awakens\",\n \"http://t0.gstatic.com/images?q=tbn:ANd9GcT6nGxj1D4P-9EiVSY32sb6Ql-XQrbeK5FgM37UI6QxcZwfcfVw\", # NOQA\n \"December 18, 2015\",\n \"2h 15m\",\n \"Thirty years after the defeat of the Galactic\"\n \" Empire, the galaxy faces a new threat from \"\n \"the evil Kylo Ren and the First Order. \",\n \"https://youtu.be/sGbxmsDFVnE\")\n\n big_hero6 = media.Movie(\"Big Hero 6\",\n \"http://t2.gstatic.com/images?q=tbn:ANd9GcQzyu98HxFhB68UKqRKSrTKknXHI-gtSTAAX0CGiKBM980CFhI1\", # NOQA\n \"November 7, 2014\",\n \"1h 48m\",\n \"Robotics prodigy Hiro lives in the city of San \"\n \"Fransokyo.\",\n \"https://youtu.be/rD5OA6sQ97M\")\n\n movies = [toy_story, avatar, deadpool, it_movie, star_wars_tfa, big_hero6]\n\n fresh_tomatoes.open_movies_page(movies)",
"def write_kodidb(self, artwork):\n kodi_movie = self.kodidb.movie_by_imdbid(artwork[\"imdb_id\"])\n if kodi_movie:\n params = {\n \"movieid\": kodi_movie[\"movieid\"],\n \"art\": {\"animatedfanart\": artwork[\"animatedfanart\"], \"animatedposter\": artwork[\"animatedposter\"]}\n }\n self.kodidb.set_json('VideoLibrary.SetMovieDetails', params)",
"def parse_movie(self, res):\n url = res.css(SELECTORS['MOVIE_URL'])\n obj = {\n 'id': int(url.re_first(r'[/]([0-9]{1,})[/]')),\n 'title': SelectHelper.get(res, SELECTORS['MOVIE_TITLE']),\n 'description': SelectHelper.get(res, SELECTORS['MOVIE_DESCRIPTION'])[12:-10],\n 'advisory': SelectHelper.get_array(res, SELECTORS['MOVIE_ADVISORY']),\n 'image': SelectHelper.get(res, SELECTORS['MOVIE_IMAGE']),\n 'url': BASE_URL + url.extract_first(),\n }\n return Movie(obj)",
"def save_csv(outfile, movies):\n fieldnames = ['title', 'rating', 'year', 'actors', 'runtime']\n with open('movies.csv', 'w') as outfile:\n writer = csv.DictWriter(outfile, fieldnames=fieldnames)\n writer.writeheader()\n for line in movies:\n writer.writerow(line)\n\n\n # ADD SOME CODE OF YOURSELF HERE TO WRITE THE MOVIES TO DISK",
"def stitchMovie(metricList, metadata, args):\n # Create a movie slicer to access the movie generation routine.\n movieslicer = slicers.MovieSlicer()\n # Identify roots of distinct output plot files.\n outfileroots = []\n for metric in metricList:\n mName = metric.name.replace(' ', ' ').replace(' ', '_').replace('.', '_').replace(',', '')\n dbName = args.opsimDb.replace('_sqlite.db', '')\n dbName = dbName.replace('.db', '')\n if metadata != '':\n outfileroots.append('_'.join([dbName, mName, metadata, 'HEAL']))\n else:\n outfileroots.append('_'.join([dbName, mName, 'HEAL']))\n\n for outfileroot in outfileroots:\n # Identify filenames.\n plotfiles = fnmatch.filter(os.listdir(args.outDir), outfileroot + '*SkyMap.png')\n slicenum = plotfiles[0].replace(outfileroot, '').replace('_SkyMap.png', '').replace('_', '')\n sliceformat = '%s0%dd' %('%', len(slicenum))\n n_images = len(plotfiles)\n if n_images == 0:\n raise Exception('No images found in %s with name like %s' %(args.outDir, outfileroot))\n # Set up ffmpeg FPS/IPS parameters.\n # If a movieLength was specified... set args.ips/fps according to the number of images.\n if args.movieLength != 0.0:\n #calculate images/second rate\n args.ips = n_images/float(args.movieLength)\n print(\"For a movie length of \" + str(args.movieLength) + \" IPS set to: \", args.ips)\n if args.fps == 0.0:\n warnings.warn('(FPS of 0.0) Setting fps equal to ips, up to a value of 30fps.')\n if args.ips <= 30.0:\n args.fps = args.ips\n else:\n args.fps = 30.0\n if args.fps < args.ips:\n warnings.warn('Will create movie, but FPS < IPS, so some frames may be skipped.')\n if args.fps > 30.0:\n warnings.warn('Will create movie, but FPS above 30 reduces performance '\n 'and is undetectable to the human eye.')\n # Create the movie.\n movieslicer.makeMovie(outfileroot, sliceformat, plotType='SkyMap', figformat='png',\n outDir=args.outDir, ips=args.ips, fps=args.fps)",
"def get_non_normalized_movie_data_df(imdb_ids_list=[\"tt1630029\", \"tt0499549\"], no_records_to_display=0):\n\n title_basics_data_df, title_basics_cols = read_imdb_gz_data(directory=\"../../Data/IMDB/\", level_1=\"title\",\n level_2=\"basics\",\n show=False, no_records_to_show=no_records_to_display)\n\n title_basics_data_df = title_basics_data_df[title_basics_data_df[\"tconst\"].isin(imdb_ids_list)]\n title_basics_data_df = split_cols_into_rows(source_df=title_basics_data_df, split_col_name=\"genres\")\n title_basics_data_df = title_basics_data_df[[\"tconst\", \"genres\"]]\n title_basics_data_df[\"type\"] = \"genre\"\n title_basics_data_df.columns = [\"tconst\", \"value\", \"type\"]\n\n\n title_crew_data_df, title_crew_cols = read_imdb_gz_data(directory=\"../../Data/IMDB/\", level_1=\"title\",\n level_2=\"crew\",\n show=False, no_records_to_show=no_records_to_display)\n title_crew_data_df = title_crew_data_df[title_crew_data_df[\"tconst\"].isin(imdb_ids_list)]\n # title_crew_data_df = title_crew_data_df[title_crew_data_df[\"directors\"].str.contains(\",\")]\n\n name_basics_data_df, name_basics_data_cols = read_imdb_gz_data(directory=\"../../Data/IMDB/\", level_1=\"name\",\n level_2=\"basics\",\n show=False, no_records_to_show=no_records_to_display)\n\n title_crew_data_writers_df = title_crew_data_df[[\"tconst\", \"writers\"]].copy().drop_duplicates()\n title_crew_data_writers_df = split_cols_into_rows(source_df=title_crew_data_writers_df, split_col_name=\"writers\")\n\n joined_writers_df = pd.merge(title_crew_data_writers_df, name_basics_data_df,\n left_on=\"writers\", right_on=\"nconst\", how=\"left\")\n joined_writers_df = joined_writers_df[[\"tconst\", \"primaryName\"]]\n joined_writers_df[\"type\"] = \"writer\"\n joined_writers_df.columns = [\"tconst\", \"value\", \"type\"]\n\n\n title_crew_data_directors_df = title_crew_data_df[[\"tconst\", \"directors\"]].copy().drop_duplicates()\n title_crew_data_directors_df = split_cols_into_rows(source_df=title_crew_data_directors_df, split_col_name=\"directors\")\n joined_directors_df = pd.merge(title_crew_data_directors_df, name_basics_data_df,\n left_on=\"directors\", right_on=\"nconst\", how=\"left\")\n joined_directors_df = joined_directors_df[[\"tconst\", \"primaryName\"]]\n joined_directors_df[\"type\"] = \"director\"\n joined_directors_df.columns = [\"tconst\", \"value\", \"type\"]\n\n\n title_principals_data_df, title_principals_cols = read_imdb_gz_data(directory=\"../../Data/IMDB/\", level_1=\"title\",\n level_2=\"principals\",\n show=False, no_records_to_show=no_records_to_display)\n title_principals_data_df = title_principals_data_df[title_principals_data_df[\"tconst\"].isin(imdb_ids_list)]\n title_principals_data_df = title_principals_data_df[[\"tconst\", \"nconst\"]].copy().drop_duplicates()\n joined_principals_df = pd.merge(title_principals_data_df, name_basics_data_df, on=\"nconst\", how=\"left\")\n joined_principals_df = joined_principals_df[[\"tconst\", \"primaryName\"]]\n joined_principals_df[\"type\"] = \"principal\"\n joined_principals_df.columns = [\"tconst\", \"value\", \"type\"]\n\n non_normalized_df = pd.concat([title_basics_data_df, joined_writers_df, joined_directors_df, joined_principals_df])\n non_normalized_df = non_normalized_df.reset_index(drop=True)\n\n if no_records_to_display > 0:\n print(\"title_basics_data_df :\")\n print(title_basics_data_df.head(no_records_to_display))\n print()\n\n print(\"joined_writers_df :\")\n print(joined_writers_df.head(no_records_to_display))\n print()\n\n 
print(\"joined_directors_df :\")\n print(joined_directors_df.head(no_records_to_display))\n print()\n\n print(\"joined_principals_df :\")\n print(joined_principals_df.head(no_records_to_display))\n print()\n\n print(\"non_normalized_df :\")\n print(non_normalized_df.head(no_records_to_display))\n print()\n\n return non_normalized_df",
"def list_to_movie_objects(self, list_id):\n IMAGE_URL = \"https://image.tmdb.org/t/p/w500\"\n\n parsed_json = self.request_list_json(list_id)\n\n if parsed_json is None or len(parsed_json['items']) == 0:\n return None\n\n movies_json = parsed_json['items']\n\n movies_list = []\n movie_counter = 1\n for mov in movies_json:\n # print(json.dumps(mov, indent=2))\n # print('Movie %d ...' % (movie_counter))\n\n movie_title = mov['title']\n movie_overview = mov['overview']\n movie_image_url = IMAGE_URL + mov['poster_path']\n\n movie_id = str(mov['id'])\n movie_video_url = self.movieid_first_video_url(movie_id)\n\n # print('Movie id: ' + movie_id)\n # print('Title: ' + movie_title)\n # print('Storyline: ' + repr(movie_overview))\n # print('Image url: ' + movie_image_url)\n # print('Video url: ' + movie_video_url)\n # print('')\n\n movie_object = media.Movie(movie_title,\n movie_overview,\n movie_image_url,\n movie_video_url)\n movies_list.append(movie_object)\n\n movie_counter += 1\n\n return movies_list",
"def save_csv(outfile, movies):\n writer = csv.writer(outfile)\n writer.writerow(['Title', 'Rating', 'Year', 'Actors', 'Runtime'])\n for movie in movies:\n writer.writerow(movie)\n\n # ADD SOME CODE OF YOURSELF HERE TO WRITE THE MOVIES TO DISK",
"def make_movie(processed_files_directory='files/', WITH_SUBTITLES=False, WITH_AUDIO=False):\r\n # Declare the text for sub-titles\r\n\r\n if WITH_SUBTITLES: # if the user is willing to have subtitles in the movie\r\n with open(processed_files_directory+'subtitles.txt', 'r', encoding='utf8') as f:\r\n txt = f.read() # read the subtitles file\r\n # Split text to lines.\r\n subtitles = txt.split('\\n')\r\n # Declare VideoFileClip from the movie that I already have.\r\n clip = VideoFileClip(processed_files_directory + \"initial.avi\")\r\n # Declare duration of one sub-title as total duration of the video divided by number of lines.\r\n duration = clip.duration/len(subtitles)\r\n # Set start to zero.\r\n start=0\r\n # Set container for the clips.\r\n videos=[]\r\n # Loop all sub-titles\r\n for line in subtitles:\r\n # Make text clip from the reversed Hebrew text\r\n txt_clip = TextClip(line[::-1], fontsize=30, color='yellow', font='Calibri')\r\n # Set position to the bottom of screen.\r\n txt_clip = txt_clip.set_position('bottom').set_duration(duration)\r\n # Make sub clip of the movie with same duration as text clip.\r\n sub_clip = clip.subclip(start,start+duration)\r\n # Set CompositeVideoClip from the text clip and sub clip.\r\n video = CompositeVideoClip([sub_clip, txt_clip])\r\n # Insert the video to the clips container\r\n videos.append(video)\r\n # Set start time for next sub-title.\r\n start+=duration\r\n # Concatenate all clips of the container.\r\n res = concatenate_videoclips(videos)\r\n clip = res # now the clip is res\r\n else:\r\n clip = VideoFileClip(processed_files_directory+ \"initial.avi\") # the clip won't have subtitles\r\n\r\n\r\n # Set audio clip from mp3 file.\r\n if WITH_AUDIO: # if the user has chosen to include soundtrack in the movie\r\n f = 'audio.mp3' # change to mp3 soundtrack file of the movie\r\n # set the duration of the audioclip to max(duration of clip), even if the audioclip is longer\r\n audioclip = AudioFileClip(processed_files_directory+f)\r\n\r\n # check if the clip length is bigger than the\r\n if clip.duration > audioclip.duration:\r\n number_of_duplicated = int(np.ceil(clip.duration/audioclip.duration))\r\n # duplicate the audioclip in order to later fit the movie's duration\r\n audioclip = concatenate_audioclips([AudioFileClip(processed_files_directory+f) for i in range(number_of_duplicated)])\r\n\r\n # Now fit the audioclip duration to the movie's\r\n audioclip = audioclip.set_duration(clip.duration)\r\n\r\n # Set audio for the container.\r\n if not WITH_SUBTITLES: # if the user wanted to have audio included without subtitles\r\n videoclip = clip.set_audio(audioclip)\r\n else: # if the user wanted to have both audio and subtitles\r\n videoclip = res.set_audio(audioclip)\r\n else:\r\n videoclip = clip # if the user didn't want audio in the movie\r\n\r\n # Write the video file.\r\n f = 'final_movie.mp4' # change to the desired movie filename\r\n videoclip.write_videofile(processed_files_directory+f)",
"def encode_movie(dir):\n root, ext = 'movie', 'avi'\n for i in itertools.count():\n path = '.'.join([root + str(i).zfill(5), ext])\n\n if not os.path.exists(path):\n break\n\n call(['mencoder', 'mf://' + dir + '/*.png', '-mf', 'fps=10', '-o',\n path, '-ovc', 'xvid', '-xvidencopts', 'bitrate=3000'])\n\n shutil.rmtree(dir)\n\n print('movie saved to %s.' % path)",
"def get_test_movie(imdbid_test,finaldata):\n testfeatures = [f for f in finaldata if f['imdbid'] == imdbid_test]\n features = testfeatures[0]['features']\n features_bow = f1_dictionary.doc2bow(features) # dictionary is can be a parameter to this function\n return features_bow # test_IMDB_bow,test_WIKI_bow",
"def load_movies():\n print \"Movies\"\n\n # Delete all rows in table, so if we need to run this a second time,\n # we won't be trying to add duplicate users\n Movie.query.delete()\n\n # Read u.item file and insert data\n for row in open(\"seed_data/u.item\"):\n row =row.rstrip()\n\n movie_id, title_long, released_string, imdb_url = row.split(\"|\")[:4]\n #we modified the datetime format changed released_string into \n #new format by using datetim.strptime to convert it. \n print row\n if released_string: \n release_at = datetime.strptime(released_string, \"%d-%b-%Y\")\n else: \n release_at = None \n\n #here we stripped the title of the (xxxx) year and parenthesis\n #using the slice method. \n title = title_long[:-7]\n\n print movie_id, title_long, released_string, imdb_url\n\n #assign the return values from our for loop to a new variable\n movie = Movie(movie_id=movie_id, title=title, released_at=release_at,\n imdb_url=imdb_url)\n \n # We need to add to the session or it won't ever be stored\n db.session.add(movie)\n\n #Once we're done, we should commit our work\n db.session.commit()",
"def extract_movie(soup: BeautifulSoup) -> Movie:\n\n title, year = extract_movie_header(soup)\n runtime, genres, certificate = extract_movie_meta(soup)\n rating, metascore = extract_movie_rating_bar(soup)\n votes, gross = extract_movie_extra(soup)\n\n return Movie(\n title, genres, rating, year, runtime, votes, metascore, certificate, gross\n )",
"def list_movie():\n if not MOVIES:\n print('No stored movies yet')\n\n for movie in MOVIES:\n print(f\"{movie['name']} ({movie['year']}) - Director by '{movie['director']}'\")"
] | [
"0.64902",
"0.5829091",
"0.5827195",
"0.5817335",
"0.5812168",
"0.57020813",
"0.5697801",
"0.56902397",
"0.5665576",
"0.56347865",
"0.55452704",
"0.545847",
"0.5440439",
"0.54200333",
"0.5404067",
"0.53978586",
"0.5383835",
"0.53561413",
"0.5301769",
"0.5294081",
"0.52641225",
"0.5225335",
"0.5224606",
"0.5199435",
"0.5191248",
"0.51856554",
"0.51798916",
"0.51440173",
"0.51390666",
"0.51121116"
] | 0.71199316 | 0 |
Set a log message in the session log area of the web page | def sessionLog(self, logStr):
if self.ioLoopInst is not None:
cmd = {'cmd': 'sessionLog', 'value': logStr}
self._sendMessageToWeb(cmd)
else:
print("SessionLog: " + logStr) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def log_message(self, msg):\n\t\tself.logView.log_message(msg)",
"def add_session_logger(request, log):\n request.session.log = log",
"def logToScreen(message):\n\tif CyGame().isFinalInitialized():\n\t\tCyInterface().addImmediateMessage(escapeXml(message), \"\")",
"def log(self, message):",
"def log(self, message):\n self._log += \"%s\\n\" % message\n print message",
"def log(self, msg):\n\n\t\tself.eyetribe.log_message(msg)",
"def update_log(self, message):\n self.LogOutput_Field.appendPlainText(message)",
"def log(self, message: str):",
"def log(self, _strMessage=\"\"):\n self.edLogging.log(_strMessage)",
"def logprint(self, message):\n print message\n self.log += message+\"\\n\"",
"def log(self, msg):\n print(msg)",
"def _log(self, message):\n pass",
"def userLog(self, logStr):\n if self.ioLoopInst is not None:\n cmd = {'cmd': 'userLog', 'value': logStr}\n self._sendMessageToWeb(cmd)\n else:\n print(\"UserLog: \" + logStr)",
"def _log_message(self, message):\n\t\tif message not in self._logged_messages:\n\t\t\twith open(self._logfile, \"a\") as f:\n\t\t\t\tf.write(message + \"\\n\")\n\t\tself._logged_messages.append(message)",
"def log_to_user(self, level, message):\n if level in ('error', 'info',):\n buffer = self.log_view.get_buffer()\n iter = buffer.get_end_iter()\n buffer.insert(iter, message + '\\n')\n adj = self.log_view.get_parent().get_vadjustment()\n adj.set_value(adj.get_upper() - adj.get_page_size())",
"def msg_handler(self, msg):\n self.view.frame.log.append(msg)",
"def screen(self, _strMessage=\"\"):\n self.edLogging.screen(_strMessage)",
"def _log(self, log, message):\n log_entry = '[%s] %s\\n' % (time.strftime('%Y/%m/%d %H:%M:%S'), message)\n log.write(log_entry)\n if self.verbose:\n print log_entry.rstrip()",
"def log(msg):\n\tfrom http_request import req\n\tif not req: return\n\t\t\n\tif not req.out.get('_log'):\n\t\treq.out['_log'] = []\n\treq.out['_log'].append(msg)",
"def log_page_view(self, page, userid):\n\t\tself.log_page_view.logger_.log('{} {}'.format(page, userid))",
"def log(self, message):\n timestamp = time.strftime(\"[%H:%M:%S]\", time.localtime(time.time()))\n self.file.write('%s %s\\n' % (timestamp, message))\n self.file.flush()",
"def write_log(self, msg: str):\n self.cta_engine.write_log(msg, self)",
"def log(self, msg):\n log(\"[%s]@%s:%s %s\" %(self.username, self.remote_ip, self.remote_port, msg))",
"def log(self, msg):\n log.msg(msg, level=log.INFO)",
"def logMsg(self, logType, logMessage):\n self.ce_proxy.logMessage(self.userName, logType, logMessage)",
"def logline(msg):\n print msg",
"def log(self, msg):\n current_datetime = self.get_date_time()\n self.file.write(\"%s %s\\n\" % (current_datetime, msg))",
"def logIt(self, msg):\n\n\t\tif( self.logger ): self.logger.logIt( msg )",
"def log(message):\n print(\"{0}: {1}\".format(acm.Time.TimeNow(), message))",
"def log( loglevel, message ):\n E.log( loglevel, message )"
] | [
"0.71520805",
"0.7133883",
"0.6981249",
"0.6750899",
"0.67124015",
"0.6626074",
"0.6558042",
"0.65579414",
"0.65232515",
"0.6516738",
"0.64253914",
"0.635889",
"0.6347469",
"0.6347148",
"0.6331337",
"0.62725055",
"0.62225693",
"0.6218292",
"0.62155837",
"0.61898637",
"0.6135466",
"0.6123768",
"0.608115",
"0.6078095",
"0.60668254",
"0.60637665",
"0.6054891",
"0.6037628",
"0.60336787",
"0.6026471"
] | 0.7516804 | 0 |
Set an error message in the debug display area of the web page | def setDebugError(self, errStr):
if self.ioLoopInst is not None:
cmd = {'cmd': 'debugError', 'error': errStr}
self._sendMessageToWeb(cmd)
else:
print("DebugError: " + errStr) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def error(self, text):\n\n debug_text = self._get_debug_text(text)\n if self._live_debug_level < logging.ERROR and self._live_debug_enabled:\n if self.py_cui_root is not None:\n self.py_cui_root.status_bar.set_text(debug_text)\n super().debug(debug_text)\n else:\n super().error(debug_text)",
"def display_error(self, message):\n self.ui_widget.display_error(message=message)",
"def err_message(self, message):\n self.errors.append(1)\n message = \"<b>\" + message + \"</b>\"\n self.timer_id = GLib.timeout_add_seconds(5, self.error_false)\n # Show if is was hidden\n if self.hidden:\n self.toggle()\n self.was_hidden = True\n self.left_label.set_markup(message)",
"def show_error(title, message, print_message=False):\n\n pass",
"def error_page(self, error_message: str, status: int = 400):\n self.set_status(status)\n self.render('error.jinja2', error=error_message)",
"def error(self, message):\n print message",
"def show_error(self, error):\n if (error == \"\"):\n self.ui.errorLabel.setText(\"\")\n else:\n self.ui.errorLabel.setText(\"<span style=\\\"font-weight:600; color:#ff0000;\\\">{0}</span>\".format(error))",
"def set_error_message(msg):\n set_message(msg, TYPE_ERROR)",
"def error(message):\n if DEBUG:\n with print_lock:\n print((Colours.FAIL + 'ERROR: ' + Colours.END_COLOUR + message).strip())",
"def error(self, request):\n if self.debug:\n import cgitb\n request.stdout.write('Content-Type: text/html\\r\\n\\r\\n' +\n cgitb.html(sys.exc_info()))\n else:\n errorpage = \"\"\"<!DOCTYPE HTML PUBLIC \"-//IETF//DTD HTML 2.0//EN\">\n<html><head>\n<title>Unhandled Exception</title>\n</head><body>\n<h1>Unhandled Exception</h1>\n<p>An unhandled exception was thrown by the application.</p>\n</body></html>\n\"\"\"\n request.stdout.write('Content-Type: text/html\\r\\n\\r\\n' +\n errorpage)",
"def _display_error(message: str) -> None:\n print()\n print(message, end='\\n\\n')",
"def error(self, error_msg):\n print(\"ERROR DETECTED\")\n print(error_msg)",
"def renderError(self, error_code):\n\n self.error(error_code)\n self.response.write(\"Oops! Something went wrong.\")",
"def renderError(self, error_code):\n\n self.error(error_code)\n self.response.write(\"Oops! Something went wrong.\")",
"def _error(message):\n\n current.session.error = current.T(message)\n redirect(URL(c=\"default\", f=\"index\"))",
"def set_error_page(self, html):\n return self.manager.set_error_page(self, html)",
"def error():\n title = session.get('title', 'Error')\n error_message = session.get('error_message', 'An error has occurred.')\n level = session.get('level', 'error')\n logger.error(\"Displaying error to the user\", error_message=error_message, level=level)\n return render_template('errors/error.html', title=title, error_message=error_message, level=level)",
"def show_error(self):\n print('LSE Error : {}'.format(self._error))",
"def set_error(errTxt):\r\n core.set_item_color(\"Start\", mvGuiCol_Button, (255, 0, 0, 255))\r\n core.set_item_color(\"Start\", mvGuiCol_ButtonActive, (255, 0, 0, 255))\r\n core.set_item_color(\"Start\", mvGuiCol_ButtonHovered, (255, 0, 0, 255))\r\n if not core.does_item_exist(\"Error##ErrorNoFACEITName\"):\r\n with simple.collapsing_header(\"Error##ErrorNoFACEITName\", parent=\"##GroupStats\",\r\n default_open=True,\r\n closable=False,\r\n bullet=True):\r\n core.add_text(\"ErrorText\", default_value=errTxt, color=(255, 0, 0, 255))",
"def debug_error(self, message):\n self.emit(QtCore.SIGNAL(\"debug_error(QString)\"), message)",
"def _show_error_message(self, error_code):\n self.error_box = tk.Toplevel()\n self.error_box.title(\"Error!\")\n self.error_frame = tk.Frame(self.error_box)\n self.error_frame.grid(column=0, row=0, sticky=\"ew\")\n self.error_frame.columnconfigure(0, weight=1, minsize=100)\n self.error_message = tk.Message(self.error_frame, text=error_code)\n self.error_message.grid(column=0, row=0, sticky=\"ew\")",
"def error(cls, message):\n print('[ERROR] {0}'.format(message))",
"def error(self, message):\n self._clear()\n print(\"ERROR:\", message)\n self._draw()",
"def showMessage(self, message):\r\n util.raiseNotDefined()",
"def error(message, code=400):\n return render_template(\"error.html\", top=code, bottom=message)",
"def _insertErrorMsg(self, ErrorMessage, outputFileObject):\n outputFileObject.write('<font color=\"' + AutoGrader.Const.ERROR_COLOR + '\">')\n outputFileObject.write (ErrorMessage)\n outputFileObject.write('</font>')",
"def error(message):\n print str(message)",
"def error():\n return render_template(\"error.html\", **locals())",
"def error(self, message):\n ErrorExit('error: {}\\n'.format(message), 2)",
"def error(self, msg):\n error_msg = self._error_color\n error_msg += \"[SHOULDER_ERROR] \" + msg\n error_msg += self._reset_color\n self.logger.error(error_msg)"
] | [
"0.73833704",
"0.72650695",
"0.7009634",
"0.6869299",
"0.6852875",
"0.6805506",
"0.6801954",
"0.6663326",
"0.666139",
"0.66274875",
"0.66182554",
"0.6591868",
"0.6587565",
"0.6587565",
"0.65738654",
"0.6552163",
"0.65453845",
"0.65323395",
"0.65281844",
"0.6509303",
"0.6486324",
"0.64693004",
"0.64619267",
"0.64444774",
"0.6439064",
"0.6433215",
"0.6419549",
"0.6396381",
"0.63833237",
"0.63722426"
] | 0.7387137 | 0 |
Send previously plotted data points to the web page | def sendPreviousDataPoints(self):
if self.ioLoopInst is not None:
cmd = {'cmd': 'setDataPoints', 'value': self.dataPoints}
self._sendMessageToWeb(cmd)
else:
print("sendPreviousDataPoints: " + self.dataPoints) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def plot_data(self):",
"def publish_data(username):\n x1 = []\n x2 = []\n y1 = []\n y2 = []\n\n for point_set in __data:\n x1.append(point_set[0][0])\n y1.append(point_set[0][1])\n\n x2.append(point_set[1][0])\n y2.append(point_set[1][1])\n\n figure = plt.figure()\n plt.plot(x1, y1, label='Atrium')\n plt.plot(x2, y2, label='Ventrical')\n plt.xlabel('Time (ms)')\n plt.ylabel('Voltage (V)')\n plt.title(\"'{0}' Live Egram Data\".format(username))\n plt.legend()\n\n timestamp = datetime.datetime.now().strftime(Config.getInstance().get('Database', 'db.timestamp')).replace(' ', '_').replace('/', '-').replace(':', '-')\n graph_doc_name = \"{0}_Live_Egram_Data_From_{1}.pdf\".format(username, timestamp)\n pp = PdfPages(os.path.join(parentfolder, 'downloads', graph_doc_name))\n pp.savefig(figure)\n pp.close()\n\n csv_output = list(zip(x1, y1, x2, y2))\n\n csv_doc_name = \"{0}_Live_Egram_Data_From_{1}.csv\".format(username, timestamp)\n with open(os.path.join(parentfolder, 'downloads', csv_doc_name), 'w') as file:\n writer = csv.writer(file)\n writer.writerow(['Atrium Timestamp', 'Atrium Value', 'Ventrical Timestamp', 'Ventrical Value'])\n for line in csv_output:\n writer.writerow(line)",
"def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial\"][1]) + \" at \" + str(self.values[\"Trial\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream.write(dict(x=self.values[\"Trial\"][0], y=self.values[\"Trial\"][1]))",
"def timer_plot_data_out(self, w):\n w.update_plot(self.getLaps())",
"def save_plot(self, ):\n pass",
"def data_graph():\n station_reference = request.args.get(\"stationReference\")\n station_name = request.args.get(\"stationName\")\n station_name = station_name.replace(\" \",\"+\")\n\n if station_name is not None:\n # station_data = station_data.replace(\" \", \"+\")\n station = station_data.loc[station_data.stationName == station_name]\n else:\n station = station_data.loc[station_data.stationReference == station_reference]\n result_station = station.iloc[0]\n\n # Get optional parameters\n time_from = request.args.get(\"from\")\n time_to = request.args.get(\"to\")\n if time_from:\n pass\n else:\n time_from = None\n if time_to:\n pass\n else:\n time_to = None\n # plot pic\n magic_trick= data.station_graph(result_station.stationName, time_from, time_to)\n # img_stream = io.BytesIO(img)\n # img = Image.open(img_stream)\n # imgByteArr = io.BytesIO()\n # img.save(imgByteArr,format='PNG')\n # imgByteArr = imgByteArr.getvalue()\n # return send_file(io.BytesIO(imgByteArr),\n # mimetype = 'image/png',\n # as_attachment = True,\n # attachment_filename = 'tmp.png')\n image_data = open(\"tmp.png\", \"rb\").read()\n response = make_response(image_data)\n response.headers['Content-Type'] = 'image/png'\n return response",
"def plot(self, finished=False):\n send = self.sender.send\n if finished:\n send(None)\n else:\n data = (self.args['time'], self.args['max'],\n self.args['mean'], self.args['sd'],\n self.args['fitness'], self.args['population'],\n self.args['dataset'], self.args['current_gen'],\n self.args['generations'])\n send(data)",
"def plot(self):\n pass",
"def update_plot():\n pass",
"def evaluate(self, plot):",
"def processData(self):\n recordSet = AresChartsService.toMultiSeries(self.vals, self.chartKeys, self.selectedX , self.chartVals, extKeys=self.extKeys)\n self.aresObj.jsGlobal.add(\"data_%s = %s\" % (self.htmlId, json.dumps(recordSet)))",
"def render_data_points(times, data_points, config):\n\n\twith warnings.catch_warnings():\n\t\twarnings.simplefilter(\"ignore\")\n\t\tpyplot.pause(0.01)\n\n\tfor id_, graph in enumerate(data_points.values(), start=1):\n\t\tif config[\"subplots\"][\"show\"]:\n\t\t\tpyplot.subplot(\n\t\t\t\tconfig[\"subplots\"][\"vertical\"],\n\t\t\t\tconfig[\"subplots\"][\"horizontal\"],\n\t\t\t\tid_\n\t\t\t)\n\n\t\ty_values = normalize(graph[\"values\"]) if config[\"normalize\"] \\\n\t\t\telse graph[\"values\"]\n\t\tgraph[\"graph\"].set_data(times, y_values)\n\n\taxes = pyplot.gca()\n\taxes.relim()\n\taxes.autoscale_view()\n\tpyplot.draw()",
"def update(self):\n\t\tprint(\"Plotting \" + str(str(self.values[\"Trial1\"][1]) + \" at \" + str(self.values[\"Trial1\"][0]) + \"\\n\"))\n\t\tif self.clear:\n\t\t\tself.stream1.write(dict(x=[], y=[]))\n\t\t\tself.stream2.write(dict(x=[], y=[]))\n\t\t\tself.stream3.write(dict(x=[], y=[]))\n\t\telse:\n\t\t\tself.stream1.write(dict(x=self.values[\"Trial1\"][0], y=self.values[\"Trial1\"][1]))#, trace=Bar)\n\t\t\tself.stream2.write(dict(x=self.values[\"Trial2\"][0], y=self.values[\"Trial2\"][1]))\n\t\t\tself.stream3.write(dict(x=self.values[\"Trial3\"][0], y=self.values[\"Trial3\"][1]))",
"def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.canvas.draw()",
"def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()",
"def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()",
"def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()",
"def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()",
"def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='gray')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='darkgreen')\n self.canvas.draw()",
"def on_plot(self, event=None):\n data_id, theory_id, state_id = self.set_data_helper()\n self.parent.plot_data(data_id=data_id,\n state_id=state_id,\n theory_id=theory_id,\n append=False)\n self.enable_remove_plot()",
"def updatePlot(self):\n self.axes.clear()\n self.axes.plot(self.data[0], self.data[1], linestyle='-', color='blue')\n self.axes.plot(self.data[0], self.data[2], linestyle='-', color='red')\n self.axes.plot(self.data[0], self.data[3], linestyle='-', color='gray')\n self.canvas.draw()",
"def plot(self, *args, **kwargs):\n pass",
"def plot(data, layout, file_name):\n offline.plot({'data': data,\n 'layout': layout},\n filename='{}-{}_{}-{}.html'.format(file_name,\n todays_day,\n todays_month,\n currency))",
"def _set_data(self):\n\n # Remove old elements from plot\n if self.scatter is not None:\n self.scatter.remove()\n if self.oneoneline is not None:\n self.oneoneline.remove()\n\n # Get new data and plot\n self.slice = self.results.set_index('Location').loc[self.locnaam, [self.input_parameter, self.result_parameter]].values.T\n self.scatter = self.ax.scatter(*self.slice, s=5, alpha=0.7, color='C0')\n\n # Determine axes limits\n lowerlim, upperlim = self.slice.min(), self.slice.max()\n span = (upperlim - lowerlim)\n lowerlim = max(0, lowerlim - 0.05 * span)\n upperlim = upperlim + 0.05 * span\n\n # Plot a diagonal 1:1 line\n self.oneoneline, = self.ax.plot([lowerlim, upperlim], [lowerlim, upperlim], color='grey', dashes=(4, 3), lw=1.0)\n\n # Set the axes limits\n self.ax.set_xlim(lowerlim, upperlim)\n self.ax.set_ylim(lowerlim, upperlim)\n self.canvas.draw()",
"def linedata():\n get_values = request.args\n pc = get_values.get('pc') is not None # Per Capita\n gr = get_values.get('gr') is not None # Growth Rate\n place_args, _ = get_place_args(get_values)\n plot_data, _ = datachart_handler.get_plot_data(place_args, pc, gr)\n return json.dumps(plot_data)",
"def setplot(plotdata):\n#-------------------------- \n\n\n plotdata.clearfigures() # clear any old figures,axes,items data\n\n # Figure for q[0]\n plotfigure = plotdata.new_plotfigure(name='Pressure', figno=1)\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.axescmd = 'subplot(211)'\n \n #plotaxes.xlimits = [0.,150.]\n plotaxes.ylimits = [-1.,1.0]\n plotaxes.title = 'Pressure'\n\n # Set up for item on these axes:\n plotitem = plotaxes.new_plotitem(plot_type='1d_plot')\n plotitem.plot_var = 0\n plotitem.plotstyle = '-o'\n plotitem.color = 'b'\n plotitem.show = True # show on plot?\n plotitem.kwargs = {'linewidth':2,'markersize':5}\n \n\n\n # Set up for axes in this figure:\n plotaxes = plotfigure.new_plotaxes()\n plotaxes.axescmd = 'subplot(212)'\n plotaxes.xlimits = 'auto'\n plotaxes.ylimits = [-1.,1.]\n plotaxes.title = 'Velocity'\n\n # Set up for item on these axes:\n plotitem = plotaxes.new_plotitem(plot_type='1d_plot')\n plotitem.plot_var = 1\n plotitem.plotstyle = '-'\n plotitem.color = 'b'\n plotitem.show = True # show on plot?\n plotitem.kwargs = {'linewidth':3,'markersize':5}\n \n\n # Parameters used only when creating html and/or latex hardcopy\n # e.g., via visclaw.frametools.printframes:\n\n plotdata.printfigs = True # print figures\n plotdata.print_format = 'png' # file format\n plotdata.print_framenos = 'all' # list of frames to print\n plotdata.print_fignos = 'all' # list of figures to print\n plotdata.html = True # create html files of plots?\n plotdata.html_homelink = '../README.html'\n plotdata.latex = True # create latex file of plots?\n plotdata.latex_figsperline = 2 # layout of plots\n plotdata.latex_framesperline = 1 # layout of plots\n plotdata.latex_makepdf = False # also run pdflatex?\n\n return plotdata",
"def plot_page( stat_type ) :\r\n logger.debug( f\"stat_type={stat_type}\" )\r\n param = request.args[\"param\"]\r\n\r\n script = server_document( url=f'http://localhost:5006/{stat_type_2_plot_route[stat_type]}',\r\n arguments={'param' : param ,\r\n 'stat_type' : stat_type ,\r\n 'session_id' : session[ session_info.session_id_key ] } )\r\n\r\n return render_template('plot_page.html',\r\n script=script ,\r\n param=param ,\r\n stat_type=param_stats.StatTypes[stat_type].value )",
"def plot():\n pass",
"def show_plot(self):\r\n\t\tself.generate_plot()\r\n\t\tplt.show()",
"def plot(self):\n\t\tself.plotOfHeatingCurrent().plot()"
] | [
"0.6557775",
"0.63170487",
"0.62147385",
"0.6023356",
"0.60091656",
"0.60037726",
"0.59407425",
"0.59238297",
"0.5918431",
"0.58549476",
"0.5839202",
"0.581282",
"0.5794438",
"0.5786289",
"0.5779231",
"0.5779231",
"0.5779231",
"0.5779231",
"0.5779231",
"0.5770608",
"0.5725653",
"0.56986",
"0.5695462",
"0.5681196",
"0.5633015",
"0.5615718",
"0.5604227",
"0.558953",
"0.5581216",
"0.55726933"
] | 0.6515899 | 1 |
Clear all data plots in the web page | def clearAllPlots(self):
self.dataPoints = [[{'x': 0, 'y': 0}]]
self.sendPreviousDataPoints() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clear(self):\n self._plots[:] = []",
"def clear_graphs(self):\n for ax in (self.master_plot, self.time_velocity, self.time_power, self.power_velocity):\n ax.cla()",
"def clear(self):\n self._plot_data_cache = {}\n self._outstanding_requests = {}",
"def plot_clear():\n plt.cla()",
"def clear_plot(figure, clear_array):\n for i in range(len(clear_array)):\n clear_array[i].remove()",
"def reset(self):\r\n self.myOutputs = list()\r\n self.myPlots = list()\r\n self.pause = 0\r\n self.doMPL = False\r\n self.graphLabelsX = []\r\n self.graphLabelsY = []\r\n for i in self.xData.iterkeys():\r\n self.xData[i] = []\r\n self.yData[i] = []\r\n self.xyData[i] = []\r\n self.graphs[i] = Gnuplot(debug=0)\r\n self.figures[i] = 0\r\n self.mplFigCount = 0",
"def clear(self):\n\n # Inform the user\n log.info(\"Clearing the scatter plotter ...\")",
"def delete_plots():\n return Plot.delete_plots()",
"def reset(self):\n\n self.results = []\n self._plot()",
"def _clear_data_plots(self, display_pts = 5000):\n #Initializing variables for plotting\n self.out_voltages = np.ones(display_pts) * self._curr_output_voltage\n self.measured_powers = np.ones(display_pts) * self._last_power\n\n # Check that setpoint is reasonable, otherwise set error to 0\n self.errors = np.ones(display_pts) * (self._last_power-self.voltageSetpoint)\n self.sp_data = np.ones(display_pts) * self.voltageSetpoint",
"def __del__(self):\n pyplot.clf()",
"def clear_figure(self):\n self.figure.clf()",
"def clear(self):\n self._fig = go.Figure()",
"def plot_finalize():\n global figure\n global axes\n\n plot_refresh()\n plt.ioff()\n plt.show()\n\n figure, axes = None, None",
"def clear_crossfilter1(self):\n print ('Trigger clear')\n self.query_dict = {}\n self.plot_data = None\n self.create_figure_new()\n layout_doc.children[4].children[0] = self.p",
"def clear(self, name=None):\n if name:\n self.plots.pop(name)\n else:\n self.plots = {}\n\n self.plot_item.clear()",
"def discard(self) -> None:\n\n self.plot.close()",
"def _update_plots(self):\n for dock in self.plotDocks:\n for widget in dock.widgets:\n if not self.dataList.findItems(dock.name(), QtCore.Qt.MatchExactly):\n # no data for this plot -> reset it\n widget.getPlotItem().clear()\n # TODO remove tab from dock and del instance\n else:\n widget.getPlotItem().clear()\n x_data = self.currentDataset[\"results\"][\"time\"]\n y_data = self._get_data_by_name(dock.name())\n widget.getPlotItem().plot(x=x_data, y=y_data)",
"def clean_plots(request, save_figs):\n\n def fin():\n if save_figs is not None:\n plt.savefig(f\"{os.path.join(save_figs, request.node.name)}.png\")\n plt.close(\"all\")\n\n request.addfinalizer(fin)",
"def reset(self):\n\n self.fig.clear()\n self.ax = self.fig.add_subplot(111)\n self.hasLegend.set(False)\n self.title(Graph.default_title)\n # Lines is a list of DataSet objects. The user should take care to make\n # DataSet names unique, as there is no error checking done by Graph. \n # If a DataSet line is deleted by its formal name, Graph will delete the\n # first line in the list that matches the name.\n self.lines = {}\n self.line_counter = 1",
"def tearDown(self):\n\n self.plot = None",
"def close_all_plots(self):\n return ShadowTools.plt.close(\"all\")",
"def clear_crossfilter2(self):\n print ('Trigger clear')\n self.query_dict = {}\n self.plot_data = None\n self.create_figure_new()\n layout_doc.children[4].children[1] = self.p",
"def delete_fit(self):\n self.fft_fit_plotter.delete_plot(self.ax)\n plt.draw()",
"def clearRunPlot(self, runId):\n self.plotDataPoint(runId, None, None)",
"def close(self):\n plotid = self._plotid\n f = self.set(plotid)\n plt.close(f)\n self._plotid = None\n self._plots.remove(plotid)\n self._color_indexes.pop(plotid, None)\n self._mappable.pop(plotid, None)\n self._polar.pop(plotid, None)\n self._xscales.pop(plotid, None)\n self._yscales.pop(plotid, None)\n self._errorbar_colors.pop(plotid, None)",
"def _clear_drawing(self) -> None:\n self.vertices.clear()\n self.edges.clear()\n self.subplot.clear()\n self.subplot2.clear()",
"def clear(self):\n self._frame.clear()\n self._turtles = []\n self._gpens = []",
"def clear(self):\n\n # Clear\n self.axes.cla()\n try:\n self.figure.clf()\n except KeyError:\n FlatCAMApp.App.log.warning(\"KeyError in MPL figure.clf()\")\n\n # Re-build\n self.figure.add_axes(self.axes)\n self.axes.set_aspect(1)\n self.axes.grid(True)\n\n # Re-draw\n self.canvas.draw_idle()",
"def clear(self):\n self._plt.clear()\n self._layer_items = {}"
] | [
"0.7638171",
"0.73615134",
"0.72620654",
"0.71827036",
"0.6937874",
"0.69311833",
"0.68891567",
"0.68481153",
"0.6785341",
"0.6784262",
"0.67601913",
"0.6675455",
"0.66389424",
"0.6599825",
"0.65775836",
"0.6565839",
"0.65606374",
"0.6525755",
"0.6523487",
"0.6488934",
"0.6484234",
"0.64361656",
"0.64341176",
"0.63232255",
"0.63171655",
"0.63014984",
"0.6286484",
"0.6274054",
"0.6270412",
"0.6265162"
] | 0.7805865 | 0 |
Clear the data plot for the specified run | def clearRunPlot(self, runId):
self.plotDataPoint(runId, None, None) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def clearAllPlots(self):\n self.dataPoints = [[{'x': 0, 'y': 0}]]\n self.sendPreviousDataPoints()",
"def plot_clear():\n plt.cla()",
"def clear(self):\n self._plots[:] = []",
"def reset(self):\n\n self.fig.clear()\n self.ax = self.fig.add_subplot(111)\n self.hasLegend.set(False)\n self.title(Graph.default_title)\n # Lines is a list of DataSet objects. The user should take care to make\n # DataSet names unique, as there is no error checking done by Graph. \n # If a DataSet line is deleted by its formal name, Graph will delete the\n # first line in the list that matches the name.\n self.lines = {}\n self.line_counter = 1",
"def clear(self):\n\n # Inform the user\n log.info(\"Clearing the scatter plotter ...\")",
"def clear(self):\n self._fig = go.Figure()",
"def reset(self):\n\n self.results = []\n self._plot()",
"def _clear_data_plots(self, display_pts = 5000):\n #Initializing variables for plotting\n self.out_voltages = np.ones(display_pts) * self._curr_output_voltage\n self.measured_powers = np.ones(display_pts) * self._last_power\n\n # Check that setpoint is reasonable, otherwise set error to 0\n self.errors = np.ones(display_pts) * (self._last_power-self.voltageSetpoint)\n self.sp_data = np.ones(display_pts) * self.voltageSetpoint",
"def clear_graphs(self):\n for ax in (self.master_plot, self.time_velocity, self.time_power, self.power_velocity):\n ax.cla()",
"def reset(self):\r\n self.myOutputs = list()\r\n self.myPlots = list()\r\n self.pause = 0\r\n self.doMPL = False\r\n self.graphLabelsX = []\r\n self.graphLabelsY = []\r\n for i in self.xData.iterkeys():\r\n self.xData[i] = []\r\n self.yData[i] = []\r\n self.xyData[i] = []\r\n self.graphs[i] = Gnuplot(debug=0)\r\n self.figures[i] = 0\r\n self.mplFigCount = 0",
"def clear_plot(figure, clear_array):\n for i in range(len(clear_array)):\n clear_array[i].remove()",
"def __del__(self):\n pyplot.clf()",
"def reset(self):\n try:\n self.ax.cla()\n except Exception as e:\n print 'Exception BasePlot:', e\n raise e\n \n self._plotbuffer = { pat: [0 for _ in range(self._plotlength)] for pat in self._patterns }\n self._timestampbuffer = { pat: [0 for _ in range(self._plotlength)] for pat in self._patterns }\n self.ax.set_axis_bgcolor('black')\n self.ax.set_xticks([])\n self.ax.set_yticks([])",
"def clear(self):\n if self.flag == 0:\n for coord in INDICES:\n self.kill(coord)\n self.chart[coord] = DEAD",
"def clear_figure(self):\n self.figure.clf()",
"def delete_fit(self):\n self.fft_fit_plotter.delete_plot(self.ax)\n plt.draw()",
"def clear(self):\n self._plot_data_cache = {}\n self._outstanding_requests = {}",
"def reset_graph(self):\n self.sick_per_timestep = []\n self.steps = []\n self.ax.clear()\n self.ax.set_xlabel(self.xlabel)\n self.ax.set_ylabel(self.ylabel)\n self.ax.set_title(self.title)",
"def discard(self) -> None:\n\n self.plot.close()",
"def clear(self):\n self._plt.clear()\n self._layer_items = {}",
"def reset(self):\n self.dims.clear()\n self.xlabels.clear()\n self.annotators.clear()\n self._figTitle = None\n self.tbmTitle = None\n self._isSubplot = False\n self._universal_xlabel = False\n self._plotter = None\n self.Nsp = 0",
"def clear_visualization(self) -> None:\n if self._drawing_handle is not None:\n sim.simAddDrawingObjectItem(self._drawing_handle, None)",
"def resetGraph(self):\n self.colours = [self.uncompletedColor] * self.num_points\n self.setData(pos=self.pos, symbolBrush=self.colours, size=1, symbol=self.symbols, pxMode=False, text=self.text)",
"def clear_crossfilter1(self):\n print ('Trigger clear')\n self.query_dict = {}\n self.plot_data = None\n self.create_figure_new()\n layout_doc.children[4].children[0] = self.p",
"def reset(self):\n self.G = nx.Graph()\n self.form.plot_canvas.plot(self.G)",
"def teardown(self):\n\n if len(self.epochs) > 0 and len(self.typecounts) > 0:\n num_types = self.experiment.population._cell_class.max_types\n\n fig = plt.figure()\n plt.xlabel(\"Time (epoch)\")\n plt.ylabel(\"Abundance (cells)\")\n\n prev_xvals = [0] * len(self.epochs)\n for t in range(num_types):\n xvals = []\n for z in range(len(self.typecounts)):\n xvals.append(self.typecounts[z][t] + prev_xvals[z])\n\n plt.fill_between(self.epochs, prev_xvals, xvals, color=self.experiment.population._cell_class.type_colors[t])\n prev_xvals = xvals\n\n end_epoch = self.experiment.config.getint('Experiment', 'epochs')\n if not end_epoch:\n end_epoch = max(self.epochs)\n\n plt.xlim([self.epoch_start, end_epoch])\n\n data_file = self.datafile_path(self.filename)\n plt.savefig(data_file)",
"def plot_finalize():\n global figure\n global axes\n\n plot_refresh()\n plt.ioff()\n plt.show()\n\n figure, axes = None, None",
"def clear(self, name=None):\n if name:\n self.plots.pop(name)\n else:\n self.plots = {}\n\n self.plot_item.clear()",
"def clear_crossfilter2(self):\n print ('Trigger clear')\n self.query_dict = {}\n self.plot_data = None\n self.create_figure_new()\n layout_doc.children[4].children[1] = self.p",
"def teardown(self):\n self.wf.write_graph(dotfilename = self.test_path / \"wf_diagram\", graph2use=\"orig\")\n self.wf.run()\n \n\n self.helpers.plot_timeseries(\n self.export_path, self.sample_raw_image, \n highlight_ranges=self.highlight_ranges,\n num_figs=1\n )\n\n if self.plot_img:\n self.helpers.plot_4D_img_slice(self.export_path, \"sample_processed.png\")"
] | [
"0.7813099",
"0.7786517",
"0.76055056",
"0.75376326",
"0.7504365",
"0.74072784",
"0.7391547",
"0.73866606",
"0.73655605",
"0.72939986",
"0.7274155",
"0.7266801",
"0.71715695",
"0.714844",
"0.7123487",
"0.7080083",
"0.7061483",
"0.7059795",
"0.703659",
"0.69713885",
"0.6916199",
"0.69116986",
"0.6897057",
"0.68810177",
"0.6867803",
"0.68661296",
"0.6832195",
"0.6806454",
"0.67615",
"0.6758262"
] | 0.81491125 | 0 |
Process and upload documents to memory | def upload(self, documents: List[Document], vectorise_func) -> None:
# Add doc_store to documents
for d in documents:
d.doc_store = self
# Check ID uniqueness
check_duplicate_documents(documents)
# Check type consistency
check_document_types(documents)
# Batching
batches = batch_items(documents)
# Update document class conveniently
if issubclass(type(documents[0]), ChunkedDocument):
self._doc_class = ChunkedDocument
for batch in batches:
vectorise_func(batch, self)
self.documents += batch | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def run(self, mapping={}, *args, **kwargs):\n self.processed = 0\n for batch in self._process_by_batch(self.load(*args, **kwargs)):\n batch = list(map(lambda doc: self._apply_mapping(doc, mapping), batch))\n for doc in batch:\n self._ingest(iterable=doc, doctype=doc[\"doctype\"])\n self.processed += 1\n logger.info(\"Added {} documents to the database.\".format(self.processed))",
"def pdfProcessing():\n global DATABASE\n conn = db.create_connection(DATABASE)\n DOCUMENT_ORIGIN_CODE = \"DOSSIER_PATIENT\"\n\n pathFolder = \"fichiers source/\"\n extension = \".pdf\"\n pdfFileArrayPath = glob.glob(pathFolder + \"*\" + extension)\n print(\" - Processing pdf\", end=\"\")\n for file in pdfFileArrayPath:\n text = readFile.readPdfFile(file)\n query = getDocumentQuery(text, DOCUMENT_ORIGIN_CODE, file, pathFolder, extension)\n \n db.insert_document(conn, query)\n print(\".\", end = '')\n #commit the changes to db\n conn.commit()\n #close the connection\n conn.close()\n print(\"\\n\")",
"def docxProcessing():\n DOCUMENT_ORIGIN_CODE = \"RADIOLOGIE_SOFTWARE\"\n global DATABASE\n conn = db.create_connection(DATABASE)\n pathFolder = \"fichiers source/\"\n extension = \".docx\"\n docxFileArrayPath = glob.glob(pathFolder + \"*\" + extension)\n print(\" - Processing docx\", end=\"\") \n for file in docxFileArrayPath:\n text = readFile.readDocxFile(file)\n query = getDocumentQuery(text, DOCUMENT_ORIGIN_CODE, file, pathFolder, extension)\n db.insert_document(conn, query) \n print(\".\", end = '')\n #commit the changes to db\t\t\t\n conn.commit()\n #close the connection\n conn.close()\n print(\"\\n\")",
"def multipart():\n with commit():\n link_and_create_multipart_volumes()\n reindex_pidtype('docid')\n reindex_pidtype('serid')",
"def add_documents(self, documents):\n\t\t\n\t\t# flag for StopIteration exceptions\n\t\tmore_documents = True\n\t\t# loop while there are still documents in the iterator\n\t\twhile more_documents:\n\t\t\t# increment batch number\n\t\t\tbatch = len(self.batch_stats) + 1\n\t\t\t# count sentences\n\t\t\tsentences_count = 0\n\t\t\t# create temporary batch data file in the version directory\n\t\t\tbatch_file = os.path.join(self.file_base.get_version_path(self.version), \"data.jl.gz.temp\")\n\t\t\t# try to read the next batch of files, catch exception and stop if there are no more\n\t\t\ttry:\n\t\t\t\t# get next document before opening the file just to make sure it's there\n\t\t\t\tdocument = documents.next()\n\t\t\t\t# open the data file\n\t\t\t\twith gzip.open(batch_file, \"wb\") as outfile:\n\t\t\t\t\t# loop through DOCUMENT_BATCH_SIZE documents\n\t\t\t\t\tfor i in range(DocumentDatabase.DOCUMENT_BATCH_SIZE):\n\t\t\t\t\t\t# count sentences in document\n\t\t\t\t\t\tfor paragraph in document[\"paragraphs\"]:\n\t\t\t\t\t\t\tsentences_count += len(paragraph[\"sentences\"])\n\t\t\t\t\t\t# write JSON to file one line at a time\n\t\t\t\t\t\toutfile.write(\"%s\\n\" % json.dumps(document))\n\t\t\t\t\t\t# if we are not done with this batch, retrieve the next document\n\t\t\t\t\t\tif i < DocumentDatabase.DOCUMENT_BATCH_SIZE - 1:\n\t\t\t\t\t\t\tdocument = documents.next()\n\t\t\texcept StopIteration:\n\t\t\t\t# the end of the documents stream, set the flag to False\n\t\t\t\tmore_documents = False\n\t\t\t# make sure the batch isn't empty\n\t\t\tif sentences_count > 0:\n\t\t\t\t# create the new batch in the file system\n\t\t\t\tself.version_batches.create_latest_version()\n\t\t\t\t# add the stats to the statistics hash\n\t\t\t\tself.batch_stats[batch] = BatchStats(sentences_count)\n\t\t\t\t# write the batch statistics to file\n\t\t\t\twith codecs.open(self._get_batch_stat_file(batch), \"wb\", \"utf-8\") as outfile:\n\t\t\t\t\t# write the JSON representation for the stats\n\t\t\t\t\toutfile.write(json.dumps(self.batch_stats[batch].to_json()))\n\t\t\t\t# move the temp data file to the correct location inside the version folder\n\t\t\t\tos.rename(batch_file, self._get_batch_file(batch))",
"def preprocess_docs():\n\n print(\"Getting started!\")\n stopwords.populate_stopwords(NLP, STOPWORD_URL)\n\n print(str.format(\"Using data dir:{}\", DATA_DIR))\n\n csv_file = open(os.path.join(DATA_DIR, 'PDFs.csv'))\n reader = csv.reader(csv_file, 'excel')\n rows = list(reader)\n\n filenames = [_get_filename(row) for row in rows]\n\n pool = Pool(multiprocessing.cpu_count())\n\n try:\n pool.map(_get_item, rows)\n pool.map(pdf.extract_text, filenames)\n docs = pool.map(_extract_questions, rows)\n docs = [d for d in docs if d is not None]\n\n _find_similar(docs, simdoc=compare.compare_doc_keywords)\n\n for doc in docs:\n if doc is None:\n continue\n doc.save_json()\n\n except KeyboardInterrupt:\n pool.terminate()\n print(\"You cancelled the program!\")\n sys.exit(1)\n\n print(\"Done\")",
"def parse_documents():\n\n\tcount_before = control.find().count()\n\n\tprint \"There are currently %i unprocessed records.\" % count_before\n\n\t#dispatch\n\t# executor = concurrent.futures.ThreadPoolExecutor(10)\n\t# futures = [executor.submit(analyze_message, document) for document in control.find()]\n\t# concurrent.futures.wait(futures)\n\n\tfor document in control.find():\n\t\tanalyze_message(document)\n\n\tcount_after = control.count()\n\tprint \"There are now %i stored records.\" % control.count()",
"def convert_file(self):\n try:\n\n doc_data_txt = []\n pdf_data_txt = []\n\n n = self.args_.file_count(self.docs)\n\n if self.docs:\n doc_data_txt = (\n Parallel\n (n_jobs=n, backend=\"multiprocessing\", verbose=10)\n (delayed\n (self.args_.docx_handler)(path, self.submitted)\n for path in self.docs))\n\n n = self.args_.file_count(self.pdfs)\n\n if self.pdfs:\n pdf_data_txt = (\n Parallel\n (n_jobs=n, backend=\"multiprocessing\", verbose=10)\n (delayed\n (self.args_.pdfminer_handler)(path, self.submitted)\n for path in self.pdfs))\n\n return doc_data_txt, pdf_data_txt\n\n except RuntimeError as error:\n logger.getLogger().error(error)\n exit(1)",
"def upload_doc():\n file = request.files[\"file\"]\n meta_data = {\"name\": request.form[\"name\"].lower()}\n file_id = save_file(meta_data, file)\n print('file-id: ' + file_id)\n index_after_uploading(file_id)\n return jsonify({\"file_id\": file_id})",
"def bulk_process(self):\n\n def actions():\n try:\n task = self.queue.get(block=False, timeout=None)\n\n if task['action'] == 'index':\n yield {\n '_op_type': 'index',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n elif task['action'] == 'delete':\n yield {\n '_op_type': 'delete',\n '_index': self.ensure_index(task),\n '_id': task['id'],\n 'doc': task['properties']\n }\n else:\n raise NotImplementedError\n\n except Empty:\n pass\n\n for success, info in streaming_bulk(self.es_client, actions()):\n if success:\n self.queue.task_done()",
"def bulkupload(self, string, bibo):\n if not self.filemode:\n self.bulknum += 1\n self.esdocs.append(self.rdf2es(string, bibo))\n\n if self.filemode:\n # Output content to file\n #I think we shouldn't serialize the content in memory in the output-file mode\n\n for outer in self.esdocs:\n for inner in outer:\n #self.of.write(dumps(inner, separators='\\n'))\n #we need this json dump method because the content is stored in a dictionary structure - as far as I understand it\n #so we can't just write a string\n dump(inner, self.of)\n #dump(bytes(inner,'UTF-8'), self.of)\n self.writtenDocuments += 1\n\n self.of.write('\\n')\n #perhaps flush it only in bigger chunks? - later\n #self.of.flush()\n del self.esdocs[:]\n if self.writtenDocuments >= self.bulksize:\n self._closeFile()\n self.writtenDocuments = 0\n self._openFile()\n\n elif self.bulknum >= self.bulksize:\n # Perform bulk upload\n helpers.bulk(client=self.of, actions=self.esdocs, stats_only=True)\n # Reset counter and empty list\n self.bulknum = 0\n del self.esdocs[:]",
"def document_upload():\n form = SourceTextForm()\n if form.validate_on_submit():\n user = current_user\n\n doc = {}\n doc[\"file\"] = form.filename.data\n doc[\"author\"] = form.author.data\n doc[\"title\"] = form.title.data\n doc[\"language\"] = form.language.data\n\n params = {}\n params[\"email\"] = user.email\n params[\"new_page\"] = current_app.config[\"DOCUMENT_UPLOAD\"][\"PAGE_LIMIT\"]\n params[\"line_size\"] = current_app.config[\"DOCUMENT_UPLOAD\"][\"LINE_SIZE\"]\n params[\"early_cutoff\"] = current_app.config[\"DOCUMENT_UPLOAD\"][\"EARLY_CUTOFF\"]\n params[\"batch_size\"] = current_app.config[\"DOCUMENT_UPLOAD\"][\"BATCH_SIZE\"]\n params[\"tokenizer\"] = current_app.config[\"TOKENIZER\"].select(doc[\"language\"])\n params[\"resource\"] = create_book\n doc_uploader = DocumentUploader(params)\n \n could_upload = True\n try:\n doc_uploader.upload(doc)\n except Exception as e:\n traceback.print_exc()\n could_upload = False\n error_msg = \"Error uploading document. Please try again.\"\n flash(error_msg)\n\n if could_upload:\n success_msg = \"Document successfully uploaded.\"\n flash(success_msg)\n\n return render_template('content_management/document_upload.html', form=form)",
"def finish_documents():\n\n doc_ids = json.loads(request.form['doc_ids'])\n\n for docid in doc_ids:\n\n document = Document.query.filter_by(id=docid).first_or_404()\n\n document.status = \"OK\"\n\n db.session.add(document)\n\n db.session.commit()",
"def process(self, file=False):\n if file:\n doc = self.corpus.getDocument(file)\n print \"process Document:\", doc.path\n print\n self._process(doc)\n return\n\n count = 0\n\n while True:\n count += 1\n doc = self.corpus.getDocument()\n if doc == None or count > self.limit:\n break\n\n self._process(doc)",
"def gather_documents(self):\n self.document_gatherer.gather_and_save_everything(Constants.path_cord, \n Constants.path_metadata, \n Constants.path_linked_documents,\n Constants.path_unlinked_documents,\n Constants.path_parsed_documents,\n Constants.path_all_documents)\n \n print(\"Done gathering documents.\")",
"def process_documents(session, endpoint, docs, id_map):\n for doc in docs:\n original_asset = doc['asset']\n\n if original_asset['name'] == '' or original_asset['name'] is None:\n LOG.warn('Skipping asset {} with empty name'.format(original_asset['id']))\n\n asset = {}\n asset.update(original_asset)\n del asset['id'] # since it is going to be different\n report = {'source_id': original_asset['id'], 'type': 'upload'}\n\n dest_id = id_map.get(original_asset['id'])\n\n already_exists = dest_id is not None\n if already_exists:\n url = endpoint + dest_id + '/'\n r = session.get(url)\n if r.status_code == 404:\n already_exists = False\n LOG.warn('asset {} not found (original id {})'.format(\n dest_id, original_asset['id']))\n\n if already_exists:\n report['method'] = 'PUT'\n report['url'] = url\n r = session.put(url, json=asset)\n else:\n report['method'] = 'POST'\n r = session.post(endpoint, json=asset)\n\n try:\n r.raise_for_status()\n except requests.HTTPError:\n LOG.error('Saving asset failed: %s', r.content)\n LOG.error('Original asset: %s', asset)\n report['error'] = r.content\n yield report\n continue\n\n response = r.json()\n LOG.info('Saved asset: %s as %s', original_asset['id'], response['id'])\n report['dest_id'] = response['id']\n yield report",
"def _convert_batch(self, bucket, pdf_path, pages, jpeg_prefixes,\n webhook_url, webhook_data):\n # download PDF locally, use first JPEG prefix as its name\n pdf_key = s3.Key(bucket)\n pdf_key.key = pdf_path\n\n local_jpeg_prefix = jpeg_prefixes[0].replace('/', '-')\n local_pdf_path = '%s/%s.pdf' % (self.working_dir, local_jpeg_prefix)\n\n pdf_key.get_contents_to_filename(local_pdf_path)\n threads = []\n\n # convert each page in a separate thread using ImageMagick\n for page_number, jpeg_prefix in zip(pages, jpeg_prefixes):\n args = (local_pdf_path, page_number, jpeg_prefix, bucket, webhook_url,\n webhook_data)\n threads.append(threading.Thread(target=self._upload_page, args=args))\n\n [thread.start() for thread in threads]\n\n # wait until all threads have completed\n [thread.join() for thread in threads]",
"def reaper(self):\n if not self.superuser_request:\n self.abort(402, 'uploads must be from an authorized drone')\n with tempfile.TemporaryDirectory(prefix='.tmp', dir=config.get_item('persistent', 'data_path')) as tempdir_path:\n try:\n file_store = files.FileStore(self.request, tempdir_path)\n except files.FileStoreException as e:\n self.abort(400, str(e))\n now = datetime.datetime.utcnow()\n fileinfo = dict(\n name=file_store.filename,\n created=now,\n modified=now,\n size=file_store.size,\n hash=file_store.hash,\n tags=file_store.tags,\n metadata=file_store.metadata\n )\n container = reaperutil.create_container_hierarchy(file_store.metadata)\n f = container.find(file_store.filename)\n target_path = os.path.join(config.get_item('persistent', 'data_path'), util.path_from_hash(fileinfo['hash']))\n if not f:\n file_store.move_file(target_path)\n container.add_file(fileinfo)\n rules.create_jobs(config.db, container.acquisition, 'acquisition', fileinfo)\n elif not file_store.identical(util.path_from_hash(fileinfo['hash']), f['hash']):\n file_store.move_file(target_path)\n container.update_file(fileinfo)\n rules.create_jobs(config.db, container.acquisition, 'acquisition', fileinfo)\n throughput = file_store.size / file_store.duration.total_seconds()\n log.info('Received %s [%s, %s/s] from %s' % (file_store.filename, util.hrsize(file_store.size), util.hrsize(throughput), self.request.client_addr))",
"def dealDocument(update: Update, _: CallbackContext) -> None:\n file_name = update.message.document.file_name\n file_name = uuid.uuid4().hex + \".\" + \\\n secure_filename(file_name).split(\".\")[-1]\n imagePath = os.path.join(args.input, file_name)\n update.message.document.get_file().download(custom_path=imagePath)\n add_mark(imagePath, mark, args)\n resultPath = os.path.join(args.out, file_name)\n with open(resultPath,\"rb\") as file:\n update.message.reply_document(file)",
"def run():\n assert os.path.exists(args.input_path), \"input_path doesn't exist\"\n assert os.path.exists(args.output_path), \"output_path doesn't exist\"\n\n # read all the paths to the input documents\n doc_files = []\n for root, dirs, files in os.walk(args.input_path):\n for file in files:\n if not file.endswith('gz') and not file.endswith('xml'):\n continue\n doc_files.append(os.path.join(root, file))\n print('{} medline files found from {}'\n ''.format(len(doc_files), args.input_path))\n\n print('converting...')\n pool = Pool(processes=args.num_workers)\n total_doc = 0\n total_batch = 0\n total_empty = 0\n for d, b, n in tqdm(pool.imap_unordered(partial(convert), doc_files),\n total=len(doc_files)):\n total_doc += d\n total_batch += b\n total_empty += n\n\n print('total docs: {}, total batches: {} created (empty doc {})'\n ''.format(total_doc, total_batch, total_empty))",
"def _work(self, payload):\n pdf_path = payload['pdf_path']\n jpeg_prefixes = payload['jpeg_prefixes']\n\n webhook_url = payload['webhook_url']\n webhook_data = payload['webhook_data']\n\n page_start = payload['page_start']\n page_end = payload['page_end']\n\n self._log('Connecting to s3')\n connection, bucket = self._connect_to_s3(payload['s3'])\n\n # create batches of pages to convert\n for batch_first_page in range(page_start, page_end + 1, self.PAGES_PER_BATCH):\n batch_last_page = min(batch_first_page + self.PAGES_PER_BATCH - 1, page_end)\n batch_pages = range(batch_first_page, batch_last_page + 1)\n\n batch_jpeg_prefixes = jpeg_prefixes[batch_first_page - page_start:\n batch_last_page + 1 - page_start]\n self._convert_batch(bucket, pdf_path, batch_pages, batch_jpeg_prefixes,\n webhook_url, webhook_data)",
"def upload(self, documents: List[ElasticDocument], vectorise_func, index: str = None) -> None:\n if not index:\n index = self._index\n\n # Add doc_store to documents\n for d in documents:\n d.doc_store = self\n # Check ID uniqueness\n check_duplicate_documents(documents)\n # Check type consistency\n check_document_types(documents)\n # Batching\n batches = batch_items(documents)\n\n for batch in batches:\n payload = []\n # Calculate vectors\n vectorise_func(batch, self)\n\n for document in batch:\n # JSON representation of document\n doc_json = document.to_elastic()\n\n # Add correct index\n doc_json[\"_index\"] = index\n\n # Rename id key\n doc_json[\"_id\"] = doc_json[\"id\"]\n del doc_json[\"id\"]\n\n payload.append(doc_json)\n\n # Bulk upload to elasticsearch\n helpers.bulk(self._client, payload)\n\n # Update index\n self._client.indices.refresh(index=self._index)",
"def ingest_files(is_authenticated, is_authorized):\n if not is_authorized:\n return render_template(\"unauthorized_user.html\"), 401\n\n data_json = request.get_json()\n payload_data = {}\n payload_data['egnyte_uploaded_files'] = data_json.get('egnyte_uploaded_files')\n for field in payload_data:\n response_msg = check_field_validations(payload_data, field)\n if response_msg:\n return Response(json.dumps(response_msg), status=400, mimetype='application/json')\n try:\n thread = threading.Thread(target=corpus_indexer.index_based_on_trigger, args=payload_data['egnyte_uploaded_files'])\n thread.start()\n resp = Response(json.dumps({'status':'Ingest Files started'}), status=201, mimetype='application/json')\n except Exception as e:\n logger.error(\"ingest_files api is failed with error %s\" % str(e))\n return render_template(\"internal_server_error.html\"), 500\n return resp",
"def process_files(compress, files):\n [compress.add_file(file) for file in files]\n\n compress.execute() # upload files to iLovePDF\n compress.download() # download resultant file\n print(\"Compression saved {}% of disk space.\".format(\n PDFWorkshop.__percentage_storage_saved(compress))\n )\n compress.delete_current_task()",
"def run(\n self,\n query=\"*\",\n destination=\"exports/\",\n overwrite=False,\n batchsize=None,\n *args,\n **kwargs\n ):\n if not batchsize:\n batchsize = self.batchsize\n for docbatch in self._process_by_batch(\n self._retrieve(query), batchsize=batchsize\n ):\n self.save(docbatch, destination=destination, *args, **kwargs)\n if self.fileobj:\n self.fileobj.close()",
"def store_documents(self, partner, documents):\n for docs in documents:\n if docs and docs['type'] in DOCS_TYPES:\n document = DocumentDetails()\n document.partner_id = partner\n document.type = DOCS_TYPES[docs['type']]\n document.file_name = docs['file']\n document.file_data = os.path.join('documents/partner_doc', docs['file'])\n document.save()",
"def parallel_import_documents(self, index, documents, **kwargs):\n \n # Set default values in passed as kwargs\n chunk_size = kwargs.get('chunk_size', None)\n if chunk_size is None:\n chunk_size = 20000\n kwargs['chunk_size'] = chunk_size\n \n request_timeout = kwargs.get('request_timeout', None)\n if request_timeout is None:\n request_timeout = 3600\n kwargs['request_timeout'] = request_timeout\n \n doc_type = kwargs.get('doc_type', None)\n if doc_type is None:\n doc_type = \"_doc\"\n kwargs['doc_type'] = doc_type\n \n raise_on_exception = kwargs.get('raise_on_exception', None)\n if raise_on_exception is None:\n raise_on_exception = False\n kwargs['raise_on_exception'] = raise_on_exception\n \n raise_on_error = kwargs.get('raise_on_error', None)\n if raise_on_error is None:\n raise_on_error = False\n kwargs['raise_on_error'] = raise_on_error\n \n self._logger.info('%s documents to index into %s', len(documents), index)\n doc_count = 0 \n \n if len(documents) > 0:\n for success, info in helpers.parallel_bulk(self.es, documents, index=index, **kwargs):\n if not success:\n self._logger.error(f'A document failed: {info}')\n else:\n doc_count += 1\n \n self._logger.info('%s documents indexed into %s', doc_count, index)\n \n return doc_count",
"def process(self):\n level = self.parameter['level-of-operation']\n assert_file_grp_cardinality(self.input_file_grp, 1)\n assert_file_grp_cardinality(self.output_file_grp, 1)\n\n for (n, input_file) in enumerate(self.input_files):\n self.logger.info(\"INPUT FILE %i / %s\", n, input_file.pageId or input_file.ID)\n file_id = make_file_id(input_file, self.output_file_grp)\n\n pcgts = page_from_file(self.workspace.download_file(input_file))\n self.add_metadata(pcgts)\n page_id = pcgts.pcGtsId or input_file.pageId or input_file.ID # (PageType has no id)\n page = pcgts.get_Page()\n \n page_image, page_xywh, page_image_info = self.workspace.image_from_page(\n page, page_id, feature_filter='binarized')\n if self.parameter['dpi'] > 0:\n zoom = 300.0/self.parameter['dpi']\n elif page_image_info.resolution != 1:\n dpi = page_image_info.resolution\n if page_image_info.resolutionUnit == 'cm':\n dpi *= 2.54\n self.logger.info('Page \"%s\" uses %f DPI', page_id, dpi)\n zoom = 300.0/dpi\n else:\n zoom = 1\n \n if level == 'page':\n self.process_page(page, page_image, page_xywh, zoom,\n input_file.pageId, file_id)\n else:\n if level == 'table':\n regions = page.get_TableRegion()\n else: # region\n regions = page.get_AllRegions(classes=['Text'], order='reading-order')\n if not regions:\n self.logger.warning('Page \"%s\" contains no text regions', page_id)\n for region in regions:\n region_image, region_xywh = self.workspace.image_from_segment(\n region, page_image, page_xywh, feature_filter='binarized')\n if level == 'region':\n self.process_region(region, region_image, region_xywh, zoom,\n input_file.pageId, file_id + '_' + region.id)\n continue\n lines = region.get_TextLine()\n if not lines:\n self.logger.warning('Page \"%s\" region \"%s\" contains no text lines',\n page_id, region.id)\n for line in lines:\n line_image, line_xywh = self.workspace.image_from_segment(\n line, region_image, region_xywh, feature_filter='binarized')\n self.process_line(line, line_image, line_xywh, zoom,\n input_file.pageId, region.id,\n file_id + '_' + region.id + '_' + line.id)\n\n # update METS (add the PAGE file):\n file_path = os.path.join(self.output_file_grp, file_id + '.xml')\n pcgts.set_pcGtsId(file_id)\n out = self.workspace.add_file(\n ID=file_id,\n file_grp=self.output_file_grp,\n pageId=input_file.pageId,\n local_filename=file_path,\n mimetype=MIMETYPE_PAGE,\n content=to_xml(pcgts))\n self.logger.info('created file ID: %s, file_grp: %s, path: %s',\n file_id, self.output_file_grp, out.local_filename)",
"def process():",
"def insert_documents(connection: DBConnection, documents: Sequence[Document]) -> None:\n max_ = len(documents)\n current = 0\n print() # print an extra line, because we will delete lines with printing \\r\n for chunk in chunks(documents):\n connection.execute(\"BEGIN TRANSACTION\")\n for doc in chunk:\n # python doesn't support prepared statements, but instead has a builtin sql cache\n connection.execute(\n \"INSERT INTO docs(did, title, url) VALUES (?, ?, ?)\", doc.convert_to_tuple())\n current += 1\n print(f\"\\r[{current}/{max_}] doc done\", end='')\n connection.execute(\"COMMIT\")"
] | [
"0.6526872",
"0.6464491",
"0.6338085",
"0.6282727",
"0.62149245",
"0.61992455",
"0.61971223",
"0.6139438",
"0.6122199",
"0.60769445",
"0.60100853",
"0.6003393",
"0.59760845",
"0.5970945",
"0.5958176",
"0.59526074",
"0.59257114",
"0.58856386",
"0.58802295",
"0.58416474",
"0.5823333",
"0.5815783",
"0.5807461",
"0.57988983",
"0.57873374",
"0.57856643",
"0.5745436",
"0.57433623",
"0.5728181",
"0.57254946"
] | 0.69855595 | 0 |
PEP 8 function for donnee | def function_donnee_pep1():
socio = function_data_socio()
plugs = function_data_plugs()
erup = function_data_erup()
dollars = function_data_dollars()
fire = function_data_fire()
fertilizer = function_data_ferti()
periode = function_data_periode()
pole = data_function_pole()
return socio, plugs, erup, dollars,\
fire, fertilizer, periode, pole | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def reckon(self):",
"def function_donnee_pep():\r\n\r\n particles = data_function_particle()\r\n weather = data_function_weather()\r\n wind = data_function_wind()\r\n temperature = data_function_temperature()\r\n season = data_function_season()\r\n deaparture = data_function_departure()\r\n day = data_function_day()\r\n rank = data_function_ranking()\r\n pressure = data_function_pressure()\r\n demonstration = data_function_demonstration()\r\n\r\n return particles, weather, wind, temperature, season, deaparture,\\\r\n day, rank, pressure, demonstration",
"def cliquer_sur_unité(self):",
"def analyse_donnees(self, mere, foetus, pere, log):\n concordance_mf = 0\n concordance_pf = None\n if len(pere) != 0:\n concordance_pf = 0\n log = log + \"Père détecté.................................\\n\"\n log = log + \"\\n\\nVérification concordance des ADNs entre père et foetus..............................\\n\"\n for Alleles in range(len(foetus)):\n for Allele_Foe in range(3):\n if foetus[Alleles].allele[Allele_Foe] in pere[Alleles].allele:\n if foetus[Alleles].allele[Allele_Foe] != 0.0:\n pere[Alleles].concordance_pere_foetus = \"OUI\"\n concordance_pf = concordance_pf + 1\n log = log + \"Concordance pour marqueur \" + str(\n foetus[Alleles].marqueur) + \" OK..................\\n\"\n break\n else:\n pere[Alleles].concordance_pere_foetus = \"NON\"\n log = log + \"Concordance pour marqueur \" + foetus[\n Alleles].marqueur + \" PAS OK..............\\n\"\n break\n log = log + \"\\n\\nVérification concordance des ADNs entre mère et foetus..............................\\n\"\n for Alleles in range(len(foetus)):\n for Allele_Foe in range(3):\n if foetus[Alleles].allele[Allele_Foe] in mere[Alleles].allele:\n if foetus[Alleles].allele[Allele_Foe] != 0.0:\n foetus[Alleles].concordance_mere_foetus = \"OUI\"\n concordance_mf = concordance_mf + 1\n log = log + \"Concordance pour marqueur \" + str(\n foetus[Alleles].marqueur) + \" OK..................\\n\"\n break\n else:\n foetus[Alleles].concordance_mere_foetus = \"NON\"\n log = log + \"Concordance pour marqueur \" + foetus[Alleles].marqueur + \" PAS OK..............\\n\"\n break\n log = log + \"Vérification concordance des ADns terminée..................................\\n\\n\\n\"\n if concordance_mf != len(foetus):\n resultats, conclusion = self.resultat(concordance_mf, concordance_pf, foetus, mere, pere)\n log = log + \"Concordance des ADNs PAS OK....................\\n\"\n log = log + \"Erreur dans l'échantillon...................\\n\"\n log = log + \"Revérifier s'il vous plaît.............\\n\"\n return resultats, conclusion, log\n else:\n log = log + \"Traitement des 15 autres marqueurs..............................\\n\"\n for nbre_lignes in range(1, len(mere)):\n log = log + \"Traitement du marqueur \" + str(foetus[nbre_lignes].marqueur) + \"..........\\n\"\n pic = foetus[nbre_lignes].foetus_pics()\n log = log + \"Calcul du nombre d'allèles pour le foetus......................\\n\"\n log = log + \"Nombre d'allèles pour le foetus : \" + str(pic) + \".........\\n\"\n log = log + \"Vérification de l'homozygotie de la mère......................\\n\"\n mere[nbre_lignes].homozygotie()\n log = log + \"Mère homozygote : \" + str(mere[nbre_lignes].homozygote) + \"...............\\n\"\n log = log + \"Vérification mère et foetus mêmes allèles......................\\n\"\n foetus[nbre_lignes].allele_semblable(mere[nbre_lignes])\n log = log + \"Code de retour vérification allèles semblables: \" + str(\n foetus[nbre_lignes].informatif) + \"...............\\n\"\n log = log + \"Initialisation du taux de contamination pour calcul à venir...............\\n\"\n foetus[nbre_lignes].taux = 0.0\n log = log + \"Taux initialisé.................................\\n\"\n log = log + \"Si code informatif de retour allèles semblables différent de 2, vérification écho.............\\n\"\n log = log + \"Si écho, affection code informatif 3...............\\n\"\n if foetus[nbre_lignes].informatif != 2:\n log = log + \"Vérification si écho......................\\n\"\n mere[nbre_lignes].echo(foetus[nbre_lignes])\n log = log + \"Code retour vérification écho : \" + str(\n 
foetus[nbre_lignes].informatif) + \"...............\\n\"\n log = log + \"Début chaîne de traitement...........................\\n\"\n if pic == 3:\n log = log + \"Trois allèles détectés......................\\n\"\n foetus[nbre_lignes].contamination_heterozygote(mere[nbre_lignes])\n log = log + \"Marqueur informatif, affectation du code contamination 1..............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Calcul taux de contamination du marqueur..........\\n\"\n foetus[nbre_lignes].contamination = 2\n log = log + \"Calcul terminé....................\\n\"\n elif mere[nbre_lignes].homozygote:\n log = log + \"Mère homozygote.......................\\n\"\n log = log + \"Marqueur non informatif, affectation du code informatif 0............\\n\"\n foetus[nbre_lignes].informatif = 0\n elif pic == 2:\n log = log + \"Deux allèles détectés..............\\n\"\n if foetus[nbre_lignes].informatif == 2:\n log = log + \"Si mêmes allèles, vérification homozygote contaminé...............\\n\"\n foetus[nbre_lignes].verif_homozygote_contamine(self)\n if foetus[nbre_lignes].contamination == 1:\n log = log + \"Homozygote contaminé identifié.....................\\n\"\n log = log + \"Calcul du taux de contamination....................\\n\"\n foetus[nbre_lignes].homozygote_contamine(self)\n log = log + \"Calcul du taux de contamination effectué...........\\n\"\n else:\n if foetus[nbre_lignes].informatif != 3:\n log = log + \"Code calcul écho différent de 3..................\\n\"\n log = log + \"Marqueur informatif, affectation du code informatif 1.............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Marqueur non contaminé, affectation du code contamination 0................\\n\"\n foetus[nbre_lignes].contamination = 0\n else:\n log = log + \"Un seul allèle détecté............\\n\"\n if foetus[nbre_lignes].informatif != 3:\n log = log + \"Code informatif différent de 3...........\\n\"\n log = log + \"Marqueur informatif, affectation du code informatif 1.............\\n\"\n foetus[nbre_lignes].informatif = 1\n log = log + \"Marqueur non contaminé, affectation du code contamination 0................\\n\"\n foetus[nbre_lignes].contamination = 0\n log = log + \"\\n\\n\"\n log = log + \"Calcul échantillon contaminé ou non......\\n\"\n log = log + \"Marqueur contaminé si >\" + str(self.seuil_taux_conta) + \".......\\n\"\n log = log + \"Echantillon contaminé si plus de \" + str(\n self.seuil_nbre_marqueurs) + \"marqueurs contaminés...\\n\"\n self.conclusion_echantillon(foetus)\n log = log + \"Calcul échantillon terminé.....\\n\"\n log = log + \"Fin de traitement...........\\n\"\n resultats, conclusion = self.resultat(concordance_mf, concordance_pf, foetus, mere, pere)\n return resultats, conclusion, log",
"def donnée(request):\r\n\r\n particles, weather, wind, temperature, season, deaparture,\\\r\n day, rank, pressure, demonstration = function_donnee_pep()\r\n\r\n socio, plugs, erup, dollars,\\\r\n fire, fertilizer, periode, pole = function_donnee_pep1()\r\n\r\n\r\n return render(request, 'donnée.html', {'lyon':particles[0],\r\n 'paris':particles[1],\r\n 'marseille':particles[2],\r\n 'weather_lyon':weather[0],\r\n 'weather_marseille':weather[1],\r\n 'weather_paris':weather[2],\r\n 'wind_lyon':wind[0],\r\n 'wind_paris':wind[1],\r\n 'wind_marseille':wind[2],\r\n 'temperature_lyon':round(temperature[0]),\r\n 'temperature_paris':round(temperature[1]),\r\n 'temperature_marseille':round(temperature[2]),\r\n 'current_season':season,\r\n 'departure_lyon':deaparture[0],\r\n 'regular_day_lyon':deaparture[2],\r\n 'hour_point_lyon':deaparture[1],\r\n 'no_point_lyon':deaparture[3],\r\n 'departure_marseille':deaparture[0],\r\n 'hour_point_paris':deaparture[1],\r\n 'regular_day_marseille':deaparture[2],\r\n 'no_point_marseille':deaparture[3],\r\n 'departure_paris':deaparture[0],\r\n 'hour_point_marseille':deaparture[1],\r\n 'regular_day_paris':deaparture[2],\r\n 'no_point_paris':deaparture[3],\r\n 'weekend':day[0],\r\n 'week_day':day[1],\r\n 'ranking_lyon':rank[0],\r\n 'ranking_paris':rank[1],\r\n 'ranking_marseille':rank[2],\r\n 'pole_lyon':pole[0],\r\n 'pole_paris':pole[1],\r\n 'pole_marseille':pole[2],\r\n 'pressure_lyon':pressure[0],\r\n 'pressure_paris':pressure[1],\r\n 'pressure_marseille':pressure[2],\r\n 'demonstration_lyon':demonstration[0],\r\n 'demonstration_paris':demonstration[1],\r\n 'demonstration_marseille':demonstration[2],\r\n 'socio_lyon':socio[0],\r\n 'socio_marseille':socio[2],\r\n 'socio_paris':socio[1],\r\n 'plugs_lyon':plugs[0],\r\n 'plugs_paris':plugs[1],\r\n 'eruption':erup,\r\n 'diesel':dollars[0],\r\n 'dollars':dollars[1],\r\n 'fire_lyon':fire[0],\r\n 'fire_marseille':fire[1],\r\n 'fire_paris':fire[2],\r\n 'fertilizer':fertilizer,\r\n 'periode':periode[0],\r\n 'po_lyon':periode[1],\r\n 'po_paris':periode[2],\r\n 'po_marseille':periode[3]})\r\n\r\n\r\n return render(request, 'donnée.html')",
"def afficher(dico):\n return dico",
"def substantiate():",
"def joueCoup(position,coup):\n nouvelle_pos = clonePosition(position) # on duplique pour ne pas modifier l'original\n n = nouvelle_pos['taille']\n trait = nouvelle_pos['trait']\n # on transforme coup en indice\n if trait == 'SUD':\n indice_depart = coup-1\n else:\n indice_depart = 2*n-coup\n # retrait des graines de la case de depart\n nbGraines = nouvelle_pos['tablier'][indice_depart]\n nouvelle_pos['tablier'][indice_depart] = 0\n # on seme les graines dans les cases a partir de celle de depart\n indice_courant = indice_depart\n while nbGraines > 0:\n indice_courant = (indice_courant + 1) % (2*n)\n if (indice_courant != indice_depart): # si ce n'est pas la case de depart\n nouvelle_pos['tablier'][indice_courant] += 1 # on seme une graine\n nbGraines -= 1\n # la case d'arrivee est dans le camp ennemi ?\n if (trait == 'NORD'):\n estChezEnnemi = (indice_courant < n)\n else:\n estChezEnnemi = (indice_courant >= n)\n # realisation des prises eventuelles\n while estChezEnnemi and (nouvelle_pos['tablier'][indice_courant] in range(2,4)):\n nouvelle_pos['graines'][trait] += nouvelle_pos['tablier'][indice_courant]\n nouvelle_pos['tablier'][indice_courant] = 0\n indice_courant = (indice_courant - 1) % (2*n)\n if (trait == 'NORD'):\n estChezEnnemi = (indice_courant < n)\n else:\n estChezEnnemi = (indice_courant >= n)\n # mise a jour du camp au trait\n if trait == 'SUD':\n nouvelle_pos['trait'] = 'NORD'\n else:\n nouvelle_pos['trait'] = 'SUD'\n return nouvelle_pos",
"def t_fleche(t):\r\n ev=donne_evenement()\r\n type_ev=type_evenement(ev)\r\n if type_ev==\"Deplacement\":\r\n t=clic_x(ev)\r\n else:\r\n t=t\r\n return t",
"def es_satisfecho_por(self, candidata):",
"def t_fleche2(t):\r\n ev=donne_evenement()\r\n type_ev=type_evenement(ev)\r\n if type_ev==\"Touche\":\r\n t=touche(ev)\r\n else:\r\n t=\"kek\"\r\n return t",
"def opinion():\n pass",
"def enchere(self):\n\n i = 0\n while i < 5 and self.annonce < 4:\n paroleJ = self.joueurs[i].parler(self.annonce)\n if paroleJ != 0:\n self.annonce = paroleJ\n self.indiceJoueurQuiPrend = i\n i += 1\n\n print(\"joueur qui prend : \" + str(self.indiceJoueurQuiPrend))\n if self.indiceJoueurQuiPrend != -1:\n print(\"annonce : \" + str(self.annonce))\n if self.annonce == 1 or self.annonce == 2:\n self.joueurs[self.indiceJoueurQuiPrend].possedeChien = True\n self.joueurs[self.indiceJoueurQuiPrend].construireChien()\n self.debuterPartie()\n\n else:\n self.finirPartie()",
"def mezclar_bolsa(self):",
"def sucessor(self, no):\n if no is not None:\n if no.getDireito() is not None:\n return self.minimo(no.getDireito())\n else:\n pai = no.getPai()\n while pai is not None and no is pai.getDireito():\n no = pai\n pai = no.getPai()\n return pai",
"def monome_plus_petit_degre(self):\n\t\tif self.__tete:\n\t\t\t\"\"\" plus grand == plus a droite dans l'ABR \"\"\"\n\t\t\treturn self.__tete.plus_grand()\n\t\telse:\n\t\t\treturn None",
"def Problem11():\n return 'Ductile Coulomb-Mohr'",
"def deter_alea(nom) :\n\n alea = randint(0,3)\n\n if alea == 0 :\n\n deter = article_def(nom)\n\n elif alea == 1 :\n\n deter = demonstratif(nom)\n\n elif alea == 2 :\n\n deter = possessif(nom)\n\n elif alea == 3 :\n\n deter = cardinal(nom)\n\n return deter",
"def afficher(self, personnage, jeu, partie):\n en_main = jeu.en_main.get(personnage)\n tableau = jeu.tableau\n if en_main:\n msg = \"Dans votre main, vous avez {} et {}.\".format(\n en_main[0].nom_complet_indefini,\n en_main[1].nom_complet_indefini)\n else:\n msg = \"Vous n'avez encore rien dans votre main.\"\n \n if tableau:\n tableau = [piece.nom_complet_indefini for piece in tableau]\n aff_tableau = \", \".join(tableau[:-1]) + \" et \" + tableau[-1]\n msg += \"\\nSur le tableau se trouve {}.\".format(aff_tableau)\n\n if partie.tour is personnage:\n msg += \"\\nC'est votre tour.\"\n \n return msg",
"def dis(self):\n return self.nlegomena(2)",
"def prove_NNE() -> Proof:\n # Optional Task 6.7b",
"def possessif(nom):\n\n CA = nom[1]\n\n\n rand = randint(0,5)\n\n if CA == \"-1\" or CA == \"-3\" or CA == \"-5\" or CA == \"-7\" or CA == \"-8\" or CA == \"-4\" or Premiere_lettre_voyelle(nom[0]):\n if rand == 0:\n return \"mon \"\n elif rand == 1:\n return \"ton \"\n elif rand == 2:\n return \"son \"\n elif rand == 3:\n return \"notre \"\n elif rand == 4:\n return \"votre \"\n elif rand == 5:\n return \"leur \"\n \n elif (CA == \"-2\" or CA == \"-6\" or CA == \"-9\"):\n if rand == 0:\n return \"ma \"\n elif rand == 1:\n return \"ta \"\n elif rand == 2:\n return \"sa \"\n elif rand == 3:\n return \"notre \"\n elif rand == 4:\n return \"votre \"\n elif rand == 5:\n return \"leur \"\n else:\n return False",
"def narration_self(self):\n pass",
"def vendre(self, symbole, quantite, une_date=date.today()):\n\n if une_date > date.today():\n raise ErreurDate(\"La date est postérieure à la date d'aujourd'hui\")\n\n else:\n if symbole in self.portefeuille:\n quantite_titre = 0.0\n\n for les_jours in self.portefeuille[symbole]:\n if les_jours <= une_date:\n quantite_titre += self.portefeuille[symbole][les_jours]\n\n if quantite_titre < quantite:\n raise ErreurQuantité(\"Quantité insuffisante pour effectuer la vente\")\n\n else:\n if une_date in self.portefeuille[symbole]:\n self.portefeuille[symbole][une_date] -= float(quantite)\n\n elif une_date not in self.portefeuille[symbole]:\n self.portefeuille[symbole][une_date] = - float(quantite)\n\n cout = self.marche.prix(symbole, une_date) * quantite\n if une_date in self.argent:\n self.argent[une_date] += float(cout)\n\n elif une_date not in self.argent:\n self.argent[une_date] = float(cout)\n\n #Ca sert tu a de quoi ca ? Yes le chum\n else:\n raise ErreurQuantité(\"Le titre ne fait pas partie du portefeuille\")",
"def modifier_classement_joueur_tournoi(self, joueurs_tournoi, championnat, rapport):\r\n rapport.affichage_classement_championnat(championnat)\r\n championnat = sorted(championnat, key=lambda x: x.classement) # tri joueurs du championnat par classement\r\n print(\"Veuillez indiquer le numéro du joueur à modifier:\")\r\n choix = int(input())\r\n if choix <= len(championnat): # test si choix numero joueur valide\r\n index = choix - 1 # car liste commence a 0\r\n joueur = championnat[index]\r\n nouveau_joueur = copy.deepcopy(joueur)\r\n print(\"Veuillez indiquer le nouveau classement de \" + joueur.nom)\r\n nouveau_classement = int(input())\r\n nouveau_joueur.classement = nouveau_classement\r\n championnat.remove(joueur) # enleve ancienne position du joueur dans classement\r\n joueurs_tournoi.remove(joueur) # enleve ancienne position du joueur dans tournoi\r\n championnat.append(nouveau_joueur) # ajoute joueur avec classement actualise\r\n joueurs_tournoi.append(nouveau_joueur) # ajoute joueur classement actualise dans liste participants tournoi\r\n return joueurs_tournoi, championnat\r\n else:\r\n print(\"Numero joueur invalide\")\r\n return",
"def exo2():",
"def degibber(self):",
"def antecessor(self, no):\n if no is not None:\n if no.getEsquerdo() is not None:\n return self.maximo(no.getEsquerdo())\n else:\n pai = no.getPai()\n while pai is not None and no is pai.getEsquerdo():\n no = pai\n pai = no.getPai()\n return pai",
"def degre(self):\n\t\tif self.__tete:\n\t\t\treturn len(self.__tete.plus_petit().get_indeterminee())\n\t\telse:\n\t\t\t\"\"\" concession a la definition mathematique du degre du polynome nul \"\"\"\n\t\t\treturn (-1)",
"def exercise_b2_69():\r\n pass"
] | [
"0.61733526",
"0.5896056",
"0.5715602",
"0.5704777",
"0.56908274",
"0.5673039",
"0.56533015",
"0.55861145",
"0.5498113",
"0.548725",
"0.54335994",
"0.542311",
"0.5402919",
"0.5365584",
"0.5351271",
"0.53461313",
"0.53282666",
"0.52006626",
"0.5159694",
"0.5155111",
"0.51463705",
"0.5142845",
"0.5106252",
"0.50948644",
"0.5088824",
"0.50774443",
"0.5059613",
"0.5047698",
"0.5047032",
"0.4993955"
] | 0.6139485 | 1 |
Information page about pollution | def info_pollu(request):
return render(request, 'info_pollu.html') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def manage_info():",
"def warning(self) -> str:\n return pulumi.get(self, \"warning\")",
"def displaysuspicions(self):\n raise NotImplementedError()",
"def warning(self) -> 'outputs.AnyResponse':\n return pulumi.get(self, \"warning\")",
"def get(self):\n WriteTemplate(self.response, 'tips.html', {})",
"def healthcare():",
"def list_warnings(self):\n lwarn = []\n r = (220,0,0) # Red\n w = (244,234,244) # White\n g = (144,238,144) # Green\n w = (255,255,255) # White\n c = cf.gs.game.character\n ci = c.inventory\n f = ci.sorted_items['food'].amount\n if f > 0 and f < 10:\n lwarn.append(\n {'item':None,'value':'Low food!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n elif f <= 0:\n lwarn.append(\n {'item':None,'value':'0 food: HP -1!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n lwarn.append(\n {'item':None,'value':'0 food: Sanity -1!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n h = cf.gs.game.character.selected_house\n if h == 'Staying with Friends':\n lwarn.append(\n {'item':None,'value':'No house: Sanity -1!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n if c.health == 1:\n lwarn.append(\n {'item':None,'value':'Low health!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n elif c.health <= 0:\n lwarn.append(\n {'item':None,'value':'0 health!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n if len(cf.gs.game.events.inactive_events) == 5:\n lwarn.append(\n {'item':None,'value':'5 events: Activating!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n if c.sanity > 0 and c.sanity < 10:\n lwarn.append(\n {'item':None,'value':'Low sanity!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n elif c.sanity <= 0:\n lwarn.append(\n {'item':None,'value':'0 sanity!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n cash = ci.sorted_items['cash'].amount\n if cash > 0 and cash < 4000:\n lwarn.append(\n {'item':None,'value':'Low cash!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n elif cash <= 0:\n lwarn.append(\n {'item':None,'value':'0 cash: Sanity-=1!',\n 'selected_bgcolor':r,'bgcolor':r,'font_size':20,'color':w})\n if len(lwarn) == 0:\n lwarn.append(\n {'item':None,'value':'Green means go!',\n 'selected_bgcolor':g,'bgcolor':g,'font_size':20})\n return lwarn",
"def details(key):\n error_message = ''\n if request.method == 'POST':\n try:\n choice_key = request.form['choice']\n repository.increment_vote(key, choice_key)\n return redirect('/results/{0}'.format(key))\n except KeyError:\n error_message = 'Please make a selection.'\n\n return render_template(\n 'details.jade',\n title='Poll',\n year=datetime.now().year,\n poll=repository.get_poll(key),\n error_message=error_message,\n )",
"def get_hint_html(self, system):\r\n if self.child_state in (self.INITIAL, self.ASSESSING):\r\n return ''\r\n\r\n if self.child_state == self.DONE:\r\n # display the previous hint\r\n latest = self.latest_post_assessment(system)\r\n hint = latest if latest is not None else ''\r\n else:\r\n hint = ''\r\n\r\n context = {'hint': hint}\r\n\r\n if self.child_state == self.POST_ASSESSMENT:\r\n context['read_only'] = False\r\n elif self.child_state == self.DONE:\r\n context['read_only'] = True\r\n else:\r\n # This is a dev_facing_error\r\n raise ValueError(\"Self Assessment module is in an illegal state '{0}'\".format(self.child_state))\r\n\r\n return system.render_template('{0}/self_assessment_hint.html'.format(self.TEMPLATE_DIR), context)",
"def show_info(self):\n print(\"Problem number: \" + str(self.number))\n print(\"Problem name: \" + str(self.name))\n print(\"Problem description: \" + str(self.desc))",
"def details(self):\n pass",
"def get_warning_text(self):\n \n to_print = []\n if self['skipped_subchannel'] > 0:\n to_print.append(\"Some event with large weight have been discarded.\"+\\\n \" This happens %s times.\" % self['skipped_subchannel'])\n if self['n_madloop_calls'] > 0:\n fraction = self['exceptional_points']/float(self['n_madloop_calls'])\n if fraction > 1.0e-4:\n to_print.append(\"Some PS with numerical instability have been set \"+\\\n \"to a zero matrix-element (%.3g%%)\" % (100.0*fraction))\n \n return ('\\n'.join(to_print)).replace(\"'\",\" \")",
"def show_stat_desc(self):\n qtw.QMessageBox.information(\n self, \"Stat Descriptions\", '*This list is not comprehensive, and '\n 'will vary depending on race/(sub)class.*\\n\\nStrength\\n\\t-damage '\n 'and attack bonuses for most melee and thrown weapons\\n\\t-carry '\n 'capacity\\n\\t-Athletics checks\\nDexterity\\n\\t-damage and attack '\n 'bonuses for ranged/finesse weapons\\n\\t-Acrobatics, Stealth, '\n 'Sleight of Hand, and Initiative checks\\nConstitution\\n\\t-hit '\n 'points\\n\\t-resistance to poisons, etc.\\nIntelligence\\n\\t-spell '\n 'save DC/attack bonus for Wizards\\n\\t-Arcana, History, '\n 'Investigation, Nature, and Religion checks\\nWisdom\\n\\t-spell save'\n ' DC/attack bonus for Druid, Cleric, and Ranger\\n\\t-Animal '\n 'Handling, Insight, Medicine, Perception, and Survival checks\\n'\n 'Charisma\\n\\t-spell save DC/attack bonus for Bard, Paladin, '\n 'Warlock and Sorceror\\n\\t-Deception, Intimidation, Performance and'\n ' Persuasion checks'\n )",
"def help(request):\n\tip = get_ip(request, right_most_proxy=True)\n\tIpAddressInformation.objects.create(ip_address=ip)\n\tupdatedstatresult=updatedstat\n\tcountaxis=len(updatedstatresult)/10\n\tif countaxis==0:\n\t\tcountaxis=len(updatedstatresult)\n\treturn render(request, 'help.html', {'updatedstatresult':updatedstatresult,'countaxis':countaxis})",
"def info(self):",
"def info(self):",
"def get_info(self):\n return \"TODO !\"",
"def hook_displaysuspicions(self):\n charMarginal = (np.array(self.hypCountByCharacter,dtype=np.float64)\n / np.sum(self.hypCountByCharacter))\n roomMarginal = (np.array(self.hypCountByRoom,dtype=np.float64)\n / np.sum(self.hypCountByRoom))\n weapMarginal = (np.array(self.hypCountByWeapon,dtype=np.float64)\n / np.sum(self.hypCountByWeapon))\n ui.plotscenariomarginals(charMarginal,roomMarginal,weapMarginal)\n ui.plotforbidden(self.forbidden)",
"def showPersecutionPopup(self):\n\t\t\n\t\tpopup = Popup.PyPopup(7626, EventContextTypes.EVENTCONTEXT_ALL)\n\t\tpopup.setHeaderString(\"Religious Persecution\")\n\t\tpopup.setBodyString(\"Choose a religious minority to deal with...\")\n\t\treligionList = sd.getPersecutionReligions()\n\t\tfor iReligion in religionList:\n\t\t\tstrIcon = gc.getReligionInfo(iReligion).getType()\n\t\t\tstrIcon = \"[%s]\" %(strIcon.replace(\"RELIGION_\", \"ICON_\"))\n\t\t\tstrButtonText = \"%s %s\" %(localText.getText(strIcon, ()), gc.getReligionInfo(iReligion).getText())\n\t\t\tpopup.addButton(strButtonText)\n\t\tpopup.launch(False)",
"def warning(self) -> Optional[str]:\n return pulumi.get(self, \"warning\")",
"def info() -> None:",
"def test_low_priority_warnings(self):\n self.__jenkins.contents = self.html\n self.assertEqual(0, self.__jenkins.nr_warnings(('job',), 'low'))",
"def showprivelages(self):\r\n\t\tprint (\"An administrator has the following abilities: \")\r\n\t\tfor power in self.powers:\r\n\t\t\tprint (\"- \" + power)",
"def displayStatistics(self):\n return \"\"",
"def display_help_message():\n return lambda_response(None, {\n \"text\": \"\"\"\n/gauges list - list favorite gauges\n/gauges add USGS_SITE_NUMBER RIVER_DESCRIPTION - add gauge to list of favorite gauges\n/gauges check USGS_SITE_NUMBER - display current flow readings for gauge\n \"\"\".strip(),\n })",
"def statistics():\n return render_template('statistics.html'), 200",
"def get_warnings(self):\n pass",
"def get_warning(self) -> List[str]:\n return []",
"def get_warning(self) -> List[str]:\n return []",
"def _get_poll_info(self, poll_id):\n url = 'https://strawpoll.me/api/v2/polls/{}'.format(poll_id)\n for attempt in range(5):\n try:\n r = requests.get(url)\n poll_options = r.json()['options']\n poll_votes = r.json()['votes']\n except ValueError:\n continue\n except TypeError:\n continue\n else:\n return poll_options, poll_votes\n else:\n self._add_to_chat_queue(\n \"Sorry, there was a problem talking to the strawpoll api. Maybe wait a bit and retry your command?\")"
] | [
"0.57769835",
"0.57013565",
"0.56552356",
"0.56520486",
"0.5593325",
"0.5581116",
"0.55377436",
"0.55259347",
"0.54547215",
"0.53984815",
"0.53958166",
"0.5375226",
"0.5328111",
"0.5316547",
"0.53163064",
"0.53163064",
"0.53009826",
"0.5300177",
"0.5263906",
"0.52632004",
"0.5261898",
"0.5256735",
"0.5253478",
"0.52520937",
"0.52461386",
"0.5245101",
"0.5243257",
"0.5226758",
"0.5226758",
"0.5215126"
] | 0.57919174 | 0 |
Our prediction page. Via an Ajax call we retrieve these cities, query the database with aide_analysa.py from prediction_site, try to match the conditions with analysa2.py, and return the result to the HTML page | def prediction(request):
if request.method == "POST":
city1 = request.POST.get('lyon')
city2 = request.POST.get('paris')
city3 = request.POST.get('marseille')
if city1:
#from predi_site.analysa2.py
predi = predi_analysa2('lyon')
return HttpResponse(predi)
if city2:
predi = predi_analysa2('paris')
return HttpResponse(predi)
if city3:
predi = predi_analysa2('marseille')
return HttpResponse(predi)
return render(request, 'prediction.html') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def results():\n \n to_predict_list = request.form.to_dict() \n to_predict_list = list(to_predict_list.values()) \n to_predict_list = list(map(float, to_predict_list)) \n result = ValuePredictor(to_predict_list) \n if int(result)== 1: \n prediction ='Run Martha, or you\\'re gonna get the sugar.'\n else: \n prediction ='Go ahead and have another donut Martha, you\\'re all good.' \n return render_template(\"results.html\",\n year=datetime.now().year,\n prediction = prediction\n )",
"def show_predictions(request):\n route_tag = request.GET['RT']\n direction_tag = request.GET['DT']\n stop_tag = request.GET['ST']\n\n pred = get_predictions('sf-muni', route_tag, direction_tag, stop_tag)\n\n return HttpResponse(json.dumps(pred), content_type='application/json')",
"def fdaAssay(request):\n\tip = get_ip(request, right_most_proxy=True)\n\tIpAddressInformation.objects.create(ip_address=ip)\n\n\tcontextres =[]\n\t#build elasticsearch query to search data\n\tquery={\"query\": {\n\t\t\"bool\": {\n\t\t\t\"must\": [\n\t\t\t\t{\"match\": {\"Assays for FDA approved Marker\": \"Yes\"}},\n\t\t\t\t{\"match\": {\"UniprotKb entry status\": \"Yes\"}}\n\t\t\t]\n\t\t}\n\t}\n\t}\n\t#generate random file name to store search result in json format\n\tnameFIle=names.get_first_name()\n\tjsonfilename=nameFIle+'_basic_search_fda.json'\n\tjsonfilepath=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'basicsearch', 'results', jsonfilename)\n\tjsonfileoutput= open(jsonfilepath,'w')\n\tjfinaldata=[]\n\tes.indices.refresh(index=\"mrmassaydb-index\")\n\t#elasticsearch will search data\n\tres=helpers.scan(client=es,scroll='2m',index=\"mrmassaydb-index\", doc_type=\"mrmassaydb-type\",query=query,request_timeout=30)\n\tjfinaldata=[]\n\t#if data is valid based on uniprotkb release then it will display\n\tfor i in res:\n\t\tjdic=i['_source']\n\t\tjdic={str(tkey):force_text(tvalue) for tkey,tvalue in jdic.items()}\n\t\tif jdic[\"UniprotKb entry status\"] ==\"Yes\" and jdic['UniProtKB Accession'] !='502':\n\t\t\tjdic[\"PPI\"] =\"View\"\n\t\t\tjdic[\"sel\"] =\"\"\n\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('\\\\','')\n\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('<br>','|')\n\t\t\tjdic[\"SRMAtlas URL\"]=jdic[\"SRMAtlas URL\"].replace('\\\\','')\n\t\t\tjdic[\"Passel URL\"]=jdic[\"Passel URL\"].replace('\\\\','')\n\t\t\tjdic[\"CPTAC URL\"]=jdic[\"CPTAC URL\"].replace('\\\\','')\n\t\t\tjdic[\"Panoramaweb URL\"]=jdic[\"Panoramaweb URL\"].replace('\\\\','')\n\t\t\tjdic[\"PeptideTracker URL\"]=jdic[\"PeptideTracker URL\"].replace('\\\\','')\n\t\t\t#if jdic[\"Pathway Name\"].lower() !='na':\n\t\t\t#\tjdic[\"Pathway Name\"]=re.sub(r\"(\\w)([A-Z])\",r\"\\1|\\2\",jdic[\"Pathway Name\"])\n\t\t\tjdic[\"Mean Concentration\"] =jdic[\"Mean Concentration\"].replace('fmol/','fmol/µ')\n\t\t\tjdic[\"Concentration\"] =jdic[\"Concentration\"].replace('fmol/','fmol/µ')\n\t\t\tjfinaldata.append(jdic)\n\tes.indices.refresh(index=\"mrmassaydb-index\")\n\t#checking any result generated by database\n\tfoundHits=len(jfinaldata)\n\t#storing only 10000 rows in json format\n\tjson.dump(jfinaldata[:10000],jsonfileoutput)\n\tjsonfileoutput.close()\n\t# if result found then do other job\n\tif foundHits >0:\n\t\tstatsummary=summaryStatcal(jfinaldata) # sent data to this funcation for generating stat\n\t\tpathwaychart=statsummary['pathwaychart']\n\t\tpathwaychart=[i[:2] for i in pathwaychart]\n\t\tspecieslist=statsummary['specieslist']\n\t\ttotallist=statsummary['total']\n\t\tsubcell=statsummary['subcell']\n\t\tgodic=statsummary['godic']\n\t\tjvennprot=statsummary['jevennstat'][0]\n\t\tjvennpep=statsummary['jevennstat'][1]\n\t\tmrmdatabase=statsummary['jevennstat'][2]\n\t\tsortedgodic=OrderedDict(sorted(godic.items(), key=lambda t: t[1])) # sorting GO data\n\t\tupdatedgodic=dict(list(sortedgodic.items()))\n\t\tpepseqdataseries=ast.literal_eval(json.dumps(statsummary['pepseqdataseries'])) #dumping data into json format\n\t\tprodataseries=statsummary['prodataseries']\n\t\tunqisostat=statsummary['unqisostat']\n\t\tjsonfilepathStat=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'basicsearch', 'statsummary', jsonfilename) #storing stat result in json format\n\t\tjsonfileoutputStat= 
open(jsonfilepathStat,'w')\n\t\tjson.dump(statsummary,jsonfileoutputStat)\n\t\tjsonfileoutputStat.close()\n\t\turlname=\"'/resultFile/jsonData/resultJson/basicsearch/results/\"+jsonfilename+\"'\"\n\n\t\tcontextindex={\n\t\t\t\"filename\":urlname,\"colname\":json.dumps(colname),'foundHits':foundHits,\n\t\t\t'pathwaychart':pathwaychart[:11],'specieslist':specieslist,\n\t\t\t'totallist':totallist,'subcell':subcell,\n\t\t\t'updatedgodic':updatedgodic,'pepseqdataseries':pepseqdataseries,\n\t\t\t'prodataseries':prodataseries,'unqisostat':unqisostat,\n\t\t\t'jvennprot':json.dumps(jvennprot),'jvennpep':json.dumps(jvennpep),'jvennmrmdb':json.dumps(mrmdatabase)\n\t\t\t}\n\t\treturn render(request,'fdaAssay.html',contextindex)\n\telse:\n\t\treturn render(request,'fdaAssay.html',{'foundHits':foundHits})",
"def select_ind_sentence(request):\n global results\n if request.method == \"POST\":\n\n return _start_analysis(request)\n else:\n return HttpResponse(\n json.dumps({\"error\": \"error, GET request not supported\"}),\n content_type=\"application/json\"\n )",
"def covid19(request):\n\tip = get_ip(request, right_most_proxy=True)\n\tIpAddressInformation.objects.create(ip_address=ip)\n\n\tcontextres =[]\n\t#build elasticsearch query to search data\n\tquery={\"query\": {\n\t\t\"bool\": {\n\t\t\t\"must\": [\n\t\t\t\t{\"match\": {\"Associated with COVID-19\": \"Yes\"}},\n\t\t\t\t{\"match\": {\"UniprotKb entry status\": \"Yes\"}}\n\t\t\t]\n\t\t}\n\t}\n\t}\n\t#generate random file name to store search result in json format\n\tnameFIle=names.get_first_name()\n\tjsonfilename=nameFIle+'_basic_search_covid19.json'\n\tjsonfilepath=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'basicsearch', 'results', jsonfilename)\n\tjsonfileoutput= open(jsonfilepath,'w')\n\tjfinaldata=[]\n\tes.indices.refresh(index=\"mrmassaydb-index\")\n\t#elasticsearch will search data\n\tres=helpers.scan(client=es,scroll='2m',index=\"mrmassaydb-index\", doc_type=\"mrmassaydb-type\",query=query,request_timeout=30)\n\tjfinaldata=[]\n\tpepSeqList=[]\n\tproteinList=[]\n\t#if data is valid based on uniprotkb release then it will display\n\tfor i in res:\n\t\tjdic=i['_source']\n\t\tjdic={str(tkey):force_text(tvalue) for tkey,tvalue in jdic.items()}\n\t\tif jdic[\"UniprotKb entry status\"] ==\"Yes\" and jdic['UniProtKB Accession'] !='502':\n\t\t\tjdic[\"PPI\"] =\"View\"\n\t\t\tjdic[\"sel\"] =\"\"\n\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('\\\\','')\n\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('<br>','|')\n\t\t\tjdic[\"SRMAtlas URL\"]=jdic[\"SRMAtlas URL\"].replace('\\\\','')\n\t\t\tjdic[\"Passel URL\"]=jdic[\"Passel URL\"].replace('\\\\','')\n\t\t\tjdic[\"CPTAC URL\"]=jdic[\"CPTAC URL\"].replace('\\\\','')\n\t\t\tjdic[\"Panoramaweb URL\"]=jdic[\"Panoramaweb URL\"].replace('\\\\','')\n\t\t\tjdic[\"PeptideTracker URL\"]=jdic[\"PeptideTracker URL\"].replace('\\\\','')\n\t\t\t#if jdic[\"Pathway Name\"].lower() !='na':\n\t\t\t#\tjdic[\"Pathway Name\"]=re.sub(r\"(\\w)([A-Z])\",r\"\\1|\\2\",jdic[\"Pathway Name\"])\n\t\t\tjdic[\"Mean Concentration\"] =jdic[\"Mean Concentration\"].replace('fmol/','fmol/µ')\n\t\t\tjdic[\"Concentration\"] =jdic[\"Concentration\"].replace('fmol/','fmol/µ')\n\t\t\tif str(jdic[\"Associated with COVID-19\"]).strip().upper() =='YES':\n\t\t\t\tpepSeqList.append(jdic[\"Peptide Sequence\"].strip())\n\t\t\t\tproteinList.append(jdic[\"UniProtKB Accession\"].strip().split('-')[0])\n\t\t\tjfinaldata.append(jdic)\n\tes.indices.refresh(index=\"mrmassaydb-index\")\n\t#checking any result generated by database\n\tfoundHits=len(jfinaldata)\n\t#storing only 10000 rows in json format\n\tjson.dump(jfinaldata[:10000],jsonfileoutput)\n\tjsonfileoutput.close()\n\t# if result found then do other job\n\tif foundHits >0:\n\t\tstatsummary=summaryStatcal(jfinaldata) # sent data to this funcation for generating stat\n\t\tpathwaychart=statsummary['pathwaychart']\n\t\tpathwaychart=[i[:2] for i in pathwaychart]\n\t\tspecieslist=statsummary['specieslist']\n\t\ttotallist=statsummary['total']\n\t\tsubcell=statsummary['subcell']\n\t\tgodic=statsummary['godic']\n\t\tjvennprot=statsummary['jevennstat'][0]\n\t\tjvennpep=statsummary['jevennstat'][1]\n\t\tmrmdatabase=statsummary['jevennstat'][2]\n\t\tsortedgodic=OrderedDict(sorted(godic.items(), key=lambda t: t[1])) # sorting GO data\n\t\tupdatedgodic=dict(list(sortedgodic.items()))\n\t\tpepseqdataseries=ast.literal_eval(json.dumps(statsummary['pepseqdataseries'])) #dumping data into json 
format\n\t\tprodataseries=statsummary['prodataseries']\n\t\tunqisostat=statsummary['unqisostat']\n\t\tjsonfilepathStat=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'basicsearch', 'statsummary', jsonfilename) #storing stat result in json format\n\t\tjsonfileoutputStat= open(jsonfilepathStat,'w')\n\t\tjson.dump(statsummary,jsonfileoutputStat)\n\t\tjsonfileoutputStat.close()\n\t\turlname=\"'/resultFile/jsonData/resultJson/basicsearch/results/\"+jsonfilename+\"'\"\n\n\t\tcontextindex={\n\t\t\t\"filename\":urlname,\"colname\":json.dumps(colname),'foundHits':foundHits,\n\t\t\t'pathwaychart':pathwaychart[:11],'specieslist':specieslist,\n\t\t\t'totallist':totallist,'subcell':subcell,\n\t\t\t'updatedgodic':updatedgodic,'pepseqdataseries':pepseqdataseries,\n\t\t\t'prodataseries':prodataseries,'unqisostat':unqisostat,\n\t\t\t'uniquePepSeq':len(set(pepSeqList)),'uniqueProtein':len(set(proteinList)),\n\t\t\t'jvennprot':json.dumps(jvennprot),'jvennpep':json.dumps(jvennpep),'jvennmrmdb':json.dumps(mrmdatabase)\n\t\t\t}\n\t\treturn render(request,'covid19.html',contextindex)\n\telse:\n\t\treturn render(request,'covid19.html',{'foundHits':foundHits})",
"def analysis():\n\n response_all_doctors_and_appointments = requests.post(server_url + 'doctor/all_doctors_and_all_appointments')\n doctors_and_appointments = response_all_doctors_and_appointments.json()\n\n return render_template('clerks/analysis.html', doctors_and_appointments=doctors_and_appointments)",
"def predictInsurance():\r\n logging.info(request.form.values())\r\n int_features = [x for x in request.form.values()]\r\n name = int_features[0]\r\n logging.info(int_features)\r\n age = int_features[1]\r\n bmi = int_features[2]\r\n children= int_features[3]\r\n sex = binarizeVariable(int_features[4])\r\n smoker = binarizeVariable(int_features[5])\r\n final_features = [np.array([age, bmi, children,sex,smoker])]\r\n prediction = model.predict(scalar.transform(final_features))\r\n\r\n output = round(prediction[0], 2)\r\n logging.info(output)\r\n return render_template(\"insurance_premium_predictor_home.html\", prediction_text='Hi {name}, your Insurance Premium could be Rs: {output}'.format(name= name, output=output))",
"def covid_cases_predict(request):\n\n print(request)\n data_cases = []\n country_predict_cases = []\n country = \"\"\n i = 0\n\n queryset_covid = CovidData.objects.all().order_by(\"country\")\n for covid_entry in queryset_covid:\n if country != covid_entry.country:\n if country != \"\":\n print(data_cases)\n predict_cases = linear_reg_predict(data_cases)\n predict_cases.append(country)\n print(predict_cases)\n country_predict_cases.append(predict_cases)\n i = 0\n data_cases.clear()\n country = covid_entry.country\n data_cases.append([i, covid_entry.confirmed])\n i = i + 1\n\n return JsonResponse(\n data={\n \"country_predict_cases\": country_predict_cases,\n }\n )",
"def vaccine_doses_predict(request):\n\n print(request)\n vaccine_doses = []\n country_predict_doses = []\n country = \"\"\n i = 0\n\n queryset_vaccine = VaccineData.objects.all().order_by(\"country\")\n for vaccine_entry in queryset_vaccine:\n if country != vaccine_entry.country:\n if country != \"\":\n print(vaccine_doses)\n predict_doses = linear_reg_predict(vaccine_doses)\n predict_doses.append(country)\n print(predict_doses)\n country_predict_doses.append(predict_doses)\n i = 0\n vaccine_doses.clear()\n country = vaccine_entry.country\n vaccine_doses.append([i, vaccine_entry.doses_administered])\n i = i + 1\n\n return JsonResponse(\n data={\n \"country_predict_doses\": country_predict_doses,\n }\n )",
"def yield_prediction():\n ## input code here\n if request.method == \"POST\":\n re = request.get_json()\n city = re[\"city\"]\n state = re[\"state\"]\n ## convert into lower case\n state = state.lower()\n city = city.lower()\n model_crop = re[\"crop\"]\n model_crop = model_crop.lower()\n model_season = re[\"season\"]\n model_season = model_season.lower()\n model_area = re[\"area\"]\n model_area = int(model_area)\n\n ## store name of crop for the graph\n crop = model_crop\n ## preprocesss the code\n\n try:\n state_le = load(\"static/labelencoder/state_le.joblib\")\n district_le = load(\"static/labelencoder/district_le.joblib\")\n season_le = load(\"static/labelencoder/season_le.joblib\")\n crop_le = load(\"static/labelencoder/crop_le.joblib\")\n model_crop = crop_le.transform([model_crop])[0]\n model_season = season_le.transform([model_season])[0]\n model_state = state_le.transform([state])[0]\n model_city = district_le.transform([city])[0]\n except:\n response_dict = {\n \"status\": False,\n \"message\": \"Enter Valid Data\"\n }\n return jsonify(response_dict)\n\n model_city = int(model_city)\n model_state = int(model_state)\n model_crop = int(model_crop)\n model_season = int(model_season)\n model_para = [model_state, model_city, model_season, model_crop, model_area]\n\n ## prediction code here\n\n import requests\n # NOTE: you must manually set API_KEY below using information retrieved from your IBM Cloud account.\n API_KEY = \"S30qFHkYTHMDO81ijSRiGSiE1jOfnlt01Vtn9UBU2KqL\"\n token_response = requests.post('https://iam.cloud.ibm.com/identity/token',\n data={\"apikey\": API_KEY, \"grant_type\": 'urn:ibm:params:oauth:grant-type:apikey'})\n mltoken = token_response.json()[\"access_token\"]\n\n header = {'Content-Type': 'application/json', 'Authorization': 'Bearer ' + mltoken}\n\n # NOTE: manually define and pass the array(s) of values to be scored in the next line\n payload_scoring = {\"input_data\": [\n {\"fields\": [\"State_Name\", \"District_Name\", \"Season\", \"Crop\", \"Area\"], \"values\": [model_para]}]}\n\n response_scoring = requests.post(\n 'https://us-south.ml.cloud.ibm.com/ml/v4/deployments/180fe5c1-a652-4e59-8b33-781326790706/predictions?version=2021-07-16',\n json=payload_scoring, headers={'Authorization': 'Bearer ' + mltoken})\n\n output = response_scoring.json()\n\n ## retrive the output\n\n pred_yield = output[\"predictions\"][0]['values'][0][0]\n pred_production = pred_yield * model_area\n\n ## PIE CHART\n try:\n kharif_value = kharif_yield.query.filter_by(crop_name=crop).first()\n kharif_values = kharif_value.yield_value\n except:\n kharif_values = 0\n try:\n rabi_value = rabi_yield.query.filter_by(crop_name=crop).first()\n rabi_values = rabi_value.yield_value\n except:\n rabi_values = 0\n\n try:\n summer_value = summer_yield.query.filter_by(crop_name=crop).first()\n summer_values = summer_value.yield_value\n except:\n summer_values = 0\n\n try:\n winter_value = winter_yield.query.filter_by(crop_name=crop).first()\n winter_values = winter_value.yield_value\n except:\n winter_values = 0\n\n try:\n autumn_value = autumn_yield.query.filter_by(crop_name=crop).first()\n autumn_values = autumn_value.yield_value\n except:\n autumn_values = 0\n\n try:\n whole_year_value = whole_year_yield.query.filter_by(crop_name=crop).first()\n whole_year_values = whole_year_value.yield_value\n except:\n whole_year_values = 0\n\n season_name = ['kharif', 'rabi', 'summer', 'winter', 'autumn', 'whole year']\n yield_list = [kharif_values, rabi_values, summer_values, winter_values, autumn_values, 
whole_year_values]\n\n season_yield_dict = dict()\n pie_list = list()\n for season, value in zip(season_name, yield_list):\n if value == 0:\n pass\n else:\n season_yield_dict[season] = round(value, 2)\n pie_list.append(round(value, 2))\n bar_graph_label = list(season_yield_dict.keys())\n pie_final_list = list()\n sum_list = sum(pie_list)\n for val in pie_list:\n suceess = val / sum_list\n suceess = round(suceess, 2)\n pie_final_list.append(suceess * 100)\n\n ## reponse dict here\n response_dict = {\n \"predYield\": pred_yield,\n \"predProduction\": pred_production,\n \"barGraphLabel\": bar_graph_label,\n \"barGraphvalue\": yield_list,\n \"pieChartLabel\": bar_graph_label,\n \"pieChartValue\": pie_final_list\n }\n return jsonify(response_dict)",
"def index():\n # ip1= jsonify({'ip': request.remote_addr})\n # xx = request.remote_addr\n # gg = request.remote_user\n ip2=request.environ.get('HTTP_X_REAL_IP', request.remote_addr) \n # ip = requests.get('https://api.ipify.org').text\n\n # if request.environ.get('HTTP_X_FORWARDED_FOR') is None:\n # print('22',request.environ['REMOTE_ADDR'])\n # else:\n # print('11',request.environ['HTTP_X_FORWARDED_FOR']) # if behind a proxy\n\n # return f'request.remote_addr {xx} request.remote_user {gg} request.environ.get {ip2} requests.get(https: {ip} '\n \n\n # query = '103.194.67.94'\n # query = ip\n # url = f\"http://ip-api.com/json/{query}\"\n # payload = \"{\\\"ips\\\": [\\\"1.1.1.1\\\", \\\"1.2.3.4\\\"]}\"\n # response_ip = requests.request(\"POST\", url, data=payload)\n # y=response_ip.json()\n\n \"\"\"get weathrer condition \"\"\"\n\n key = '53d7f1dde8564a69838135859212907'\n q = ip2\n url = f'http://api.weatherapi.com/v1/current.json?key={key}&q={q}&aqi=no'\n response = requests.request(\"POST\", url)\n\n weather_json = response.json()\n current_temp = weather_json[\"current\"][\"temp_c\"]\n print (current_temp)\n temps = Temp.query.all()\n for temp in temps:\n if temp.mintemp < current_temp < temp.maxtemp:\n condition = temp\n photo = random.choice(condition.photo)\n return render_template('main.html', weather_json=weather_json, photo=photo)\n abort (404)\n return render_template('main.html',weather_json=weather_json, photo =photo)",
"def check_presence_exa_conc_lab(request):\n\n # reports = request.GET.get('reports',None)\n rep = request.GET.get('id_report',None)\n language = request.GET.get('language',None)\n usecase = request.GET.get('usecase',None)\n reports = None\n if request.method == 'POST':\n request_body_json = json.loads(request.body)\n reports = request_body_json['reports']\n if rep is not None and language is not None:\n report = Report.objects.get(id_report = rep,language = language)\n usecase = report.name_id\n # print(usecase)\n json_resp = {}\n if usecase in ['colon','uterine cervix','lung']:\n bool = check_exa_lab_conc_only(usecase)\n print('bool',bool)\n else:\n bool = [False,False]\n json_resp['labels'] = bool[0]\n json_resp['concepts'] = bool[1]\n elif usecase is not None:\n json_resp = {}\n json_resp['labels'] = False\n json_resp['concepts'] = False\n\n # labels = []\n # concepts = []\n json_resp = {}\n if usecase in ['colon','uterine cervix','lung']:\n bool = check_exa_lab_conc_only(usecase)\n else:\n bool = [False,False]\n json_resp['labels'] = bool[0]\n json_resp['concepts'] = bool[1]\n # labels.append(bool[0])\n # concepts.append(bool[1])\n # if False in labels:\n # json_resp['labels'] = False\n # else:\n # json_resp['labels'] = True\n #\n # if False in concepts:\n # json_resp['concepts'] = False\n # else:\n # json_resp['concepts'] = True\n elif reports is not None:\n report_list = json.loads(reports)\n json_resp = {}\n json_resp['labels'] = False\n json_resp['concepts'] = False\n usecases = []\n for rep in report_list:\n # rep = json.loads(rep)\n if rep['usecase'] not in usecases:\n usecases.append(rep['usecase'])\n labels = []\n concepts = []\n for u in usecases:\n # print(u)\n json_resp = {}\n if u in ['colon', 'uterine cervix', 'lung']:\n bool = check_exa_lab_conc_only(u)\n else:\n bool = [False, False]\n\n labels.append(bool[0])\n concepts.append(bool[1])\n if False in labels:\n json_resp['labels'] = False\n else:\n json_resp['labels'] = True\n\n if False in concepts:\n json_resp['concepts'] = False\n else:\n json_resp['concepts'] = True\n\n else:\n json_resp={'error':'a usecase is needed'}\n\n print(json_resp)\n return JsonResponse(json_resp)",
"def navebarre_prediction(request):\r\n return render(request, 'menu/navebarre_prediction.html')",
"def temp(request):\n if request.method == 'GET':\n response = {request.GET.get('text', None)}\n # Exception Block t handle errors\n try:\n # Try to get output from our model\n model = joblib.load(os.getcwd()+'/model.pkl')\n output_array = model.predict([main.spacy_cleaner(str(response))])\n return {\"Sucess\": True ,'Sentiment': output_array[0].item()}\n\n except (ValueError, TypeError) as e:\n # If any error occurs\n return {\"Sucess\": False ,'Sentiment':'Null'}",
"def scrap_societe_site(page):\n\n req = requests.get(page)\n soup = BeautifulSoup(req.content, \"html.parser\")\n\n if soup.find(\"h1\", attrs={\"id\": \"identite_deno\"}):\n new_data.loc[i, \"enseigne_et1\"] = soup.find(\"h1\", attrs={\"id\":\"identite_deno\"}).get_text().strip()\n if soup.find(\"table\", attrs={\"id\":\"rensjur\"}):\n siret_det = soup.find(\"table\", attrs={\"id\":\"rensjur\"})\n siret_det = siret_det.get_text().split(\"\\n\")\n for j in range(len(siret_det)):\n if \"SIRET\" in siret_det[j]:\n new_data.loc[i, \"siret\"] = siret_det[j+1]\n break\n\n if soup.find(\"table\", attrs={\"id\":\"rensjurcomplete\"}):\n details = soup.find(\"table\", attrs={\"id\":\"rensjurcomplete\"})\n details_list = details.get_text().split(\"\\n\")\n for j in range(len(details_list)):\n if \"Adresse\" in details_list[j]:\n new_data.loc[i, \"adr_et_voie_lib\"] = details_list[j+1]\n if \"postal\" in details_list[j]:\n new_data.loc[i, \"adr_et_post\"] = details_list[j+1]\n if \"NAF\" in details_list[j] and is_nan(new_data.loc[i, \"naf\"]):\n new_data.loc[i, \"naf\"] = details_list[j+1]\n if \"Ville\" in details_list[j]:\n new_data.loc[i, \"adr_et_com_lib\"] = details_list[j+1]\n\n elif soup.find(\"table\", attrs={\"id\":\"etab\"}):\n details = soup.find(\"table\", attrs={\"id\":\"etab\"})\n details_list = details.get_text().split(\"\\n\")\n for j in range(len(details_list)):\n if \"Adresse\" in details_list[j]:\n new_data.loc[i, \"adr_et_voie_lib\"] = details_list[j+1]\n if \"postal\" in details_list[j]:\n new_data.loc[i, \"adr_et_post\"] = details_list[j+1]\n if \"NAF\" in details_list[j] and is_nan(new_data.loc[i, \"naf\"]):\n new_data.loc[i, \"naf\"] = details_list[j+1]\n if \"Ville\" in details_list[j]:\n new_data.loc[i, \"adr_et_com_lib\"] = details_list[j+1]",
"def predict():\n return render_template(\n 'predict.html',\n year=datetime.now().year,\n )",
"def detail(request):\r\n\r\n\tdataset = request.GET.get('dataset', '')\r\n\tdatatype = request.GET.get('datatype', 'RNA')\r\n\ttissue = request.GET.get('tissue', 'brain')\r\n\tcategory = request.GET.get('category', 'region')\r\n\tgroup = request.GET.get('group', 'PFC')\r\n\tcomparison = request.GET.get('comparison', 'AD-vs-Control')\r\n\tfeature_symbols_in_interest = request.GET.get('features', '').split(' ')\r\n\tcollection_name = \"%s_%s_%s-%s_%s\" % (datatype,\r\n\t\t\t\t\t\t\t\t\t\t\ttissue,\r\n\t\t\t\t\t\t\t\t\t\t\tcategory,\r\n\t\t\t\t\t\t\t\t\t\t\tgroup,\r\n\t\t\t\t\t\t\t\t\t\t\tcomparison)\r\n\t\"\"\"\r\n\t\tWe should split POST[\"featureInput\"] here\r\n\t\"\"\"\r\n\t# import pdb; pdb.set_trace();\r\n\t# feature_symbols_in_interest = split_feature_input_to_list(request.POST[\"featureInput\"])\r\n\r\n\tway_to_choose_probe = request.GET.get('way_to_choose_probe', 'fold change')\r\n\r\n\tall_datasets = test_stat_client.get_all_datasets(collection_name)\r\n\r\n\ttest_statistics = list(test_stat_client.get_all_for_this_category(collection_name))\r\n\r\n\tdisease_state_list = test_stat_client.get_disease_state_list(collection_name)\r\n\r\n\ttest_statistics = pd.DataFrame(test_statistics)\r\n\r\n\t# Filter 1 - dataset accession & features in interest\r\n\tfilt_ind = (test_statistics['dataset_accession'] == dataset) & (test_statistics['symb'].isin(feature_symbols_in_interest))\r\n\ttest_stat_df = test_statistics[filt_ind]\r\n\r\n\t# Filter 2 - remove duplicates\r\n\t\"\"\"\r\n\t\tHere we provide options for user to choose how to select a probe when \r\n\t\tmultiple probes are corresponding to one feature\r\n\t\"\"\"\r\n\r\n\tif way_to_choose_probe == \"fold change\":\r\n\t\ttest_stat_df = filtered_duplicate_by(test_stat_df, by='fc', group_index=['symb'])\r\n\r\n\telif way_to_choose_probe == \"limma p value\" : \r\n\t\ttest_stat_df = filtered_duplicate_by(test_stat_df, by='lp', group_index=['symb'])\r\n\r\n\telif way_to_choose_probe == \"t test p value\" :\r\n\t\ttest_stat_df = filtered_duplicate_by(test_stat_df, by='tp', group_index=['symb'])\r\n\r\n\t# Split dataframe for stat table display and graph display\r\n\tstat_table = test_stat_df.drop(['eval', 'dsl'], axis=1)\r\n\tstat_graph_exprs = test_stat_df[['symb', 'eval']]\r\n\tstat_graph_ds = disease_state_list[dataset]\r\n\r\n\t# import pdb; pdb.set_trace()\r\n\tstat_table['entrez_gene_id'] = stat_table.apply(from_symbol_to_entrez_gene_id, axis=1)\t\t\t\r\n\tstat_table['string_id'] = from_single_symbol_to_string_id(stat_table['symb'])\r\n\r\n\tds_1_count = sum(stat_graph_ds)\r\n\tds_0_count = len(stat_graph_ds) - sum(stat_graph_ds)\r\n\r\n\tstat_graph_ds_1 = [True if x == 1 else False for x in stat_graph_ds]\r\n\tstat_graph_ds_0 = [True if x == 0 else False for x in stat_graph_ds]\r\n\t# stat_graph_ds_0 = stat_graph_ds == 0\r\n\r\n\theatmap_feature_count = test_stat_df.shape[0]\r\n\theatmap_sample_count = len(stat_graph_ds)\r\n\theatmap_df_row_count = heatmap_sample_count * heatmap_feature_count\r\n\r\n\t\r\n\t# import pdb;pdb.set_trace\r\n\t# Generate a expression table (row as feature)\r\n\texpression_table = pd.DataFrame(list(stat_graph_exprs['eval']))\r\n\r\n\t\r\n\t# import pdb;pdb.set_trace();\r\n\t# Transpose table before sorting by disease state\r\n\texpression_table = pd.DataFrame.transpose(expression_table)\r\n\r\n\t# Get new expression table sorted by disease state\r\n\texpression_table = expression_table[stat_graph_ds_1].append(expression_table[stat_graph_ds_0], ignore_index=True)\r\n\r\n\t### Normalize row 
expression\r\n\texpression_table_normalized = normalize_heatmap_row_expression(expression_table)\r\n\t\r\n\t# Get minimum and maximum value of expression\r\n\texprs_min = np.nanmin(expression_table_normalized.values)\r\n\texprs_max = np.nanmax(expression_table_normalized.values)\r\n\r\n\r\n\theatmap_dataset_df = pd.DataFrame({\r\n\t\t\t'0' : sorted(range(0, heatmap_sample_count) * heatmap_feature_count), # sample_x\r\n\t\t\t'1' : range(0, heatmap_feature_count) * heatmap_sample_count,\t# feature_y\r\n\t\t\t'2' : [val for row in expression_table_normalized.values.tolist() for val in row] #expression_z\r\n\t\t})\r\n\r\n\t# Remove NANs in heatmap data series\r\n\tnot_nan_index = np.invert(np.isnan(heatmap_dataset_df['2']))\r\n\theatmap_dataset_df = heatmap_dataset_df[not_nan_index]\r\n\t# Prepare one dimentional scatter plot\r\n\r\n\t# Final output\r\n\t# Scatter plot\r\n\tstate_1_data_series = generate_scatterplot_series(range(0, ds_1_count), 0, expression_table)\r\n\tstate_0_data_series = generate_scatterplot_series(range(ds_1_count, ds_1_count+ds_0_count), 1, expression_table)\t\r\n\tstate_1_name = \"AD\"\r\n\tstate_0_name = \"Control\"\r\n\t# Heatmap\r\n\theatmap_feature_list = [x.encode('utf-8') for x in list(stat_graph_exprs['symb'])]\r\n\theatmap_sample_ds_list = ['AD'] * ds_1_count + ['Control'] * ds_0_count\r\n\theatmap_datasets = heatmap_dataset_df.values.tolist()\r\n\theatmap_extremes = [exprs_min, exprs_max]\r\n\t# Statistic table\r\n\tstat_table_output = stat_table.to_dict(outtype='records')\r\n\r\n\treturn render(request, 'feature_stat_detail.html',\r\n\t\t\t\t{\r\n\t\t\t\t\t'dataset_name' : dataset,\r\n\t\t\t\t\t'test_stat' : stat_table_output,\r\n\t\t\t\t\t'feature_list' : heatmap_feature_list,\r\n\t\t\t\t\t'sample_state_list' : heatmap_sample_ds_list,\r\n\t\t\t\t\t'heatmap_datasets' : heatmap_datasets,\r\n\t\t\t\t\t'heatmap_extremes' : heatmap_extremes,\r\n\t\t\t\t\t'state_1_data_series' : state_1_data_series,\r\n\t\t\t\t\t'state_0_data_series' : state_0_data_series,\r\n\t\t\t\t\t'state_1_name' : state_1_name,\r\n\t\t\t\t\t'state_0_name' : state_0_name,\r\n\t\t\t\t})",
"def webScraper(self):\n try:\n self.covid_df = pd.read_csv(self.COVID_URL)\n except:\n sys.exit('COVID data is unavailable at source.')\n \n latest_date = self.covid_df['date'].max()\n earliest_date = self.covid_df['date'].min()\n self.covid_df = self.covid_df[self.covid_df['date'] == self.date.strftime('%Y-%m-%d')]\n \n if self.covid_df.empty:\n exit_string = 'Requested date not available. Latest date available is ' + latest_date + ' while earliest is ' + earliest_date\n sys.exit(exit_string)\n else:\n self.covid_df = self.covid_df[self.covid_df['location'] != 'World']\n \n try:\n self.countries_centroids = pd.read_html(self.CENTROIDS_URL, header=0, index_col='country')[0]\n except:\n sys.exit('Central coordinates data for countries unavailable from Google developers.')\n \n try:\n self.geo_data = requests.get(self.GEOJSON_URL).json()\n except:\n sys.exit('GeoJSON data unavailable to draw country polygons.')",
"def search_form(request): \n\tip = get_ip(request, right_most_proxy=True)\n\tIpAddressInformation.objects.create(ip_address=ip)\n\ttitle= \"Please search by:\"\n\t# opening files for plotting stat\n\torganismName=overallSumresult['organism']\n\tspeciesName=overallSumresult['species']\n\tspeciesstat=overallSumresult['speciesstat'][0:10]\n\tspeciesName=list(set(speciesName))\n\tspeciesName=sorted(speciesName)\n\tspeciesstat.insert(0,['Species','Unique protein','Unique peptide'])\n\tgostat=overallSumresult['gostat'][:10]\n\tgostat.insert(0,['Go Term','Unique proteins in various species'])\n\tkeggstat=overallSumresult['keggstat'][:10]\n\tkeggstat.insert(0,['Pathway Name', 'Unique proteins in various species', 'PeptideTracker', 'CPTAC', 'PASSEL', 'SRMAtlas', 'PanoramaWeb'])\n\tpepseqdic=finalresult['pepseqdic']\n\tprodic=finalresult['prodic']\n\tpepdatavalues=finalresult['pepdatavalues']\n\tprodatavalues=finalresult['prodatavalues']\n\tmrmdatabase=finalresult['mrmdatabase']\n\tallpepassay=totalpepassay['totalassayNonValid']\n\tallvalidpepassay=totalpepassay['totalassayValid']\n\tallunqStripPep=totalpepassay['totalstripPep']\n\tuqpep=len(pepseqdic)\n\tuqpro=len(prodic)\n\tkeggstat=[i[:2] for i in keggstat]\n\tspeciesstat=[i[:2] for i in speciesstat]\n\tcontextindex ={\"title\": title,\"uqpro\":uqpro, \"uqpep\":uqpep,\\\n\t\t\t\t\t\"speciesName\":speciesName,\"speciesnumber\":len(speciesName)-1,\\\n\t\t\t\t\t\"speciesstat\":json.dumps(speciesstat),\\\n\t\t\t\t\t\"gostat\":json.dumps(gostat),\"keggstat\":json.dumps(keggstat),\\\n\t\t\t\t\t'allpepassay':allpepassay,\\\n\t\t\t\t\t'allvalidpepassay':allvalidpepassay,\\\n\t\t\t\t\t'allunqStripPep':len(allunqStripPep),\\\n\t\t\t\t\t'jvennpep':json.dumps(pepdatavalues),\\\n\t\t\t\t\t'jvennprot':json.dumps(prodatavalues),\\\n\t\t\t\t\t'jvennmrmdb':json.dumps(mrmdatabase)\\\n\t\t\t\t\t}\n\treturn render(request, 'index.html', contextindex)",
"def index():\n if request.method == 'GET':\n\n\n return render_template('index.html')\n \n if request.method == 'POST':\n\n message = request.form['text']\n data_vector = text_vector.transform([message])\n data_transform = text_transformer.transform(data_vector)\n prediction = lsvc_model.predict(data_transform)\n # output_prediction = lsvc_model.predict(data_transform)\n \n # return render_template('result.html', output_prediction = prediction)\n return render_template('index.html', output_prediction = prediction)",
"def search(request):\n\tip = get_ip(request, right_most_proxy=True)\n\tIpAddressInformation.objects.create(ip_address=ip)\n\tif request.method=='POST':\n\t\tsearchterm=request.POST.getlist('searchterm')# user input for searching result\n\t\tsearchterm=map(str, searchterm)\n\t\tsearchterm=searchterm[0]\n\t\tsearchterm= searchterm.strip()\n\n\t\tif len(searchterm)>0:\n\t\t\tcontextres =[]\n\t\t\t#build elasticsearch query to search data\n\t\t\tquery={\n\t\t\t\t\"query\":{\n\t\t\t\t\t\"bool\":{\n\t\t\t\t\t\t\"should\":[{\n\t\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\t\"query\":searchterm,\n\t\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\t\"fields\":searchFields,\n\t\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}]\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t#generate random file name to store search result in json format\n\t\t\tnameFIle=names.get_first_name()\n\t\t\tjsonfilename=nameFIle+'_basic_search.json'\n\t\t\tjsonfilepath=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'basicsearch', 'results', jsonfilename)\n\t\t\tjsonfileoutput= open(jsonfilepath,'w')\n\t\t\tjfinaldata=[]\n\t\t\tes.indices.refresh(index=\"mrmassaydb-index\")\n\t\t\t#elasticsearch will search data\n\t\t\tres=helpers.scan(client=es,scroll='2m',index=\"mrmassaydb-index\", doc_type=\"mrmassaydb-type\",query=query,request_timeout=30)\n\t\t\tjfinaldata=[]\n\t\t\t#if data is valid based on uniprotkb release then it will display\n\t\t\tfor i in res:\n\t\t\t\tjdic=i['_source']\n\t\t\t\tjdic={str(tkey):force_text(tvalue) for tkey,tvalue in jdic.items()}\n\t\t\t\tif jdic[\"UniprotKb entry status\"] ==\"Yes\" and jdic['UniProtKB Accession'] !='502':\n\t\t\t\t\tjdic[\"PPI\"] =\"View\"\n\t\t\t\t\tjdic[\"sel\"] =\"\"\n\t\t\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('<br>','|')\n\t\t\t\t\tjdic[\"SRMAtlas URL\"]=jdic[\"SRMAtlas URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Passel URL\"]=jdic[\"Passel URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"CPTAC URL\"]=jdic[\"CPTAC URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Panoramaweb URL\"]=jdic[\"Panoramaweb URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"PeptideTracker URL\"]=jdic[\"PeptideTracker URL\"].replace('\\\\','')\n\t\t\t\t\t#if jdic[\"Pathway Name\"].lower() !='na':\n\t\t\t\t\t#jdic[\"Pathway Name\"]=re.sub(r\"(\\w)([A-Z])\",r\"\\1|\\2\",jdic[\"Pathway Name\"])\n\t\t\t\t\tjfinaldata.append(jdic)\n\t\t\tes.indices.refresh(index=\"mrmassaydb-index\")\n\t\t\t#checking any result generated by database\n\t\t\tfoundHits=len(jfinaldata)\n\t\t\t#storing only 10000 rows in json format\n\t\t\tjson.dump(jfinaldata[:10000],jsonfileoutput)\n\t\t\tjsonfileoutput.close()\n\t\t\t# if result found then do other job\n\t\t\tif foundHits >0:\n\t\t\t\tstatsummary=summaryStatcal(jfinaldata) # sent data to this funcation for generating stat\n\t\t\t\tpathwaychart=statsummary['pathwaychart']\n\t\t\t\tpathwaychart=[i[:2] for i in pathwaychart]\n\t\t\t\tspecieslist=statsummary['specieslist']\n\t\t\t\ttotallist=statsummary['total']\n\t\t\t\tsubcell=statsummary['subcell']\n\t\t\t\tgodic=statsummary['godic']\n\t\t\t\tjvennprot=statsummary['jevennstat'][0]\n\t\t\t\tjvennpep=statsummary['jevennstat'][1]\n\t\t\t\tmrmdatabase=statsummary['jevennstat'][2]\n\t\t\t\tsortedgodic=OrderedDict(sorted(godic.items(), key=lambda t: t[1])) # sorting GO 
data\n\t\t\t\tupdatedgodic=dict(list(sortedgodic.items()))\n\t\t\t\tpepseqdataseries=ast.literal_eval(json.dumps(statsummary['pepseqdataseries'])) #dumping data into json format\n\t\t\t\tprodataseries=statsummary['prodataseries']\n\t\t\t\tunqisostat=statsummary['unqisostat']\n\t\t\t\tjsonfilepathStat=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'basicsearch', 'statsummary', jsonfilename) #storing stat result in json format\n\t\t\t\tjsonfileoutputStat= open(jsonfilepathStat,'w')\n\t\t\t\tjson.dump(statsummary,jsonfileoutputStat)\n\t\t\t\tjsonfileoutputStat.close()\n\t\t\t\turlname=\"'/resultFile/jsonData/resultJson/basicsearch/results/\"+jsonfilename+\"'\"\n\n\t\t\t\tcontextindex={\n\t\t\t\t\t\"filename\":urlname,\"colname\":json.dumps(colname),\n\t\t\t\t\t'query': searchterm,'foundHits':foundHits,\n\t\t\t\t\t'pathwaychart':pathwaychart[:11],'specieslist':specieslist,\n\t\t\t\t\t'totallist':totallist,'subcell':subcell,\n\t\t\t\t\t'updatedgodic':updatedgodic,'pepseqdataseries':pepseqdataseries,\n\t\t\t\t\t'prodataseries':prodataseries,'unqisostat':unqisostat,\n\t\t\t\t\t'jvennprot':json.dumps(jvennprot),'jvennpep':json.dumps(jvennpep),'jvennmrmdb':json.dumps(mrmdatabase)\n\t\t\t\t\t}\n\t\t\t\treturn render(request,'resultform.html',contextindex)\n\t\t\telse:\n\t\t\t\treturn render(request,'resultform.html',{'foundHits':foundHits})\n\t\telse:\n\t\t\treturn render(request,'resultform.html',{'foundHits':0})",
"def fit():\n form = MedForm(request.form)\n if request.method == 'POST' and form.validate():\n\n zipcode = form.zipcode.data\n # Check the zipcode\n\n plan = form.plan.data\n medication = form.medication.data\n\n ip = str(request.environ.get('HTTP_X_REAL_IP', request.remote_addr))\n rq = Requests(**dict(user=current_user.id, ip = ip, zipcode = zipcode, plan = plan, drug = medication))\n rq.save()\n\n # Process either medicare or medicaid\n plan_type = form.plan_type.data\n try:\n if plan_type == 'medicare':\n table = get_medicare_plan(medication, plan, zipcode)\n else:\n table = get_medicaid_plan(medication, plan, zipcode, plan_type)\n\n except tools.BadPlanName as e:\n form.errors['plan_name'] = str(e)\n context = {'form': form}\n html = 'fit.html'\n\n except tools.BadLocation as e:\n form.errors['zipcode'] = str(e)\n context = {'form': form}\n html = 'fit.html'\n else:\n # You have to order the data in a list or it won't show right\n data = []\n for item in table['data']:\n row = [item[h] for h in table['heading']]\n data.append(row)\n\n context = {'data':data,\n 'head':table['heading'],\n 'drug':medication,\n 'pa': table['pa'],\n 'zipcode':zipcode,\n 'plan':plan,\n 'plan_type':form.plan_type.data,\n }\n html = 'table.html'\n\n # If its a GET see if parameters were passed\n else:\n if request.method == 'GET':\n form.zipcode.data = request.args.get('zipcode', \"\")\n form.plan.data = request.args.get('plan', \"\")\n form.medication.data = request.args.get('drug', \"\")\n form.plan_type.data = request.args.get('plan_type', \"medicare\")\n\n # a POST with errors\n elif form.errors:\n if 'plan_type' in form.errors:\n form.errors['plan_type'] = \"Please pick a Medicare, Medicaid, or Private plan\"\n\n context = {'form': form}\n html = 'fit.html'\n\n content = render_template(html, **context)\n return content",
"def search_engine(city_name):\n\n API_Key = \"zIGuOeUd0aE4O621Gj1KGDc6JiZ3PAGb\"\n http_request = f\"http://dataservice.accuweather.com/locations/v1/cities/search?apikey={API_Key}&q={city_name}&language=pt-br\"\n\n search_request = requests.get(http_request)\n\n if search_request.status_code != 200:\n print(f\"It was not possible to retrive information about {city_name}\")\n\n else:\n search_response = search_request.json()\n print(f\"Obtaining information about the weather in {city_name}\")\n\n return search_response[0]",
"def get_condition_template(request):\n\n try:\n age = int(request.POST.get(\"age\"))\n gender = int(request.POST.get(\"gender\"))\n hometownId = str(request.POST.get(\"hometownId\"))\n universityId = int(request.POST.get(\"universityId\"))\n schoolId = map(float, request.POST.getlist(\"schoolId[]\"))\n hobbiesId = map(float, request.POST.getlist(\"hobbiesId[]\"))\n templates = recommend_template(age, gender, hometownId, universityId, schoolId, hobbiesId)\n except:\n return JsonResponse({\"data\": []})\n else:\n return JsonResponse({\"data\": templates})",
"def page1(self):\n result = request101.GET('/Cars_Sample_App/supercars.do', None,\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'), ))\n self.token_query = \\\n httpUtilities.valueFromBodyURI('query') # 'manu'\n # 15 different values for token_mid found in response, using the first one.\n self.token_mid = \\\n httpUtilities.valueFromBodyURI('mid') # '3'\n\n grinder.sleep(124)\n request102.GET('/Cars_Sample_App/images/enquire_but.gif')\n\n request103.GET('/Cars_Sample_App/images/line.gif')\n\n request104.GET('/Cars_Sample_App/images/manufacturers/Bmw.gif')\n\n request105.GET('/Cars_Sample_App/images/manufacturers/AstonMartin.gif')\n\n request106.GET('/Cars_Sample_App/images/manufacturers/Ferrari.gif')\n\n request107.GET('/Cars_Sample_App/images/insurance_but.gif')\n\n grinder.sleep(90)\n request108.GET('/Cars_Sample_App/images/manufacturers/Porsche.gif')\n\n request109.GET('/Cars_Sample_App/images/manufacturers/Jaguar.gif')\n\n request110.GET('/Cars_Sample_App/images/pipe.gif')\n\n request111.GET('/Cars_Sample_App/images/manufacturers/Lotus.gif')\n\n return result",
"def perform_scraping(current_session):\r\n\r\n # List Array storing all relevant decision information\r\n final_data_fetch = []\r\n pagination_index = global_constants['NUMBER_PAGE_TO_SCRAPE_FIRST']\r\n while pagination_index < global_constants['NUMBER_PAGE_TO_SCRAPE_LAST']:\r\n print(\"Page:\", pagination_index, \" Collected records:\", len(final_data_fetch))\r\n\r\n # Get relevant admit-reject page based on pagination value\r\n result = current_session.get(global_constants['ALL_RESULTS_URL'] + str(pagination_index),\r\n headers=dict(referer=global_constants['ALL_RESULTS_URL']))\r\n tree = lxml_html.fromstring(result.content)\r\n\r\n # Get Nodes containing individual decisions for each page(approx 20 per page)\r\n decision_buckets = tree.xpath('//*[@class=\"row\"]/div[@class=\"col-sm-6\"]/div[@class=\"panel panel-warning\"]/div[@class=\"panel-body\"]')\r\n\r\n # If decision buckets are empty, captcha page has been encountered\r\n if len(decision_buckets) == 0:\r\n print(\"Captcha Time\")\r\n time.sleep(120)\r\n continue\r\n\r\n for individual_decision_bucket in decision_buckets:\r\n\r\n current_admit_status = ((individual_decision_bucket.xpath('./div[1]/div[2]/label'))[0]).text.strip()\r\n\r\n # Fetch results only if ADMIT or REJECT\r\n if current_admit_status.lower() == 'admit' or current_admit_status.lower() == 'reject':\r\n\r\n # Get relevant information from html page returned in response\r\n current_bucket_university_course = ((individual_decision_bucket.xpath('./div[1]/div[1]/h4/small'))[0]).text.replace(\"\\n\",\"\").strip()\r\n current_gre = get_gre_or_toefl(((((individual_decision_bucket.xpath('./div[2]/div[1]'))[0]).getchildren())[1]).tail)\r\n current_toefl = get_gre_or_toefl(((((individual_decision_bucket.xpath('./div[2]/div[2]'))[0]).getchildren())[1]).tail)\r\n current_gpa = get_gpa(((((individual_decision_bucket.xpath('./div[2]/div[3]'))[0]).getchildren())[1]).tail)\r\n current_workex = get_workex_months(((((individual_decision_bucket.xpath('./div[2]/div[4]'))[0]).getchildren())[1]).tail)\r\n\r\n current_university, current_course = split_bucket_university_course(current_bucket_university_course.lower())\r\n # Append decision information to final bucket only if minimum criteria met\r\n if current_university is not None and filter_criteria_met(current_gre, current_gpa, current_toefl):\r\n\r\n # Get UG College from profile of user\r\n profile_page_path = ((individual_decision_bucket.xpath('./div[1]/div[1]/h4/a'))[0]).attrib['href']\r\n profile_result = current_session.get(global_constants['HOME_PAGE'] + profile_page_path,\r\n headers=dict(referer=global_constants['PAST_RESULTS_URL']))\r\n profile_tree = lxml_html.fromstring(profile_result.content)\r\n ug_details_bucket = (profile_tree.xpath('//div[@class=\"col-sm-12 card\"][1]'))\r\n if len(ug_details_bucket) >= 1:\r\n ug_details_bucket = ug_details_bucket[0]\r\n current_ug_course = ((ug_details_bucket.xpath('./div[1]/div[7]/p[1]/b[1]'))[0]).text.replace(\"\\n\", \"\").strip()\r\n current_ug_college = ((ug_details_bucket.xpath('./div[1]/div[7]/p[2]'))[0]).text.replace(\"\\n\", \"\").strip()\r\n\r\n final_data_fetch.append([current_course, current_university, current_gpa, current_gre, current_toefl,\r\n current_workex, current_ug_course, current_ug_college, current_admit_status])\r\n\r\n # Add sleep time to allow for web scraping in undetected manner\r\n sleep_delay = random.choice([0, 1, 2, 3])\r\n time.sleep(sleep_delay)\r\n pagination_index += 1\r\n\r\n # Export final_data to excel sheet\r\n 
export_to_file(final_data_fetch)",
"def _process_html(self) -> None:\n opinion_json = self.request[\"response\"].json()\n for case in opinion_json:\n url = self._get_url(case[\"docketNumber\"], case[\"docketEntryId\"])\n status = (\n \"Published\"\n if case[\"documentType\"] == \"T.C. Opinion\"\n else \"Unpublished\"\n )\n self.cases.append(\n {\n \"judge\": case[\"judge\"],\n \"date\": case[\"filingDate\"][:10],\n \"docket\": case[\"docketNumber\"],\n \"url\": url,\n \"name\": titlecase(case[\"caseCaption\"]),\n \"status\": status,\n }\n )",
"def do_predict(self):\n answer = []\n response = []\n\n for it_predictions in json.loads(request.data.decode('UTF-8')):\n prediction = it_predictions['score']\n for ite_clf in g_list_of_classifier:\n answer.append(ite_clf.predict(prediction))\n if answer.count(True) > answer.count(False):\n response.append({'answer' : True})\n else:\n response.append({'answer' : False})\n return json.dumps(response, indent=4)",
"def predict():\n data = request.json\n\n if data:\n predict = bool(data[\"predict\"])\n\n if predict:\n if predictor.pred_dict[\"model\"] == 0:\n # ARIMA\n arima_forecast = predictor.get_prediction_arima()\n plots.arima_df = arima_forecast\n elif predictor.pred_dict[\"model\"] == 1:\n # Prophet\n prophet_forecast = predictor.get_prediction_prophet()\n plots.prophet_df = prophet_forecast\n elif predictor.pred_dict[\"model\"] == 2:\n # LSTM\n lstm_forecast = predictor.get_prediction_bidirectlstm()\n plots.lstm_df = lstm_forecast\n else:\n pass\n\n return 'Non tam praeclarum est scire latine, quam turpe nescire'",
"def advanced_search(request):\n\tip = get_ip(request, right_most_proxy=True)\n\tIpAddressInformation.objects.create(ip_address=ip)\n\tif request.method=='POST':\n\t\tsearchterm =request.POST.getlist('searchterm') # list of search term value associated with searchtype\n\t\tsearchtype =request.POST.getlist('searchtype') # list of search parameter\n\t\tsearchtermorg=request.POST.getlist('searchtermorg') # search term value for organism\n\t\tsearchtermfda=request.POST.getlist('searchtermfda') # search term value for FDA\n\t\tsearchtermlist=[]\n\t\tnameFIle=names.get_first_name() # generate random file name to store user search result\n\t\tfastaseq=[]\n\t\tfinalsearhdata=''\n\t\tunique_peptides = set()\n\t\ttryptic_peptide={}\n\t\tuserSuppliedPepSeqStatus=0\n\t\ttry:\n\t\t\tfastafile = request.FILES[\"fileupload\"].read()\n\t\t\tfinalsearhdata+='File'+':'+'Fasta Sequence'+' '\n\t\t\tcurrdate=str(datetime.datetime.now())\n\t\t\tcurrdate=currdate.replace('-','_')\n\t\t\tcurrdate=currdate.replace(' ','_')\n\t\t\tcurrdate=currdate.replace(':','_')\n\t\t\tcurrdate=currdate.replace('.','_')\n\t\t\tnameFIle=currdate+'_'+str(request.FILES[\"fileupload\"]).split('.')[0] # if user upload fasta file then file name will be replaced with user provided file name along with current data and time\n\t\t\tfastafilename=nameFIle+'.fasta'\n\t\t\t#storing user provided fasta file\n\t\t\tfastafilepath=os.path.join(settings.BASE_DIR, 'resultFile', 'fastaFIle', fastafilename)\n\t\t\tfastafilewrite=open(fastafilepath,\"w\")\n\t\t\tfastafilewrite.write(fastafile)\n\t\t\tfastafilewrite.close()\n\n\t\t\t#reading fasta file\n\t\t\tseqCounter=0\n\t\t\tfor useq_record in SeqIO.parse(fastafilepath, 'fasta'):\n\t\t\t\tseqCounter+=1\n\t\t\t\tseqheader = useq_record.id\n\t\t\t\tsequniID = seqheader.split(' ')[0]\n\t\t\t\tsequniID=sequniID.replace('>','')\n\t\t\t\ttempseqs = str(useq_record.seq).strip()\n\t\t\t\tnew_peptides = parser.cleave(tempseqs, 'trypsin')\n\t\t\t\tnew_peptides=[pep for pep in new_peptides if len(pep.strip()) > 3 and len(pep.strip()) <50]\n\t\t\t\ttryptic_peptide[seqCounter]=list(new_peptides)\n\t\t\t\tnew_peptides=list(set(new_peptides))\n\t\t\t\tunique_peptides.update(new_peptides)\n\t\t\t\tfastaseq.append(str(sequniID)+'_'+tempseqs.upper())\n\t\texcept MultiValueDictKeyError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\tfastafileindex=searchtype.index(\"FastaFile\")\n\t\t\t#delete data based on index from list\n\t\t\tdel searchtype[fastafileindex]\n\t\t\tdel searchterm[fastafileindex]\n\t\texcept ValueError:\n\t\t\tpass\n\n\t\ttry:\n\t\t\torgindex=searchtype.index(\"Organism\")\n\t\t\t#delete data based on index from list\n\t\t\tdel searchtype[orgindex]\n\t\t\tdel searchterm[orgindex]\n\t\texcept ValueError:\n\t\t\tpass\n\t\tif len(fastaseq)>0:\n\t\t\tunique_peptides=list(unique_peptides)\n\t\t\tunique_peptides=list(map(lambda x:x.lower(),unique_peptides))\n\t\tsearchtermorg=map(str, searchtermorg) # convert data into string\n\t\tsearchtermorg=map(lambda j: j.strip(), searchtermorg) # remove space\n\t\tsearchtermorg=filter(None, searchtermorg) # remove empty value\n\t\tunqsearchtermorg=list(set(searchtermorg))\n\t\tif len(unqsearchtermorg)>0:\n\t\t\tfinalsearhdata+='Organism'+':'+unqsearchtermorg[0].strip()+' '\n\t\t\t#build elasticsearch query for organism to search 
data\n\n\t\t\torgquery={\"should\":[\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\t\"query\":unqsearchtermorg[0].strip(),\n\t\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\t\"fields\":[\"Organism.ngram\"],\n\t\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t]\n\t\t\t\t\t}\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]=orgquery\n\t\t\tsearchtermlist.append(booldic)\n\n\t\ttry:\n\t\t\tfdaindex=searchtype.index(\"Assays for FDA approved Marker\")\n\t\t\t#delete data based on index from list\n\t\t\tdel searchtype[fdaindex]\n\t\t\tdel searchterm[fdaindex]\n\t\texcept ValueError:\n\t\t\tpass\n\n\t\tsearchtermfda=map(str, searchtermfda) # convert data into string\n\t\tsearchtermfda=map(lambda j: j.strip(), searchtermfda) # remove space\n\t\tsearchtermfda=filter(None, searchtermfda) # remove empty value\n\t\tunqsearchtermfda=list(set(searchtermfda))\n\t\tif len(unqsearchtermfda)>0:\n\t\t\tfinalsearhdata+='Assays for FDA approved Marker'+':'+unqsearchtermfda[0].strip()+' '\n\t\t\t#build elasticsearch query for FDA to search data\n\n\t\t\tfdaquery={\"should\":[\n\t\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\t\"query\":unqsearchtermfda[0].strip(),\n\t\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\t\"fields\":[\"Assays for FDA approved Marker.ngram\"],\n\t\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t]\n\t\t\t\t\t}\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]=fdaquery\n\t\t\tsearchtermlist.append(booldic)\n\t\tif 'Peptide Sequence' in searchtype:\n\t\t\tuserSuppliedPepSeqStatus=1\n\t\tfor i in range(0,len(searchtype)):\n\t\t\tsubsearchtype=searchtype[i]\n\t\t\tsubsearchterm=searchterm[i]\n\t\t\t#build elasticsearch query for all except organism and FDA to search data\n\t\t\tif '|' in subsearchterm:\n\t\t\t\tsubsearchterm=(subsearchterm.strip()).split('|')\n\t\t\telse:\n\t\t\t\tsubsearchterm=(subsearchterm.strip()).split('\\n')\n\t\t\tsubsearchterm=map(str, subsearchterm)\n\t\t\tsubsearchterm=map(lambda j: j.strip(), subsearchterm)\n\t\t\tsubsearchterm=filter(None, subsearchterm)\n\t\t\tif subsearchtype == 'Peptide Sequence':\n\t\t\t\tif userSuppliedPepSeqStatus==1:\n\t\t\t\t\tfinalsearhdata+=''.join(subsearchtype)+':'+';'.join(subsearchterm)+' '\n\t\t\t\t\tif len(unique_peptides)>0:\n\t\t\t\t\t\tsubsearchterm=[(item.strip()).lower() for item in subsearchterm]\n\t\t\t\t\t\tsubsearchterm=list(set(subsearchterm) & set(unique_peptides))\n\t\t\telse:\n\t\t\t\tfinalsearhdata+=''.join(subsearchtype)+':'+';'.join(subsearchterm)+' '\n\t\t\tif len(subsearchterm)>0:\n\t\t\t\tsubsearchterm=[(item.strip()).lower() for item in subsearchterm] #converting into lower case\n\t\t\t\tsubsearchterm=list(set(subsearchterm))\n\t\t\t\tshouldlist=[]\n\t\t\t\t\n\t\t\t\tfor x in subsearchterm:\n\t\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\t\"fields\":[str(subsearchtype)+\".ngram\"],\n\t\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t\t}\n\t\t\t\t\t\t\t}\n\t\t\t\t\tshouldlist.append(tempquery)\n\t\t\t\tbooldic={}\n\t\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\t\tsearchtermlist.append(booldic)\n\n\t\tif userSuppliedPepSeqStatus==0 and len(unique_peptides)>0:\n\t\t\tshouldlist=[]\n\t\t\tfor x in 
unique_peptides:\n\t\t\t\ttempquery={\n\t\t\t\t\t\t\t\"multi_match\":{\n\t\t\t\t\t\t\t\t\"query\":x.strip(),\n\t\t\t\t\t\t\t\t\"type\":\"best_fields\",\n\t\t\t\t\t\t\t\t\"fields\":[\"Peptide Sequence.ngram\"],\n\t\t\t\t\t\t\t\t\"minimum_should_match\":\"100%\"\n\t\t\t\t\t\t\t}\n\t\t\t\t\t\t}\n\t\t\t\tshouldlist.append(tempquery)\n\t\t\tbooldic={}\n\t\t\tbooldic[\"bool\"]={\"should\":shouldlist,\"minimum_should_match\": 1}\n\t\t\tsearchtermlist.append(booldic)\n\t\tunqfastaseq=list(set(fastaseq))\n\t\tif len(searchtermlist)>0 or len(unqfastaseq)>0:\n\t\t\tes.indices.refresh(index=\"mrmassaydb-index\")\n\n\t\t\tquery=\"\"\n\t\t\t#if len(searchtermlist)>0:\n\t\t\tquery={\n\t\t\t\t\"query\": {\n\t\t\t\t\t\"bool\": {\n\t\t\t\t\t\t\"must\":searchtermlist\n\t\t\t\t\t}\n\t\t\t\t}\n\t\t\t}\n\t\t\t# if len(searchtermlist)==0:\n\t\t\t# \tquery={\n\t\t\t# \t\t\"query\": {\n\t\t\t# \t\t\t\"match_all\": {}\n\t\t\t# \t\t}\n\t\t\t# \t}\n\t\t\t#storing user search result into json format\n\t\t\tjsonfilename=nameFIle+'_advance_search.json'\n\t\t\tjsonfilepath=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'adavancesearch', 'results', jsonfilename)\n\t\t\tjsonfileoutput= open(jsonfilepath,'w')\n\t\t\tjfinaldata=[]\n\t\t\tres=helpers.scan(client=es,size=1000,scroll='2m',index=\"mrmassaydb-index\", doc_type=\"mrmassaydb-type\",query=query,request_timeout=60)\n\t\t\t#res=helpers.scan(client=es,size=1000,scroll='2m',index=\"my-index\", doc_type=\"my-type\",query=query,request_timeout=30)\n\t\t\tjfinaldata=[]\n\t\t\tusersequnq=[]\n\t\t\tfor i in res:\n\t\t\t\tjdic=i['_source']\n\t\t\t\tjdic={str(tkey):force_text(tvalue) for tkey,tvalue in jdic.items()}\n\t\t\t\tif jdic[\"UniprotKb entry status\"] ==\"Yes\" and jdic['UniProtKB Accession'] !='502':\n\t\t\t\t\tjdic[\"PPI\"] =\"View\"\n\t\t\t\t\tjdic[\"sel\"] =\"\"\n\t\t\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Drug Bank\"]=jdic[\"Drug Bank\"].replace('<br>','|')\n\t\t\t\t\tjdic[\"SRMAtlas URL\"]=jdic[\"SRMAtlas URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Passel URL\"]=jdic[\"Passel URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"CPTAC URL\"]=jdic[\"CPTAC URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"Panoramaweb URL\"]=jdic[\"Panoramaweb URL\"].replace('\\\\','')\n\t\t\t\t\tjdic[\"PeptideTracker URL\"]=jdic[\"PeptideTracker URL\"].replace('\\\\','')\n\t\t\t\t\t#if jdic[\"Pathway Name\"].lower() !='na':\n\t\t\t\t\t#\tjdic[\"Pathway Name\"]=re.sub(r\"(\\w)([A-Z])\",r\"\\1|\\2\",jdic[\"Pathway Name\"])\n\t\t\t\t\tseqhit=0\n\t\t\t\t\t# checking any peptide present in user provided fasta sequence\n\t\t\t\t\t# classified into 3 catagories\n\t\t\t\t\tif len(unqfastaseq)>0:\n\t\t\t\t\t\tpepseq=str(jdic['Peptide Sequence']).strip()\n\t\t\t\t\t\t#if \n\t\t\t\t\t\t#matchCount = tryptic_peptide.count(pepseq.upper())\n\t\t\t\t\t\tindices = [k for k in tryptic_peptide if pepseq.upper() in tryptic_peptide[k]]\n\t\t\t\t\t\tif len(indices)>0:\n\t\t\t\t\t\t\ttempuserseqheadermatch='NA'\n\t\t\t\t\t\t\ttempmatchlist=[]\n\t\t\t\t\t\t\tfor i in indices:\n\t\t\t\t\t\t\t\ttempmatchlist.append('_'.join(fastaseq[i-1].split('_')[:-1]))\n\t\t\t\t\t\t\tif len(tempmatchlist)>0:\n\t\t\t\t\t\t\t\ttempuserseqheadermatch='<br/>'.join(tempmatchlist)\n\t\t\t\t\t\t\n\t\t\t\t\t\t\tif len(indices) > 1:\n\t\t\t\t\t\t\t\tseqhit=len(indices)\n\t\t\t\t\t\t\t\tjdic[\"Peptide in user's database\"] =tempuserseqheadermatch\n\t\t\t\t\t\t\t\tjdic[\"Peptide unique in user's database\"] =\"Present but not unique\"\n\t\t\t\t\t\t\tif len(indices) == 
1:\n\t\t\t\t\t\t\t\tseqhit=len(indices)\n\t\t\t\t\t\t\t\tjdic[\"Peptide in user's database\"] =tempuserseqheadermatch\n\t\t\t\t\t\t\t\tjdic[\"Peptide unique in user's database\"] =\"Present and unique\"\n\t\t\t\t\t\t\t\tusersequnq.append(\"Present and unique\")\n\t\t\t\t\t\t\tjfinaldata.append(jdic)\n\t\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tjfinaldata.append(jdic)\n\t\t\tes.indices.refresh(index=\"mrmassaydb-index\")\n\t\t\t#checking any result generated by database\n\t\t\tfoundHits=len(jfinaldata)\n\t\t\t#storing only 10000 rows in json format\n\t\t\tjson.dump(jfinaldata[:10000],jsonfileoutput)\n\t\t\tjsonfileoutput.close()\n\t\t\t# if result found then do other job\n\t\t\tif foundHits >0:\n\t\t\t\tstatsummary=summaryStatcal(jfinaldata) # sent data to this funcation for generating stat\n\t\t\t\tpathwaychart=statsummary['pathwaychart']\n\t\t\t\tpathwaychart=[i[:2] for i in pathwaychart]\n\t\t\t\tspecieslist=statsummary['specieslist']\n\t\t\t\ttotallist=statsummary['total']\n\t\t\t\tsubcell=statsummary['subcell']\n\t\t\t\tgodic=statsummary['godic']\n\t\t\t\tjvennprot=statsummary['jevennstat'][0]\n\t\t\t\tjvennpep=statsummary['jevennstat'][1]\n\t\t\t\tmrmdatabase=statsummary['jevennstat'][2]\n\t\t\t\tsortedgodic=OrderedDict(sorted(godic.items(), key=lambda t: t[1])) # sorting GO data\n\t\t\t\tupdatedgodic=dict(list(sortedgodic.items()))\n\t\t\t\tpepseqdataseries=ast.literal_eval(json.dumps(statsummary['pepseqdataseries'])) #dumping data into json format\n\t\t\t\tprodataseries=statsummary['prodataseries']\n\t\t\t\tunqisostat=statsummary['unqisostat']\n\t\t\t\tjsonfilepathStat=os.path.join(settings.BASE_DIR, 'resultFile', 'jsonData','resultJson', 'adavancesearch', 'statsummary', jsonfilename)\n\t\t\t\tjsonfileoutputStat= open(jsonfilepathStat,'w')\n\t\t\t\tjson.dump(statsummary,jsonfileoutputStat)\n\t\t\t\tjsonfileoutputStat.close()\n\t\t\t\turlname=\"'/resultFile/jsonData/resultJson/adavancesearch/results/\"+jsonfilename+\"'\"\n\t\t\t\tif len(unqfastaseq)>0:\n\t\t\t\t\ttempcalunq=str(round(((float(usersequnq.count('Present and unique'))/float(len(jfinaldata)))*100),2))+'%'\n\t\t\t\t\tunqisostat.append([\"User data\",tempcalunq,\"NA\"])\n\t\t\t\t\tcontextindex={\n\t\t\t\t\t\t\"filename\":urlname,\"fastacolname\":json.dumps(fastacolname),\n\t\t\t\t\t\t'query': finalsearhdata,'foundHits':foundHits,\n\t\t\t\t\t\t'pathwaychart':pathwaychart[:11],'specieslist':specieslist,\n\t\t\t\t\t\t'totallist':totallist,'subcell':subcell,\n\t\t\t\t\t\t'updatedgodic':updatedgodic,'pepseqdataseries':pepseqdataseries,\n\t\t\t\t\t\t'prodataseries':prodataseries,'unqisostat':unqisostat,\n\t\t\t\t\t\t'jvennprot':json.dumps(jvennprot),'jvennpep':json.dumps(jvennpep),'jvennmrmdb':json.dumps(mrmdatabase),'fastafilename':json.dumps(nameFIle)\n\t\t\t\t\t\t}\n\t\t\t\t\treturn render(request,'resultformuserseq.html',contextindex)\n\t\t\t\telse:\n\t\t\t\t\tcontextindex={\n\t\t\t\t\t\t\"filename\":urlname,\"colname\":json.dumps(colname),\n\t\t\t\t\t\t'query': finalsearhdata,'foundHits':foundHits,\n\t\t\t\t\t\t'pathwaychart':pathwaychart[:11],'specieslist':specieslist,\n\t\t\t\t\t\t'totallist':totallist,'subcell':subcell,\n\t\t\t\t\t\t'updatedgodic':updatedgodic,'pepseqdataseries':pepseqdataseries,\n\t\t\t\t\t\t'prodataseries':prodataseries,'unqisostat':unqisostat,\n\t\t\t\t\t\t'jvennprot':json.dumps(jvennprot),'jvennpep':json.dumps(jvennpep),'jvennmrmdb':json.dumps(mrmdatabase)\n\t\t\t\t\t\t}\n\t\t\t\t\treturn render(request,'resultform.html',contextindex)\n\t\t\telse:\n\t\t\t\treturn 
render(request,'resultform.html',{'foundHits':foundHits})\n\t\telse:\n\t\t\treturn render(request,'resultform.html',{'foundHits':0})"
] | [
"0.60719806",
"0.6044698",
"0.57561797",
"0.568547",
"0.5680097",
"0.5664656",
"0.5654453",
"0.5639726",
"0.55690736",
"0.5557861",
"0.5479778",
"0.543426",
"0.54201204",
"0.54084146",
"0.5404982",
"0.53986996",
"0.53515655",
"0.53484523",
"0.5343179",
"0.5337715",
"0.53359956",
"0.53074384",
"0.529489",
"0.5288465",
"0.52652556",
"0.52376544",
"0.5234431",
"0.5225933",
"0.52235234",
"0.52167934"
] | 0.7738971 | 0 |
Return permissions for an OGC service. | def service_permission(self, identity, service_name, ows_type):
self.logger.debug("Getting permissions for identity %s", identity)
permission = {}
if ows_type:
permission = self.permission.ogc_permissions(
service_name, ows_type, identity
)
return permission | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_service_permissions(self, request, service_path):\n\n service_path_permission_classes = getattr(\n settings,\n 'ODOO_API_SERVICE_PATH_PERMISSION_CLASSES',\n {}\n )\n permission_classes = None\n for pattern, perms in service_path_permission_classes.items():\n if pattern.match(service_path):\n permission_classes = perms.get(request.method.upper())\n break\n permission_classes = permission_classes or getattr(\n settings,\n 'ODOO_API_PERMISSION_CLASSES',\n (permissions.IsAuthenticated,))\n return [permission() for permission in permission_classes]",
"def service_permissions(self, service, params, username, group):\n permission_handler = self.permission_handlers.get(service, None)\n if permission_handler is not None:\n # create session for ConfigDB\n session = self.config_models.session()\n\n # query permissions\n permissions = permission_handler.permissions(\n params, username, group, session\n )\n\n # close session\n session.close()\n\n return {\n 'permissions': permissions\n }\n else:\n return {'error': \"Service type '%s' not found\" % service}",
"def octopus_permissions_get(self, msg, args):\r\n return self.permissions.get_permissions()",
"def retrieve_permissions(service, file_id):\n try:\n permissions = service.permissions().list(fileId=file_id).execute()\n return permissions.get('items', [])\n except errors.HttpError, error:\n print 'An error occurred: %s' % error\n return None",
"def permissions(self) -> 'outputs.PermissionsResponse':\n return pulumi.get(self, \"permissions\")",
"def get_permissions():\n return config.get_cfg_storage(ID_PERMISSION)",
"def get_permissions(self):\n return self.settings[\"permissions\"]",
"def get_permissions(self, principal_id):",
"def permissions(self):\n return self.get_permissions()",
"def get_permissions(self):\n \n if self.action in ['signup', 'login', 'verify']:\n permissions =[AllowAny]\n # cualquiera que vaya a acceder a estas peticiones lo podra hacer\n # si la accion es de tipo retrieve se debe validar el permiso de acceso\n elif self.action in ['retrieve', 'update', 'partial_update']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [IsAuthenticated]\n # si no hay ninguna opcion debe tener una sesion autenticada \n return [p() for p in permissions]",
"async def fetch_permissions(self, condensed=False):\n\n logging.debug(\"Getting permissions (%scondensed)\" % (\n \"\" if condensed else \"not \"))\n\n if condensed:\n perms = await self.client.request.get(\n \"/auth/permissions\", params={\"condensed\": True})\n return perms[\"data\"]\n else:\n perms = await self.client.request.get(\"/auth/permissions\")\n return [BasePermission.build_permission(\n self.client, perm, self.loop) for perm in perms[\"data\"]]",
"def get_permissions(self):\n if self.action in ['list', 'create']:\n permission_classes = [IsStaffOrReadOnly]\n else:\n permission_classes = [IsAuthorOrReadOnly, IsStaffOrReadOnly]\n return [permission() for permission in permission_classes]",
"def get_permissions(self):\n\t\treturn call_sdk_function('PrlFsEntry_GetPermissions', self.handle)",
"def permissions(self):\n return [DSSWorkspacePermissionItem(permission) for permission in self.settings['permissions']]",
"def get_group_permissions (self):\n return [] # likewise with the other permission defs",
"def get_group_permissions (self):\n return [] # likewise with the other permission defs",
"def permissions(self) -> str:\n return pulumi.get(self, \"permissions\")",
"def permissions(self):\n return self.proto.details.appDetails.permission",
"def permissions(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"permissions\")",
"def permissions(self):\n return self._permissions",
"def getPermissions(self, scope):\n\n return [permissions.api_enum_for_permission(p)\n for p in permissions.get_permissions(scope)]",
"def get_permissions(self):\n if not hasattr(self, '_permissions'):\n self._permissions = self.permissions.all()\n return self._permissions",
"def permissions(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"permissions\")",
"def get_all_permissions(self):\n\t\turl = f'{self.root.url}/api/v1/sessions/permissions'\n\t\treturn self.root.r('GET', url, body=None, headers=None, verify=self.root.verify)",
"def get_all_permissions(self, obj=None):",
"def permissions(self) -> discord.Permissions:\n return self.channel.permissions_for(self.guild.me)",
"def get_permissions(self):\n if self.action in ['signup', 'login']:\n permissions = [AllowAny]\n elif self.action in ['retrieve']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [AllowAny]\n return [p() for p in permissions]",
"def get_permissions(self):\n if self.action in ['signup', 'login', 'verify']:\n permissions = [AllowAny]\n elif self.action in ['retrieve', 'update', 'partial_update', 'destroy', 'u', 'p']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [IsAuthenticated]\n return [p() for p in permissions]",
"def get_permissions(self):\n if self.action in ['create', 'retrieve', 'react', 'reactions']:\n permissions = [IsAuthenticated, IsFriendPostOwner]\n elif self.action in ['update', 'partial_update']:\n permissions = [IsAuthenticated, IsCommentOwner]\n elif self.action in ['destroy']:\n permissions = [IsAuthenticated, IsCommentOrPostOwner]\n else:\n permissions = [IsAuthenticated]\n return[p() for p in permissions]",
"def get_permissions(self):\n if self.action in [\"update\", \"partial_update\", \"destroy\"]:\n permission_classes = [IsAdminOrOwner]\n else:\n permission_classes = [IsAuthenticated]\n return [permission() for permission in permission_classes]"
] | [
"0.72820175",
"0.71609414",
"0.6613455",
"0.6587816",
"0.6386372",
"0.6377997",
"0.62134784",
"0.6197311",
"0.6146319",
"0.6117645",
"0.6071896",
"0.6031946",
"0.5966233",
"0.59371376",
"0.5926131",
"0.5926131",
"0.5890674",
"0.58691025",
"0.5860763",
"0.5859673",
"0.5855396",
"0.5833496",
"0.58281684",
"0.581481",
"0.58142763",
"0.5812445",
"0.58076954",
"0.5798724",
"0.57942635",
"0.5776776"
] | 0.7893323 | 0 |
Check presence and permitted layers for the requested layers parameter. | def check_layers(self, layer_param, params, permitted_layers, mandatory):
exception = None
requested_layers = params.get(layer_param)
if requested_layers:
requested_layers = requested_layers.split(',')
for layer in requested_layers:
# allow only permitted layers
if layer and not layer.startswith('EXTERNAL_WMS:') and layer not in permitted_layers:
exception = {
'code': "LayerNotDefined",
'message': (
'Layer "%s" does not exist or is not permitted'
% layer
)
}
break
elif mandatory:
# mandatory layers param is missing or blank
exception = {
'code': "MissingParameterValue",
'message': (
'%s is mandatory for %s operation'
% (layer_param, params.get('REQUEST'))
)
}
return exception | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _check_layer_exists(self) -> None:\n layer_exists = (\n self.viewer.layer_dict[self.layer_type][self.layer_name][\n self.layer_subtype\n ][\"layer\"]\n is not None\n )\n # hide button if layer doesn't exist\n if layer_exists:\n self.layout.display = \"block\"\n else:\n self.layout.display = \"none\"\n self.logger.debug(\n (\n \"LayerButtonWidget hidden for %s of %s. \"\n \"(type: %s). Layer doesn't exist.\"\n ),\n self.layer_subtype,\n self.layer_name,\n self.layer_type,\n )",
"def check(cls, layer_param, is_check_verbose=False, **kw):\n\t\t# setup\n\t\tif type(layer_param) == ListWrapper or type(layer_param) == tuple: layer_param = list(layer_param)\n\n\t\t# check parameters\n\t\tif not type(layer_param) == list: \n\t\t\tif is_check_verbose: print(\"layer_param must be converatble to list but is type %s\"%type(layer_param))\n\t\t\treturn False\n\n\t\tif not cls._check(layer_param=layer_param, is_check_verbose=is_check_verbose, **kw): \n\t\t\tif is_check_verbose: print(\"checks failed\")\n\t\t\treturn False\n\n\t\t# additional checks\n\t\tif cls.additional_check(layer_param=layer_param, is_check_verbose=is_check_verbose, **kw) is False: \n\t\t\tif is_check_verbose: print(\"additional checks failed\")\n\t\t\treturn False\n\n\t\treturn True",
"def IsLayer(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_IsLayer(self, *args)",
"def check_model(self):\n layers_map = self.core.query_network(network=self.network,\n device_name=self.device)\n\n unsupported_layers = [\n l for l in self.network.layers.keys() if l not in layers_map\n ]\n\n if (unsupported_layers != []):\n sys.exit(\"Those mention layers in your model are not supported by OpenVino Inference Engine:\" \\\n \" \\n\\t\" + \"\\n\\t\".join(unsupported_layers))",
"def request_layers(url):\n layer_names = get_layers(url)\n for l in layer_names:\n print(\"Checking '%s'...\" % l)\n get_image(url, l, check_blank=True)",
"def check_layers_count(context, count):\n history = DOCKER_CLIENT.history(context.config.userdata['IMAGE'])\n if len(history) == int(count):\n return True\n\n raise Exception(\"Image does not contain %s layers, current number of layers: %s\" % (count, len(history)), history)",
"def test_addon_layer(self):\n layers = [l.getName() for l in registered_layers()]\n self.assertIn('IBriefyPloneLayer', layers)",
"def check_request(self, params, permission):\n exception = {}\n\n if permission.get('qgs_project') is None:\n # service unknown or not permitted\n exception = {\n 'code': \"Service configuration error\",\n 'message': \"Service unknown or unsupported\"\n }\n elif not params.get('REQUEST'):\n # REQUEST missing or blank\n exception = {\n 'code': \"OperationNotSupported\",\n 'message': \"Please check the value of the REQUEST parameter\"\n }\n else:\n service = params.get('SERVICE', '')\n request = params.get('REQUEST', '').upper()\n\n if service == 'WMS' and request == 'GETFEATUREINFO':\n # check info format\n info_format = params.get('INFO_FORMAT', 'text/plain')\n if re.match('^application/vnd.ogc.gml.+$', info_format):\n # do not support broken GML3 info format\n # i.e. 'application/vnd.ogc.gml/3.1.1'\n exception = {\n 'code': \"InvalidFormat\",\n 'message': (\n \"Feature info format '%s' is not supported. \"\n \"Possibilities are 'text/plain', 'text/html' or \"\n \"'text/xml'.\"\n % info_format\n )\n }\n elif service == 'WMS' and request == 'GETPRINT':\n # check print templates\n template = params.get('TEMPLATE')\n if template and template not in permission['print_templates']:\n # allow only permitted print templates\n exception = {\n 'code': \"Error\",\n 'message': (\n 'Composer template not found or not permitted'\n )\n }\n\n if not exception:\n # check layers params\n\n # lookup for layers params by request\n # {\n # <SERVICE>: {\n # <REQUEST>: [\n # <optional layers param>, <mandatory layers param>\n # ]\n # }\n # }\n ogc_layers_params = {\n 'WMS': {\n 'GETMAP': ['LAYERS', None],\n 'GETFEATUREINFO': ['LAYERS', 'QUERY_LAYERS'],\n 'GETLEGENDGRAPHIC': [None, 'LAYER'],\n 'GETLEGENDGRAPHICS': [None, 'LAYER'], # QGIS legacy request\n 'DESCRIBELAYER': [None, 'LAYERS'],\n 'GETSTYLES': [None, 'LAYERS']\n },\n 'WFS': {\n 'DESCRIBEFEATURETYPE': ['TYPENAME', None],\n 'GETFEATURE': [None, 'TYPENAME']\n }\n }\n\n layer_params = ogc_layers_params.get(service, {}).get(request, {})\n\n if service == 'WMS' and request == 'GETPRINT':\n # find map layers param for GetPrint (usually 'map0:LAYERS')\n for key, value in params.items():\n if key.endswith(\":LAYERS\"):\n layer_params = [key, None]\n break\n\n if layer_params:\n permitted_layers = permission['public_layers']\n filename = params.get('FILENAME', '')\n if (service == 'WMS' and (\n (request == 'GETMAP' and filename) or request == 'GETPRINT'\n )):\n # When doing a raster export (GetMap with FILENAME)\n # or printing (GetPrint), also allow background layers\n permitted_layers += permission['background_layers']\n if layer_params[0] is not None:\n # check optional layers param\n exception = self.check_layers(\n layer_params[0], params, permitted_layers, False\n )\n if not exception and layer_params[1] is not None:\n # check mandatory layers param\n exception = self.check_layers(\n layer_params[1], params, permitted_layers, True\n )\n\n return exception",
"def check_layer(layer1, layer2, values=False):\n def check(name):\n assert check_shape(layer1, layer2, name)\n if values:\n assert check_values(layer1, layer2, name)\n\n assert type(layer1) is type(layer2)\n if hasattr(layer1, 'input_shape'):\n assert layer1.input_shape == layer2.input_shape\n if hasattr(layer2, 'output_shape'):\n assert layer1.output_shape == layer2.output_shape\n if isinstance(layer1, (Conv2DLayer, DenseLayer)):\n assert check_shape(layer1, layer2, 'W')\n check('b')\n assert layer1.nonlinearity == layer2.nonlinearity\n if isinstance(layer1, NonlinearityLayer):\n assert layer1.nonlinearity == layer2.nonlinearity\n if isinstance(layer1, BatchNormLayer):\n check('mean')\n check('inv_std')\n check('gamma')\n check('beta')\n if isinstance(layer1, DropoutLayer):\n assert layer1.p == layer2.p\n assert layer1.rescale == layer2.rescale\n assert layer1.shared_axes == layer2.shared_axes\n if isinstance(layer1, ScaleLayer):\n check('scales')\n if isinstance(layer1, BiasLayer):\n check('b')\n if isinstance(layer1, GlobalPoolLayer):\n assert layer1.pool_function is layer2.pool_function\n if isinstance(layer1, Pool2DLayer):\n assert layer1.ignore_border == layer2.ignore_border\n assert layer1.mode == layer2.mode\n assert layer1.pad == layer2.pad\n assert layer1.pool_size == layer2.pool_size\n assert layer1.stride == layer2.stride\n return True",
"def checkLayersOverride(shape):\n required = []\n connected = [] \n\n # find the shaders / displacement that are required\n layersOverride = cmds.getAttr(\"%s.layersOverride\" % shape)\n if layersOverride:\n layersOverride = json.loads(layersOverride)\n for layer in layersOverride:\n if layersOverride[layer].has_key('shaders'):\n for k in layersOverride[layer]['shaders'].keys():\n if not k in required:\n required.append(k)\n\n shape_connections = cmds.listAttr(\"%s.shaders\" % shape, multi=True)\n\n # go find the connected shaders\n if shape_connections:\n for con in shape_connections:\n connected_shader = cmds.listConnections(\"%s.%s\" % (shape, con))[0]\n connected.append(connected_shader)\n \n port = len(connected)\n for req in required:\n if req not in connected:\n if cmds.objExists(req):\n cmds.connectAttr( req + \".message\", shape + \".shaders[%i]\" % port)\n port += 1\n message = 'Connected %s to %s' % (req, shape)\n MGlobal.displayInfo(message)\n else:\n message = \"Missing shader : %s\" % req\n MGlobal.displayWarning(message)",
"def check_RNN_layers_valid(self):\n error_msg_layer_type = \"First element in a layer specification must be one of {}\".format(self.valid_RNN_hidden_layer_types)\n error_msg_layer_form = \"Layer must be of form [layer_name, hidden_units]\"\n error_msg_layer_list = \"Layers must be provided as a list\"\n error_msg_output_heads = \"Number of output activations must equal number of output heads\"\n\n assert isinstance(self.layers_info, list), error_msg_layer_list\n\n all_layers = self.layers_info[:-1]\n output_layer = self.layers_info[-1]\n assert isinstance(output_layer, list), error_msg_layer_list\n if isinstance(output_layer[0], list):\n assert len(output_layer) == len(\n self.output_activation), error_msg_output_heads\n for layer in output_layer:\n all_layers.append(layer)\n else:\n assert not isinstance(self.output_activation, list) or len(self.output_activation) == 1, error_msg_output_heads\n all_layers.append(output_layer)\n\n rest_must_be_linear = False\n for layer in all_layers:\n assert isinstance(layer, list), \"Each layer must be a list\"\n assert isinstance(layer[0], str), error_msg_layer_type\n layer_type_name = layer[0].lower()\n assert layer_type_name in self.valid_RNN_hidden_layer_types, \"Layer name {} not valid, use one of {}\".format(\n layer_type_name, self.valid_RNN_hidden_layer_types)\n\n assert isinstance(layer[1], int), error_msg_layer_form\n assert layer[1] > 0, \"Must have hidden_units >= 1\"\n assert len(layer) == 2, error_msg_layer_form\n\n if rest_must_be_linear: assert layer[0].lower() == \"linear\", \"If have linear layers then they must come at end\"\n if layer_type_name == \"linear\": rest_must_be_linear = True",
"def check_layer(self, service: Service):\n wms_helper = WmsHelper(service)\n urls_to_check = [\n (wms_helper.get_get_map_url(), True),\n (wms_helper.get_get_styles_url(), False),\n (wms_helper.get_get_feature_info_url(), False),\n (wms_helper.get_describe_layer_url(), False),\n ]\n for url in urls_to_check:\n if url[0] is None:\n continue\n self.check_service(url[0], check_image=url[1])",
"def check_recursive(self, summary_list: List[\"LayerInfo\"]) -> None:\n if list(self.module.named_parameters()):\n for other_layer in summary_list:\n if self.layer_id == other_layer.layer_id:\n self.is_recursive = True",
"def test_all_layer_types(self):\n\n\t\tdetails = self.watcher.describe()\n\t\t\n\t\tdenseLayers = details[details.layer_type==str(LAYER_TYPE.DENSE)]\n\t\tdenseCount = len(denseLayers)\n\t\tself.assertEqual(denseCount, 3, \"3 dense layers, but {} found\".format(denseCount))\t\t\n\t\n\t\tconv2DLayers = details[details.layer_type==str(LAYER_TYPE.CONV2D)]\n\t\tconv2DCount = len(conv2DLayers)\n\t\tself.assertEqual(conv2DCount, 8, \"8 conv2D layers, but {} found\".format(denseCount))",
"def check_layer_name(field):\n \n hygienize = field.replace(\"\\\"\", \"\")\n layer_name = (hygienize.split(\".\"))[0]\n \n if layer_name in layer_names:\n return True\n return False",
"def check_display_layer(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n if len(pm.ls(type=\"displayLayer\")) > 1:\n progress_controller.complete()\n raise PublishError(\"There should be no <b>Display Layers</b> in the scene!!!\")\n progress_controller.complete()",
"def test_addon_layer_removed(self):\n layers = [l.getName() for l in registered_layers()]\n self.assertNotIn('IBriefyPloneLayer', layers)",
"def is_layer(obj):\n # TODO(b/110718070): Replace with isinstance(obj, base_layer.Layer).\n return hasattr(obj, \"_is_layer\") and not isinstance(obj, type)",
"def is_feature_layer(layer):\n return getattr(layer, '_is_feature_layer', False)",
"def checkRequirements(self):\n # Checkin requirements\n self.runButton.setEnabled(False)\n tbl = self.toBendLayer()\n pl = self.pairsLayer()\n if tbl is None:\n self.displayMsg(\"You must select a vector layer to bend !\", True)\n return\n if pl is None:\n self.displayMsg(\n (\n \"You must select a vector (line) layer \"\n \"which defines the points pairs !\"\n ),\n True,\n )\n return\n if pl is tbl:\n self.displayMsg(\n (\"The layer to bend must be \" \"different from the pairs layer !\"), True\n )\n return\n if not tbl.isEditable():\n self.displayMsg(\"The layer to bend must be in edit mode !\", True)\n return\n if not pl.isEditable() and self.pairsToPinsCheckBox.isChecked():\n self.displayMsg(\n (\n \"The pairs layer must be in edit mode if you want \"\n \"to change pairs to pins !\"\n ),\n True,\n )\n return\n if self.stackedWidget.currentIndex() == 0:\n self.displayMsg(\n \"Impossible to run with an invalid transformation type.\", True\n )\n return\n self.displayMsg(\"Ready to go...\")\n self.runButton.setEnabled(True)",
"def test_build_compose_section_supports_layers():\n\n custom_output_dir = './build_not_dist'\n manifest = {\n 'output_dir': custom_output_dir,\n 'layers': {\n 'first': {'requirements': 'requirements/first.txt'},\n 'second': {'requirements': 'requirements/second.txt'},\n }\n }\n\n result = actions._get_compose_template(manifest)\n yaml_result = yaml.safe_load(result)\n\n first_layer = yaml_result['services']['first-layer']\n assert any('requirements/first.txt' in volume for volume in first_layer['volumes'])\n assert 'build_layer.sh first' in first_layer['command']\n\n second_layer = yaml_result['services']['second-layer']\n assert any('requirements/second.txt' in volume for volume in second_layer['volumes'])\n assert 'build_layer.sh second' in second_layer['command']",
"def _check(self):\n if not isinstance(self.fc_layers, tuple):\n raise TypeError(f'fc_layers require tuple, get {type(self.fc_layers)}')\n if not isinstance(self.use_dropout, tuple):\n raise TypeError(f'use_dropout require tuple, get {type(self.use_dropout)}')\n if not isinstance(self.drop_prob, tuple):\n raise TypeError(f'drop_prob require tuple, get {type(self.drop_prob)}')\n if not isinstance(self.use_activation, tuple):\n raise TypeError(f'use_activation require tuple, get {type(self.use_activation)}')\n l_fc_layer = len(self.fc_layers)\n l_use_drop = len(self.use_dropout)\n l_drop_prob = len(self.drop_prob)\n l_use_activation = len(self.use_activation)\n pass_check = l_fc_layer >= 2 and l_use_drop < l_fc_layer and l_drop_prob < l_fc_layer and l_use_activation < l_fc_layer and l_drop_prob == l_use_drop\n if not pass_check:\n msg = 'Wrong BaseDiscriminator parameters!'\n raise ValueError(msg)",
"def test_layer_API(self):\n\n # Exceptions\n exclude = ['get_topN', 'get_bins',\n 'get_geotransform',\n 'get_nodata_value',\n 'get_attribute_names',\n 'get_resolution',\n 'get_geometry_type',\n 'get_geometry_name',\n 'to_vector_points',\n 'to_vector_layer']\n\n V = Vector() # Empty vector instance\n R = Raster() # Empty raster instance\n\n assert same_API(V, R, exclude=exclude)\n\n for filename in [os.path.join(TESTDATA,\n 'test_buildings.shp'),\n os.path.join(HAZDATA,\n 'Lembang_Earthquake_Scenario.asc')]:\n\n L = read_layer(filename)\n\n assert same_API(L, V, exclude=exclude)\n assert same_API(L, R, exclude=exclude)",
"def check_anim_layers(progress_controller=None):\n if progress_controller is None:\n progress_controller = ProgressControllerBase()\n if len(pm.ls(type=\"animLayer\")) > 0:\n progress_controller.complete()\n raise PublishError(\"There should be no <b>Animation Layers</b> in the scene!!!\")\n progress_controller.complete()",
"def test_layer_ok(self):\n self.assertTrue(self.vector)",
"def check_init(self):\n if self.Nlayer > 1:\n raise Exception(\"Nlayer == 1 currently\")",
"def test_filter_layer_ids(self):\n\t\t\n\t\tdetails = self.watcher.describe(layers=[])\n\t\tprint(details)\n\t\t\n\t\tdetails = self.watcher.describe(layers=self.fc_layers)\n\t\tprint(details)\n\t\t\n\t\tdenseLayers = details[details.layer_type==str(LAYER_TYPE.DENSE)]\n\t\tdenseCount = len(denseLayers)\n\t\tself.assertEqual(denseCount, 3, \"3 dense layers, but {} found\".format(denseCount))\n\t\t\t\n\t\tnonDenseLayers = details[details.layer_type!=str(LAYER_TYPE.DENSE)]\n\t\tnonDenseCount = len(nonDenseLayers)\n\t\tself.assertEqual(nonDenseCount, 0, \"Filter has No dense layers: {} found\".format(nonDenseCount))",
"def adjust_params(self, params, permission):\n ogc_service = params.get('SERVICE', '')\n ogc_request = params.get('REQUEST', '').upper()\n\n if ogc_service == 'WMS' and ogc_request == 'GETMAP':\n requested_layers = params.get('LAYERS')\n if requested_layers:\n # replace restricted group layers with permitted sublayers\n requested_layers = requested_layers.split(',')\n restricted_group_layers = permission['restricted_group_layers']\n permitted_layers = self.expand_group_layers(\n requested_layers, restricted_group_layers\n )\n\n params['LAYERS'] = \",\".join(permitted_layers)\n\n elif ogc_service == 'WMS' and ogc_request == 'GETFEATUREINFO':\n requested_layers = params.get('QUERY_LAYERS')\n if requested_layers:\n # replace restricted group layers with permitted sublayers\n requested_layers = requested_layers.split(',')\n restricted_group_layers = permission['restricted_group_layers']\n permitted_layers = self.expand_group_layers(\n reversed(requested_layers), restricted_group_layers\n )\n\n # filter by queryable layers\n queryable_layers = permission['queryable_layers']\n permitted_layers = [\n l for l in permitted_layers if l in queryable_layers\n ]\n\n # reverse layer order\n permitted_layers = reversed(permitted_layers)\n\n params['QUERY_LAYERS'] = \",\".join(permitted_layers)\n\n elif (ogc_service == 'WMS' and\n ogc_request in ['GETLEGENDGRAPHIC', 'GETLEGENDGRAPHICS']):\n requested_layers = params.get('LAYER')\n if requested_layers:\n # replace restricted group layers with permitted sublayers\n requested_layers = requested_layers.split(',')\n restricted_group_layers = permission['restricted_group_layers']\n permitted_layers = self.expand_group_layers(\n requested_layers, restricted_group_layers\n )\n\n params['LAYER'] = \",\".join(permitted_layers)\n\n elif ogc_service == 'WMS' and ogc_request == 'GETPRINT':\n # find map layers param for GetPrint (usually 'map0:LAYERS')\n map_layers_param = None\n for key, value in params.items():\n if key.endswith(\":LAYERS\"):\n map_layers_param = key\n break\n\n requested_layers = params.get(map_layers_param)\n if requested_layers:\n # replace restricted group layers with permitted sublayers\n requested_layers = requested_layers.split(',')\n restricted_group_layers = permission['restricted_group_layers']\n permitted_layers = self.expand_group_layers(\n requested_layers, restricted_group_layers\n )\n\n params[map_layers_param] = \",\".join(permitted_layers)\n\n elif ogc_service == 'WMS' and ogc_request == 'DESCRIBELAYER':\n requested_layers = params.get('LAYERS')\n if requested_layers:\n # replace restricted group layers with permitted sublayers\n requested_layers = requested_layers.split(',')\n restricted_group_layers = permission['restricted_group_layers']\n permitted_layers = self.expand_group_layers(\n reversed(requested_layers), restricted_group_layers\n )\n\n # reverse layer order\n permitted_layers = reversed(permitted_layers)\n\n params['LAYERS'] = \",\".join(permitted_layers)",
"def _check_param(grads, images, kernel_name, align_corners, half_pixel_centers):\n if half_pixel_centers:\n if align_corners:\n raise RuntimeError(\"If half_pixel_centers is True, \"\n \"align_corners must be False.\")\n grads_shape = grads.get(\"shape\")\n grads_dtype = grads.get(\"dtype\")\n images_shape = images.get(\"shape\")\n images_dtype = images.get(\"dtype\")\n data_limit = ((1 << 31) - 1) // (4 if images_dtype == \"float32\" else 2)\n util.check_kernel_name(kernel_name)\n util.check_shape_rule(grads_shape)\n util.check_shape_rule(images_shape)\n util.check_shape_size(grads_shape, data_limit)\n util.check_shape_size(images_shape, data_limit)\n check_list_grads = (\"float32\")\n check_list_images = (\"float32\")\n util.check_dtype_rule(grads_dtype.lower(), check_list_grads)\n util.check_dtype_rule(images_dtype.lower(), check_list_images)",
"def IsRenderLayersOn(self):\n\n renderLayers = pm.ls(exactType=\"renderLayer\")\n referenceLayers = pm.ls(exactType=\"renderLayer\", rn=1)\n return ((len(renderLayers) - len(referenceLayers)) > 1)"
] | [
"0.665987",
"0.6410096",
"0.6369205",
"0.6325547",
"0.62577754",
"0.6249218",
"0.6232833",
"0.6193797",
"0.61657876",
"0.5994172",
"0.5975074",
"0.5945743",
"0.5877518",
"0.57443446",
"0.5725027",
"0.5710859",
"0.56924355",
"0.5676123",
"0.5661383",
"0.56075794",
"0.5587897",
"0.5584314",
"0.5583036",
"0.5569907",
"0.55426663",
"0.5534237",
"0.5528949",
"0.5522107",
"0.54999083",
"0.54963726"
] | 0.8573627 | 0 |
Adjust parameters depending on request and permissions. | def adjust_params(self, params, permission):
ogc_service = params.get('SERVICE', '')
ogc_request = params.get('REQUEST', '').upper()
if ogc_service == 'WMS' and ogc_request == 'GETMAP':
requested_layers = params.get('LAYERS')
if requested_layers:
# replace restricted group layers with permitted sublayers
requested_layers = requested_layers.split(',')
restricted_group_layers = permission['restricted_group_layers']
permitted_layers = self.expand_group_layers(
requested_layers, restricted_group_layers
)
params['LAYERS'] = ",".join(permitted_layers)
elif ogc_service == 'WMS' and ogc_request == 'GETFEATUREINFO':
requested_layers = params.get('QUERY_LAYERS')
if requested_layers:
# replace restricted group layers with permitted sublayers
requested_layers = requested_layers.split(',')
restricted_group_layers = permission['restricted_group_layers']
permitted_layers = self.expand_group_layers(
reversed(requested_layers), restricted_group_layers
)
# filter by queryable layers
queryable_layers = permission['queryable_layers']
permitted_layers = [
l for l in permitted_layers if l in queryable_layers
]
# reverse layer order
permitted_layers = reversed(permitted_layers)
params['QUERY_LAYERS'] = ",".join(permitted_layers)
elif (ogc_service == 'WMS' and
ogc_request in ['GETLEGENDGRAPHIC', 'GETLEGENDGRAPHICS']):
requested_layers = params.get('LAYER')
if requested_layers:
# replace restricted group layers with permitted sublayers
requested_layers = requested_layers.split(',')
restricted_group_layers = permission['restricted_group_layers']
permitted_layers = self.expand_group_layers(
requested_layers, restricted_group_layers
)
params['LAYER'] = ",".join(permitted_layers)
elif ogc_service == 'WMS' and ogc_request == 'GETPRINT':
# find map layers param for GetPrint (usually 'map0:LAYERS')
map_layers_param = None
for key, value in params.items():
if key.endswith(":LAYERS"):
map_layers_param = key
break
requested_layers = params.get(map_layers_param)
if requested_layers:
# replace restricted group layers with permitted sublayers
requested_layers = requested_layers.split(',')
restricted_group_layers = permission['restricted_group_layers']
permitted_layers = self.expand_group_layers(
requested_layers, restricted_group_layers
)
params[map_layers_param] = ",".join(permitted_layers)
elif ogc_service == 'WMS' and ogc_request == 'DESCRIBELAYER':
requested_layers = params.get('LAYERS')
if requested_layers:
# replace restricted group layers with permitted sublayers
requested_layers = requested_layers.split(',')
restricted_group_layers = permission['restricted_group_layers']
permitted_layers = self.expand_group_layers(
reversed(requested_layers), restricted_group_layers
)
# reverse layer order
permitted_layers = reversed(permitted_layers)
params['LAYERS'] = ",".join(permitted_layers) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def additional_access_token_request_parameters(self, parameters, request):",
"def _oauth2_process_params(self, request):\n self.in_canvas = (request.REQUEST.get('fb_sig_in_canvas') == '1')\n self.added = (request.REQUEST.get('fb_sig_added') == '1')\n # If app_id is not set explicitly, pick it up from the params\n if not self.app_id:\n self.app_id = request.REQUEST.get('fb_sig_app_id')\n if not self.uid:\n self.uid = request.REQUEST.get('fb_sig_user')",
"def setup(self):\r\n \r\n if self.requestedAction == admin.ACTION_EDIT or self.requestedAction == admin.ACTION_CREATE:\r\n \r\n # Set the required parameters\r\n for arg in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addReqArg(arg)\r\n \r\n # Set up the valid parameters\r\n for arg in RadiusAuthRestHandler.VALID_PARAMS:\r\n if arg not in RadiusAuthRestHandler.REQUIRED_PARAMS:\r\n self.supportedArgs.addOptArg(arg)",
"def _update_params(self):\n pass",
"def update_params(self):\n pass",
"def auth_extra_arguments(self):\n extra_arguments = super().auth_extra_arguments()\n extra_arguments[\"p\"] = self.policy or self.data.get(\"p\")\n return extra_arguments",
"def _params(self, request: Request) -> dict:\n params = {'forceAsync': True}\n\n subset = self._spatial_subset_params(request) + self._temporal_subset_params(request)\n if len(subset) > 0:\n params['subset'] = subset\n\n for p, val in request.parameter_values():\n if type(val) == str:\n params[p] = val\n elif type(val) == bool:\n params[p] = str(val).lower()\n elif type(val) == list and type(val[0]) != str:\n params[p] = ','.join([str(v) for v in val])\n else:\n params[p] = val\n\n return params",
"def initial(self, request, *args, **kwargs):\n\n # It's checks the permissions for the third party endpoint or not. It give access if key present.\n bool_value, message = self.check_api_keys(request)\n if bool_value:\n super(ProjectRestrictedGenericViewSet, self).initial(request, *args, **kwargs)\n # Check action permissions\n self.check_action_permissions(request)\n else:\n self.app_permission_denied(request, message)",
"def set_related_params(self,request,responsedata):\n pass",
"def get_request_extra_params(self, **kwargs):\n params = self.request_extra_params.copy()\n params.update(kwargs)\n return params",
"def request_vars(self):",
"def prepare(self, request):\n pass",
"def _apply_params(self):\n config = self.get_startup_config()\n # Pass true to _set_params so we know these are startup values\n self._set_params(config, True)",
"def __update_request(self, request_dict, namespace, apikey):\n request_dict['namespace'] = namespace if namespace else self.namespace\n request_dict['apikey'] = apikey if apikey else self.apikey",
"def set_params(self, **kwargs):\n ...",
"def updateParameters(self):\n\n return",
"def process(self, request):\n if \"type\"+self.id in request.args:\n self.lms.type = request.args[\"type\"+self.id][0]\n if \"studentpost\"+self.id in request.args:\n self.lms.studentpost = request.args[\"studentpost\"+self.id][0]\n if \"groupmode\"+self.id in request.args:\n self.lms.groupmode = request.args[\"groupmode\"+self.id][0]\n if \"visible\"+self.id in request.args:\n self.lms.visible = request.args[\"visible\"+self.id][0]\n if \"subscription\"+self.id in request.args:\n self.lms.subscription = request.args[\"subscription\"+self.id][0]\n if \"other\"+self.id in request.args:\n self.lms.otherLabel = request.args[\"other\"+self.id][0]\n if \"url\"+self.id in request.args:\n self.lms.otherUrl = request.args[\"url\"+self.id][0]",
"def _parse_in_request(self, request):\n error = None\n self.logger.debug(\"Http method: %s\" % request.method)\n if request.method == 'GET':\n self._params = request.args.to_dict()\n self.logger.debug(\"Request params: %s\" % self._params)\n \n elif request.method == 'POST':\n self._params = request.form.to_dict()\n self.logger.debug(\"Request params: %s\" % self._params)",
"def updateParameters(self, parameters):",
"def get_request_args():\n args = {}\n args['user_id'] = request.args.get('user_id', default=None, type=int)\n args['is_examiner'] = request.args.get('is_examiner', default=None, type=int)\n if args['is_examiner'] is not None: args['is_examiner'] = args['is_examiner']==1\n args['first_name'] = request.args.get('first_name', default=None)\n args['last_name'] = request.args.get('last_name', default=None)\n\n args['exam_warning_id'] = request.args.get('exam_warning_id', default=None, type=int)\n args['exam_recording_id'] = request.args.get('exam_recording_id', default=None, type=int)\n args['in_progress'] = request.args.get('in_progress', default=None, type=int)\n if args['in_progress'] is not None: args['in_progress'] = args['in_progress']==1\n args['exam_id'] = request.args.get('exam_id', default=None, type=int)\n args['subject_id'] = request.args.get('subject_id', default=None, type=int)\n args['login_code'] = request.args.get('login_code', default=None)\n args['exam_name'] = request.args.get('exam_name', default=None)\n\n args['warning_count'] = request.args.get('warning_count', default=None, type=int)\n args['min_warnings'] = request.args.get('min_warnings', default=None, type=int)\n args['max_warnings'] = request.args.get('max_warnings', default=None, type=int)\n\n args['period_start'] = request.args.get('period_start', default=timedelta(days=10))\n args['period_end'] = request.args.get('period_end', default=timedelta(days=10))\n if args['period_start'] == timedelta(days=10): args['period_start'] = None\n if args['period_end'] == timedelta(days=10): args['period_end'] = None\n args['order_by'] = request.args.get('order_by', default='default').lower()\n args['order'] = request.args.get('order', default='desc').lower()\n \n args['page_number'] = request.args.get('page_number', default=1, type=int)\n args['results_length'] = request.args.get('results_length', default=25, type=int)\n if args['page_number'] < 1: args['page_number'] = 1\n if args['results_length'] < 1: args['results_length'] = 1\n\n return args",
"def set_params(self):\r\n pass",
"def _update_params(self):\n _load = not self.san_interface.runmode\n params={}\n if ('iosched' in self._updatedattr or _load) and self.iosched<>IoSchedType.default:\n params['iosched']=str(self.iosched)\n if ('readahead' in self._updatedattr or _load) and self.readahead :\n params['readahead']=self.readahead\n if params:\n for pt in self.paths():\n pt.provider.set_dev_params(pt,params)",
"def prepare_request_params(\n request_params: Dict, model_id: Text, model_data: Dict\n) -> Dict:\n request_params = correct_types(request_params, model_data[\"columns_data\"])\n if model_data[\"hashed_indexes\"]:\n request_params = reverse_hash_names(model_id, request_params)\n return request_params",
"def update_parameters(self):\n self.alignment_factor = rospy.get_param('/dyn_reconf/alignment_factor')\n self.cohesion_factor = rospy.get_param('/dyn_reconf/cohesion_factor')\n self.separation_factor = rospy.get_param('/dyn_reconf/separation_factor')\n self.avoid_factor = rospy.get_param('/dyn_reconf/avoid_factor')\n self.max_speed = rospy.get_param('/dyn_reconf/max_speed')\n self.max_force = rospy.get_param('/dyn_reconf/max_force')\n self.friction = rospy.get_param('/dyn_reconf/friction')\n self.crowd_radius = rospy.get_param('/dyn_reconf/crowd_radius')\n self.search_radius = rospy.get_param('/dyn_reconf/search_radius')\n\n rospy.loginfo(rospy.get_caller_id() + \" -> Parameters updated\")\n if DEBUG:\n print('alignment_factor: ', self.alignment_factor)\n print('cohesion_factor: ', self.cohesion_factor)\n print('separation_factor: ', self.separation_factor)\n print('avoid_factor: ', self.avoid_factor)\n print('max_speed: ', self.max_speed)\n print('max_force: ', self.max_force)\n print('friction: ', self.friction)\n print('crowd_radius: ', self.crowd_radius)\n print('search_radius: ', self.search_radius)",
"def _get_request_args(self):\n str_args = False\n request_args = {}\n if request.method == \"POST\" or request.method == \"PUT\":\n # Use only body args and ignore any args from query string\n if request.headers.get(\"content-type\", \"\").startswith(CONT_TYPE_JSON):\n # JSON body request\n if request.data:\n request_args = json_loads(request.data)\n if GATEWAY_ARG_PARAMS not in request_args:\n # Magic fallback: Directly use JSON first level as args if params key not present\n request_args = {GATEWAY_ARG_PARAMS: request_args}\n elif request.form:\n # Form encoded payload\n if GATEWAY_ARG_JSON in request.form:\n payload = request.form[GATEWAY_ARG_JSON]\n request_args = json_loads(payload)\n if GATEWAY_ARG_PARAMS not in request_args:\n # Magic fallback: Directly use JSON first level as args if params key not present\n request_args = {GATEWAY_ARG_PARAMS: request_args}\n else:\n # Fallback: Directly use form values\n str_args = True\n request_args = {GATEWAY_ARG_PARAMS: request.form.to_dict(flat=True)}\n else:\n # No args found in body\n request_args = {GATEWAY_ARG_PARAMS: {}}\n\n # Extract file args\n for file_arg in request.files:\n try:\n file_handle = request.files[file_arg]\n arg_val = file_handle.read()\n request_args[GATEWAY_ARG_PARAMS][file_arg] = arg_val\n except Exception as ex:\n log.exception(\"Error reading request file argument %s\", file_arg)\n\n elif request.method == \"GET\":\n str_args = True\n REQ_ARGS_SPECIAL = {\"authtoken\", \"timeout\", \"headers\"}\n args_dict = request.args.to_dict(flat=True)\n request_args = {k: request.args[k] for k in args_dict if k in REQ_ARGS_SPECIAL}\n req_params = {k: request.args[k] for k in args_dict if k not in REQ_ARGS_SPECIAL}\n request_args[GATEWAY_ARG_PARAMS] = req_params\n\n request_args[\"str_args\"] = str_args # Indicate downstream that args are str (GET or form encoded)\n #log.info(\"Request args: %s\" % request_args)\n return request_args",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return",
"def updateParameters(self, parameters):\r\n return"
] | [
"0.64624816",
"0.6333226",
"0.6288839",
"0.6280795",
"0.61579335",
"0.6024394",
"0.59283715",
"0.5907796",
"0.5885023",
"0.5873847",
"0.5864146",
"0.5857229",
"0.58306986",
"0.57982206",
"0.5796583",
"0.5779755",
"0.575566",
"0.57194436",
"0.5705869",
"0.56948435",
"0.56885666",
"0.5654454",
"0.56496567",
"0.5601495",
"0.5583696",
"0.5582282",
"0.5582282",
"0.5582282",
"0.5582282",
"0.5582282"
] | 0.6348457 | 1 |
Recursively replace group layers with permitted sublayers and return resulting layer list. | def expand_group_layers(self, requested_layers, restricted_group_layers):
permitted_layers = []
for layer in requested_layers:
if layer in restricted_group_layers.keys():
# expand sublayers
sublayers = restricted_group_layers.get(layer)
permitted_layers += self.expand_group_layers(
sublayers, restricted_group_layers
)
else:
# leaf layer or permitted group layer
permitted_layers.append(layer)
return permitted_layers | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def all_layers(parent=None):\n if parent is None:\n parent = QgsProject.instance().layerTreeRoot()\n result = []\n def do_a_group(grp, level=0):\n for child in grp.children():\n if isinstance(child, QgsLayerTreeGroup):\n do_a_group(child, level=level + 1)\n elif isinstance(child, QgsLayerTreeLayer):\n result.append(child)\n\n do_a_group(parent)\n return result",
"def all_layers_with_name(name, parent=None):\n if parent is None:\n parent = QgsProject.instance().layerTreeRoot()\n result = []\n def do_a_group(grp, level=0):\n for child in grp.children():\n if isinstance(child, QgsLayerTreeGroup):\n do_a_group(child, level=level + 1)\n elif isinstance(child, QgsLayerTreeLayer):\n if child.name() == name:\n result.append(child)\n\n do_a_group(parent)\n return result",
"def _get_leaf_layers(self, match_layer):\n\n if not match_layer.input_layers:\n return [match_layer.layer]\n\n # If 2 different layers point to the same input, or if a layer uses the\n # same input multiple times, the input layer can be repeated. But it\n # preserves a bit of structure.\n\n leaf_layers = []\n for inp in match_layer.input_layers:\n leaf_layers.extend(self._get_leaf_layers(inp))\n\n return leaf_layers",
"def upgrade_sublayer(self, layer_name_pattern: Union[str, List[str]],\n handle_func: Callable[[nn.Layer, str], nn.Layer]) -> Dict[str, nn.Layer]:\n\n if not isinstance(layer_name_pattern, list):\n layer_name_pattern = [layer_name_pattern]\n\n hit_layer_pattern_list = []\n for pattern in layer_name_pattern:\n # parse pattern to find target layer and its parent\n layer_list = parse_pattern_str(pattern=pattern, parent_layer=self)\n if not layer_list:\n continue\n sub_layer_parent = layer_list[-2][\"layer\"] if len(layer_list) > 1 else self\n\n sub_layer = layer_list[-1][\"layer\"]\n sub_layer_name = layer_list[-1][\"name\"]\n sub_layer_index = layer_list[-1][\"index\"]\n\n new_sub_layer = handle_func(sub_layer, pattern)\n\n if sub_layer_index:\n getattr(sub_layer_parent, sub_layer_name)[sub_layer_index] = new_sub_layer\n else:\n setattr(sub_layer_parent, sub_layer_name, new_sub_layer)\n\n hit_layer_pattern_list.append(pattern)\n return hit_layer_pattern_list",
"def parse_layer(layer, elements, parents):\n for e in layer:\n if isinstance(e, rp.Shape):\n elements['Shapes'].append([e, parents])\n elif isinstance(e, rp.Stroke):\n elements['Strokes'].append([e, parents])\n elif isinstance(e, rp.Layer):\n parents_copy = parents[:]\n parents_copy.insert(0, e)\n elements = parse_layer(e, elements, parents_copy)\n return elements",
"def get_layers(layer_data: Dict[str, Any]) -> List[Layer]:\n try:\n l_data = layer_data['kicad_pcb']\n l_data = get_dict_by_key(l_data, 'layers')\n res: List[Layer] = list()\n for layer in l_data['layers']:\n layer_data = list(layer.values())[0]\n new_layer = Layer(name=layer_data[0], layer_type=layer_data[1])\n res.append(new_layer)\n return res\n\n except KeyError:\n print(\"Wrong file structure, unable to get layers\")",
"def all_groups(parent=None):\n if parent is None:\n parent = QgsProject.instance().layerTreeRoot()\n\n def do_a_group(grp, level=0):\n for child in grp.children():\n if isinstance(child, QgsLayerTreeGroup):\n yield child\n do_a_group(child, level=level + 1)\n\n do_a_group(parent)",
"def refreshLayerLists(self):\n self.layers = self.iface.legendInterface().layers()\n self.lineLayerIndexMap = dict()\n self.pointLayerIndexMap = dict()\n self.lineLayerList = [] # holds the filtered layer names\n self.pointLayerList = [] # holds the filtered layer names\n for i, layer in enumerate(self.layers):\n try:\n if layer.geometryType() == 0: # 0: point, 1: line\n self.pointLayerIndexMap[len(self.pointLayerList)] = i # put the index pair in the dictionary\n self.pointLayerList.append(layer.name()) # add the layer name to the list\n elif layer.geometryType() == 1: # 0: point, 1: line\n self.lineLayerIndexMap[len(self.lineLayerList)] = i # put the index pair in the dictionary\n self.lineLayerList.append(layer.name()) # add the layer name to the list\n except AttributeError:\n # if the above checks failed, i.e. because of a raster layer, skip it\n continue",
"def random_layers(self) -> List[List[tuple]]:\n root = [self.items]\n layers = [root]\n\n for i in range(self.depth):\n regions = []\n partitions = []\n for r in layers[i * 2]:\n mid = len(r) // 2\n permutation = self.random_state.permutation(r).tolist()\n p0 = tuple(sorted(permutation[:mid]))\n p1 = tuple(sorted(permutation[mid:]))\n regions.append(p0)\n regions.append(p1)\n partitions.append((p0, p1))\n layers.append(partitions)\n layers.append(regions)\n\n return layers",
"def build_layers(node: md.Document, *, disambiguate_names: bool = True) -> list[dict]:\n layers = []\n names = []\n for i, folder in enumerate(get(node, \"Folder\")):\n name = val(get1(folder, \"name\"))\n geojson = build_feature_collection(folder, name)\n if geojson[\"features\"]:\n layers.append(geojson)\n names.append(name)\n\n if not layers:\n # No folders, so use the root node\n name = val(get1(node, \"name\"))\n geojson = build_feature_collection(node, name)\n if geojson[\"features\"]:\n layers.append(geojson)\n names.append(name)\n\n if disambiguate_names:\n new_names = disambiguate(names)\n new_layers = []\n for i, layer in enumerate(layers):\n layer[\"name\"] = new_names[i]\n new_layers.append(layer)\n layers = new_layers\n\n return layers",
"def process(e):\n result = []\n current = first_unprocessed_expansion(e)\n\n # Handle cases where no processing is required\n if not current:\n return [e]\n\n copies = []\n if isinstance(current, AlternativeSet):\n dictation_children = [] # again, not necessarily only dictation.\n jsgf_only_children = []\n for child in current.children:\n if dictation_in_expansion(child):\n dictation_children.append(child)\n else:\n jsgf_only_children.append(child)\n\n # Create a replacements list, create copies of the expansion tree and\n # replace the copy of the AlternativeSet currently being processed\n if len(jsgf_only_children) == 1:\n replacements = jsgf_only_children\n else:\n replacements = [AlternativeSet(*jsgf_only_children)]\n replacements.extend(dictation_children)\n\n elif isinstance(current, (OptionalGrouping, KleeneStar)):\n # Handle not required - remove from a copy\n copy = deepcopy(current.root_expansion)\n copy_x = find_expansion(copy, current)\n copy_parent = copy_x.parent\n ancestor = copy_parent\n\n # Traverse up the parent tree and remove copy_x or one of its ancestors\n # where there is another child\n while ancestor:\n if ancestor.children > 1:\n ancestor.children.remove(copy_x)\n break\n\n copy_x = ancestor\n ancestor = ancestor.parent\n\n # copy_x or one of its ancestors was removed from the tree correctly\n # If this isn't true, the expansion is an empty tree and shouldn't be\n # added.\n if ancestor:\n copies.append(copy)\n\n # Let replacement loop handle required\n if isinstance(current, OptionalGrouping):\n replacements = [current.child]\n else:\n replacements = [Repeat(current.child)]\n else:\n replacements = []\n\n for replacement in replacements:\n # Find the copy of the current AlternativeSet being processed\n copy = deepcopy(current.root_expansion)\n copy_x = find_expansion(copy, current)\n copy_parent = copy_x.parent\n if copy_parent:\n index = copy_parent.children.index(copy_x)\n copy_parent.children.remove(copy_x)\n copy_parent.children.insert(index, replacement)\n else:\n # copy is the root expansion.\n copy = replacement\n copies.append(copy)\n\n for copy in copies:\n next_unprocessed = first_unprocessed_expansion(copy)\n if not next_unprocessed and copy not in result:\n result.append(copy)\n else:\n # Process the next unprocessed expansion and add the result\n result.extend(process(next_unprocessed))\n\n return result",
"def all_tree_items(parent=None):\n if parent is None:\n parent = QgsProject.instance().layerTreeRoot()\n\n def do_a_group(grp, level=0):\n for child in grp.children():\n if isinstance(child, QgsLayerTreeGroup):\n yield child\n do_a_group(child, level=level + 1)\n elif isinstance(child, QgsLayerTreeLayer):\n yield child\n\n do_a_group(parent)",
"def remove_all_rules ( ec2_conn, grps, deep = False, base_name = None, nat_secgrp = None ) :\n ## Must re-get all the groups, because newly created groups\n ## don't necessarily have all the rules loaded yet.\n secgrps = aws_waits( ec2_conn.get_all_security_groups, [ grp.id for grp in grps ] )\n for grp in secgrps :\n for rule in grp.rules :\n for grant in rule.grants :\n ec2_conn.revoke_security_group(\n group_id = grp.id,\n src_security_group_group_id = grant.group_id,\n from_port = rule.from_port,\n to_port = rule.to_port,\n cidr_ip = grant.cidr_ip,\n ip_protocol = rule.ip_protocol )\n for rule in grp.rules_egress :\n for grant in rule.grants :\n ec2_conn.revoke_security_group_egress(\n group_id = grp.id,\n src_group_id = grant.group_id,\n from_port = rule.from_port,\n to_port = rule.to_port,\n cidr_ip = grant.cidr_ip,\n ip_protocol = rule.ip_protocol )\n\n if deep :\n grp_ids = [ grp.id for grp in secgrps ]\n if not nat_secgrp :\n nat_secgrp = find_group( ec2_conn, base_name, 'NAT' )\n if not nat_secgrp :\n print \"Could not find NAT secgrp for deep security group removal!\"\n sys.exit( 4 )\n\n for rule in nat_secgrp.rules :\n for grant in rule.grants :\n if grant.group_id in grp_ids :\n ec2_conn.revoke_security_group(\n group_id = nat_secgrp.id,\n src_security_group_group_id = grant.group_id,\n from_port = rule.from_port,\n to_port = rule.to_port,\n cidr_ip = grant.cidr_ip,\n ip_protocol = rule.ip_protocol )\n for rule in nat_secgrp.rules_egress:\n for grant in rule.grants :\n if grant.group_id in grp_ids :\n ec2_conn.revoke_security_group_egress(\n group_id = nat_secgrp.id,\n src_group_id = grant.group_id,\n from_port = rule.from_port,\n to_port = rule.to_port,\n cidr_ip = grant.cidr_ip,\n ip_protocol = rule.ip_protocol )",
"def get_layers(self):\n layers = set()\n for element in itertools.chain(self.polygons, self.paths):\n layers.update(element.layers)\n for reference in self.references:\n layers.update(reference.ref_cell.get_layers())\n for label in self.labels:\n layers.add(label.layer)\n return layers",
"def reenable_layers(style, layers):\n layer_select = '|'.join([l.replace('\\\\', '\\\\\\\\').replace('|', '\\\\|')\n .replace('.', '\\\\.').replace('+', '\\\\+')\n .replace('*', '\\\\*') for l in layers])\n style = re.sub(\n r'(<Layer[^>]+name=[\"\\'](?:{})[\"\\'][^>]+)status=[\"\\']off[\"\\']'.format(layer_select),\n r'\\1', style, flags=re.DOTALL)\n style = re.sub(\n r'(<Layer[^>]+)status=[\"\\']off[\"\\']([^>]+name=[\"\\'](?:{})[\"\\'])'.format(layer_select),\n r'\\1\\2', style, flags=re.DOTALL)\n return style",
"def layers(self):\n\n if not self.last_node:\n return []\n return nuke.layers(self.last_node)",
"def convert_to_layers(layer_data: Union[List[str], str])-> List[Layer]:\n if not isinstance(layer_data, List):\n layer_data = [layer_data]\n # change *. to F. and B.\n star_layer = [layer for layer in layer_data if \"*.\" in layer]\n b_stars = [layer.replace(\"*\", \"B\") for layer in star_layer]\n f_stars = [layer.replace(\"*\", \"F\") for layer in star_layer]\n layer_data = [layer for layer in layer_data if \"*.\" not in layer]\n layer_data.extend(b_stars)\n layer_data.extend(f_stars)\n result: List[Layer] = list()\n for layer in layer_data:\n layer = layer.replace('\"', '')\n if layer in layer_list:\n layer_type = \"signal\" if layer in ['F.Cu', 'B.Cu'] else \"user\"\n result.append(Layer(name=layer, layer_type=layer_type))\n else:\n print('Unknown layer %s' % layer)\n return result",
"def change_grp_all_f(src,dst):\n\tcopytree2(src,dst)\n\trm_dirs(src)\n\treturn 0",
"def group_layers(inputs, outputs, biases, connections, nodes):\n layers = []\n s = set(inputs + biases)\n while True:\n # Find candidate nodes c for the next layer. These nodes should connect\n # a node in s to a node not in s.\n c = set(b for (a, b) in connections if (a in s) and (b not in s))\n # Keep only the used nodes whose entire input set is contained in s.\n t = set()\n for n in c:\n if all(a in s for (a, b) in connections if b == n):\n t.add(n)\n\n if not t:\n break\n\n layers.append(t)\n s = s.union(t)\n\n return layers",
"def filter_layers(self, root, name_dict):\n for g in root.xpath(\"//svg:g\", namespaces=inkex.NSS):\n attr = inkex.addNS('label', ns='inkscape')\n if attr not in g.attrib:\n # Not a layer, skip.\n continue\n label = g.attrib[attr]\n if '%' not in label:\n # Nothing to be done, skip.\n continue\n\n # Treat %IF_???% layers\n match = re.match('.*%IF_([^%]*)%', label)\n if match is not None:\n lookup = match.groups()[0]\n try:\n var = name_dict[lookup]\n except KeyError:\n errormsg(_('Column \"' + lookup + '\" not in the csv file'))\n continue\n if var and (var.lower() not in ('0', 'false', 'no')):\n # Set group visibility to true.\n if 'style' in g.attrib:\n del g.attrib['style']\n # Include the group.\n continue\n else:\n # Remove the group's content.\n g.clear()\n\n # Treat %UNLESS_???% layers\n match = re.match('.*%UNLESS_([^%]*)%', label)\n if match is not None:\n lookup = match.groups()[0]\n try:\n var = name_dict[lookup]\n except KeyError:\n errormsg(_('Column \"' + lookup + '\" not in the csv file'))\n continue\n if not(var) or (var.lower() in ('0', 'false', 'no')):\n # Set group visibility to true.\n if 'style' in g.attrib:\n del g.attrib['style']\n # Include the group.\n continue\n else:\n # Remove the group's content.\n g.clear()",
"def get_all_structural(self):\n\n layer_names = rs.LayerNames()\n\n layers = []\n\n for layer_name in layer_names:\n\n layer = GiraffeLayer(layer_name)\n \n if layer.is_structural():\n\n layers.append(layer)\n\n # sort layers to make sure numbered nodes are added first and to maintain regular order\n layers.sort(key = lambda x: x.to_int())\n\n return layers",
"def expand_vertex_layers(x):\n base_src = x['__id']\n base_tgt = x['__id']\n bitfield = shift_and_bitstrings(x['layers'], x['layers'])\n return expand_causal_edges_from_bitfield(bitfield, base_src, base_tgt, max_id)",
"def groups(self, deep=False, exclude_prefix=None):\n\n for group in self._groups:\n if exclude_prefix is None or not group.startswith(exclude_prefix):\n yield group\n if deep:\n yield from (group + \"/\" + subgroup\n for subgroup in self[group].groups(deep))",
"def adjust_params(self, params, permission):\n ogc_service = params.get('SERVICE', '')\n ogc_request = params.get('REQUEST', '').upper()\n\n if ogc_service == 'WMS' and ogc_request == 'GETMAP':\n requested_layers = params.get('LAYERS')\n if requested_layers:\n # replace restricted group layers with permitted sublayers\n requested_layers = requested_layers.split(',')\n restricted_group_layers = permission['restricted_group_layers']\n permitted_layers = self.expand_group_layers(\n requested_layers, restricted_group_layers\n )\n\n params['LAYERS'] = \",\".join(permitted_layers)\n\n elif ogc_service == 'WMS' and ogc_request == 'GETFEATUREINFO':\n requested_layers = params.get('QUERY_LAYERS')\n if requested_layers:\n # replace restricted group layers with permitted sublayers\n requested_layers = requested_layers.split(',')\n restricted_group_layers = permission['restricted_group_layers']\n permitted_layers = self.expand_group_layers(\n reversed(requested_layers), restricted_group_layers\n )\n\n # filter by queryable layers\n queryable_layers = permission['queryable_layers']\n permitted_layers = [\n l for l in permitted_layers if l in queryable_layers\n ]\n\n # reverse layer order\n permitted_layers = reversed(permitted_layers)\n\n params['QUERY_LAYERS'] = \",\".join(permitted_layers)\n\n elif (ogc_service == 'WMS' and\n ogc_request in ['GETLEGENDGRAPHIC', 'GETLEGENDGRAPHICS']):\n requested_layers = params.get('LAYER')\n if requested_layers:\n # replace restricted group layers with permitted sublayers\n requested_layers = requested_layers.split(',')\n restricted_group_layers = permission['restricted_group_layers']\n permitted_layers = self.expand_group_layers(\n requested_layers, restricted_group_layers\n )\n\n params['LAYER'] = \",\".join(permitted_layers)\n\n elif ogc_service == 'WMS' and ogc_request == 'GETPRINT':\n # find map layers param for GetPrint (usually 'map0:LAYERS')\n map_layers_param = None\n for key, value in params.items():\n if key.endswith(\":LAYERS\"):\n map_layers_param = key\n break\n\n requested_layers = params.get(map_layers_param)\n if requested_layers:\n # replace restricted group layers with permitted sublayers\n requested_layers = requested_layers.split(',')\n restricted_group_layers = permission['restricted_group_layers']\n permitted_layers = self.expand_group_layers(\n requested_layers, restricted_group_layers\n )\n\n params[map_layers_param] = \",\".join(permitted_layers)\n\n elif ogc_service == 'WMS' and ogc_request == 'DESCRIBELAYER':\n requested_layers = params.get('LAYERS')\n if requested_layers:\n # replace restricted group layers with permitted sublayers\n requested_layers = requested_layers.split(',')\n restricted_group_layers = permission['restricted_group_layers']\n permitted_layers = self.expand_group_layers(\n reversed(requested_layers), restricted_group_layers\n )\n\n # reverse layer order\n permitted_layers = reversed(permitted_layers)\n\n params['LAYERS'] = \",\".join(permitted_layers)",
"def updateLayer(self):\n if self.num_layers == 0:\n self.box[0].setDisabled(False)\n for i in range(1,4):\n self.box[i].setDisabled(True)\n elif self.num_layers == 1:\n self.box[0].setDisabled(False)\n self.box[1].setDisabled(False)\n for i in range(2,4):\n self.box[i].setDisabled(True)\n elif self.num_layers == 2:\n self.box[0].setDisabled(False)\n self.box[1].setDisabled(False)\n self.box[2].setDisabled(False)\n self.box[3].setDisabled(True)\n else:\n self.box[0].setDisabled(False)\n self.box[1].setDisabled(False)\n self.box[2].setDisabled(False)\n self.box[3].setDisabled(False)",
"def set_layers_affected(self):\n scene = self.set_as_active()\n\n if w_var.cb_only_selected:\n layers_affected = [False, ]*20\n\n for obj in scene.objects:\n if obj.select and obj.type == 'MESH':\n layers_affected = b_tools.manipulate_layerlists('add', layers_affected, obj.layers)\n\n else:\n layers_affected = list(scene.wirebomb.layers_affected)\n\n return layers_affected",
"def layers(self, layers):\n self._layers = layers\n prev = None\n for layer in layers:\n if not layer.inputs and prev is not None:\n layer.inputs = [prev]\n prev = layer",
"def UpdateLayers(self):\n pass",
"def replace_groups(self):\n newstr = []\n for state in self._parsed:\n newstr.append(self._handle_state(state))\n return ''.join(newstr)",
"def work_tree2(obj, **kwargs):\n if 'exclusions' in kwargs:\n exclusions = kwargs['exclusions']\n else:\n exclusions = Exclusions([], [], [])\n #groups_done = {}\n classes = NodeResults(nodetype='classes')\n params = NodeResults(nodetype='params')\n if hasattr(obj, 'hostname') and not hasattr(obj, 'name'):\n obj.name = obj.hostname\n to_index = [(obj, 1)]\n\n # loop opts\n index_pop = to_index.pop\n index_extend = to_index.extend\n egroups, eclasses, eparams = exclusions\n add_classes = classes.add_entries\n add_params = params.add_entries\n\n while to_index:\n (obj, depth) = index_pop()\n #objname = obj.name\n #if objname in groups_done and groups_done[objname] <= depth:\n #continue\n try:\n objclasses = obj.classes.exclude(classname__in=eclasses)\n add_classes(objclasses, \"classname\", \"classparams\", depth)\n objparams = obj.parameters.exclude(paramkey__in=eparams)\n add_params(objparams, \"paramkey\", \"paramvalue\", depth)\n except RuntimeError, e:\n return (\"Fail\", \"Fail\") # or just let it bubble up to the caller\n\n #groups_done[objname] = depth\n depth += 1\n children = [(group, depth) for group in obj.groups.exclude(name__in=egroups)]\n index_extend(children)\n\n return classes.as_dict(), params.as_dict() # or (classes.entries, params.entries)"
] | [
"0.6112601",
"0.58890396",
"0.560602",
"0.5505712",
"0.5438574",
"0.5391276",
"0.53751844",
"0.534607",
"0.5330864",
"0.5309719",
"0.52444714",
"0.52316076",
"0.52214426",
"0.52197057",
"0.51853377",
"0.5167963",
"0.5166585",
"0.51286995",
"0.51079917",
"0.5064433",
"0.5043467",
"0.49633062",
"0.49595672",
"0.49271947",
"0.49264368",
"0.49089283",
"0.49062207",
"0.49033776",
"0.48946887",
"0.48918775"
] | 0.7018871 | 0 |
Forward request to QGIS server and return filtered response. | def forward_request(self, method, hostname, params, permission):
ogc_service = params.get('SERVICE', '')
ogc_request = params.get('REQUEST', '').upper()
stream = True
if ogc_request in [
'GETCAPABILITIES', 'GETPROJECTSETTINGS', 'GETFEATUREINFO',
'DESCRIBEFEATURETYPE'
]:
# do not stream if response is filtered
stream = False
# forward to QGIS server
project_name = permission['qgs_project']
url = urljoin(self.qgis_server_url, project_name)
if method == 'POST':
# log forward URL and params
self.logger.info("Forward POST request to %s" % url)
self.logger.info(" %s" % ("\n ").join(
("%s = %s" % (k, v) for k, v, in params.items()))
)
response = requests.post(url, headers={'host': hostname},
data=params, stream=stream)
else:
# log forward URL and params
self.logger.info("Forward GET request to %s?%s" %
(url, urlencode(params)))
response = requests.get(url, headers={'host': hostname},
params=params, stream=stream)
if response.status_code != requests.codes.ok:
# handle internal server error
self.logger.error("Internal Server Error:\n\n%s" % response.text)
exception = {
'code': "UnknownError",
'message': "The server encountered an internal error or "
"misconfiguration and was unable to complete your "
"request."
}
return Response(
self.service_exception(exception['code'], exception['message']),
content_type='text/xml; charset=utf-8',
status=200
)
# return filtered response
elif ogc_service == 'WMS' and ogc_request in [
'GETCAPABILITIES', 'GETPROJECTSETTINGS'
]:
return self.wms_getcapabilities(response, params, permission)
elif ogc_service == 'WMS' and ogc_request == 'GETFEATUREINFO':
return self.wms_getfeatureinfo(response, params, permission)
# TODO: filter DescribeFeatureInfo
else:
# unfiltered streamed response
return Response(
stream_with_context(response.iter_content(chunk_size=16*1024)),
content_type=response.headers['content-type'],
status=response.status_code
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def proxy_request_to_geoserver(map_protocol: str, query_string: str) -> Response:\n geoserver_url = current_app.config['GEOSERVER_URL']\n geoserver_request_url = f'{geoserver_url}/dmis/{map_protocol}?{query_string}'\n\n raw_response = requests.get(geoserver_request_url)\n\n if raw_response.status_code != 200:\n current_app.logger.error(f'Geoserver returned error response {raw_response.status_code}')\n\n # Flask can't serialize response from requests, so have to generate a Flask response\n flask_response = Response(raw_response, status=raw_response.status_code,\n content_type=raw_response.headers['Content-Type'])\n\n return flask_response",
"def _request(self, *args):\n self._silent_request(*args)\n return self._get_response()",
"def __call__(self, request: HttpRequest) -> HttpResponse:\n ip_address = remote_addr(request)\n request.geo_data = self.geo_data(ip_address)\n response = self.get_response(request)\n if self.add_response_headers(request):\n annotate_response(response, request.geo_data)\n return response",
"def handle_request(self, path=None):\n req = get_request()\n resp = super().handle_request(req)\n return to_response(resp)",
"def exec_worker_map_filter(self, endpoint, args, request):\n if endpoint != 'search':\n raise APIException(\"service of type 'map_filter' does \"\n \"not support /list\")\n\n if is_https(self.url) and request.method == 'GET':\n method = tls1_get\n else:\n method = getattr(requests, request.method.lower())\n try:\n headers = {'Authorization':\n request.headers['Authorization']}\n except KeyError:\n headers = {}\n response = method(self.url,\n params=request.args,\n headers=headers,\n stream=True)\n if response.ok:\n path = '.'.join(filter(None, [self.json_path, 'item']))\n results = ijson.items(FileLikeWrapper(response), path)\n\n return Response(\n result_generator(process_by_client(self, results),\n lambda: {}),\n mimetype='application/json')\n else:\n raise APIException('response from external service: {}'\n .format(response))",
"def BrowserFilter(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def __call__(self, request):\n try:\n if self.filter and self.filter(request):\n request.ipinfo = None\n else:\n request.ipinfo = self.ipinfo.getDetails(\n self.ip_selector.get_ip(request)\n )\n except Exception as exc:\n request.ipinfo = None\n LOGGER.error(traceback.format_exc())\n\n response = self.get_response(request)\n return response",
"def _assemble_and_send_request(self):\r\n # Fire off the query.\r\n response = self.client.service.processShipment(WebAuthenticationDetail=self.WebAuthenticationDetail,\r\n ClientDetail=self.ClientDetail,\r\n TransactionDetail=self.TransactionDetail,\r\n Version=self.VersionId,\r\n RequestedShipment=self.RequestedShipment)\r\n return response",
"def _process_request(self, request, response):\n ...",
"def handle_request(self):\n\t\ttry:\n\t\t\tr,w,e=select.select([self.socket],[],[], 1.0)\n\t\t\tif not r:\n\t\t\t\treturn\n\t\t\trequest, client_address=self.socket.accept()\n\t\texcept:\n\t\t\treturn\t\t\n\t\t\n\t\ttry:\n\t\t\tif self.debug:\n\t\t\t\tprint \"got request\"\n\t\t\tself.process_request(request, client_address)\n\t\texcept:\n\t\t\tself.handle_error(request, client_address)",
"def bind_forward(request):\n zones_list = export_bind_forward()\n return Response(zones_list)",
"def process_request(self, request):\n raise NotImplementedError('process_request not implemented in BaseService')",
"def handle_map_request(map_protocol: str, query_string: str) -> Response:\n if map_protocol.lower() == 'wms':\n return MapService.proxy_request_to_geoserver(map_protocol, query_string)\n elif map_protocol.lower() == 'geojson':\n return MapService.handle_geojson_request(query_string)\n else:\n raise MapServiceClientError(f'Unknown map protocol: {map_protocol}')",
"def run(self, req):\n\n if(self.isROS):\n featureList = []\n self.fov = self.geoPathToGPD(req.fov)\n else:\n self.fov = self.pickleToGPD(req)\n\n\n \n response = []\n \n featuresInView = gpd.sjoin(self.featureDataframe, self.fov, op='within') \n for index, feature in featuresInView.iterrows():\n if(self.isROS):\n response.append(enc_feature_msg(feature[\"name\"], feature[\"longitude\"],feature[\"latitude\"], feature[\"fid\"]))\n else:\n response.append((feature[\"name\"], feature[\"longitude\"],feature[\"latitude\"], feature[\"fid\"]))\n if(self.isROS):\n return enc_query_srvResponse(response)\n return response",
"def handle(self):\n data = self.request.recv(1024)\n self.request.send(data)",
"def render(self, request):\r\n # set up and evaluate a connection to the target server\r\n if self.port == 80:\r\n host = self.host\r\n else:\r\n host = \"%s:%d\" % (self.host, self.port)\r\n request.requestHeaders.addRawHeader('host', host)\r\n request.content.seek(0, 0)\r\n qs = urlparse.urlparse(request.uri)[4]\r\n if qs:\r\n rest = self.path + '?' + qs\r\n else:\r\n rest = self.path\r\n\r\n global_self = self.getGlobalSelf()\r\n\r\n clientFactory = self.proxyClientFactoryClass(\r\n request.method, rest, request.clientproto,\r\n request.getAllHeaders(), request.content.read(), request,\r\n global_self # this is new\r\n )\r\n self.reactor.connectTCP(self.host, self.port, clientFactory)\r\n\r\n return NOT_DONE_YET",
"def __call__(self, request):\n response = self.get_request(request)\n return response",
"def proxy_request(event, *_):\n print(f'EVENT {json.dumps(event)}')\n print(f'BASE_PATH {BASE_PATH!r}')\n\n # Get HTTP request method/path\n method = event.get('httpMethod')\n path = event.get('path').strip('/')\n\n # Get HTTP response\n if method in ['GET', 'HEAD']:\n res = get_response(path)\n else:\n res = reject(403)\n\n # Return proxy response\n status = res['statusCode']\n print(f'RESPONSE [{status}] {json.dumps(res)}')\n return res",
"def _filter_in_request(self):\n pass",
"def RequestViewStream(self, request_iterator, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def _request(self, *args):\n raise NotImplementedError",
"def handle_request(self):\n\t\ttry:\n\t\t\trequest, client_address = self.get_request()\n\t\texcept socket.error:\n\t\t\treturn\n\t\tif self.verify_request(request, client_address):\n\t\t\ttry:\n\t\t\t\tself.process_request(request, client_address)\n\t\t\texcept:\n\t\t\t\tself.handle_error(request, client_address)\n\t\t\t\tself.close_request(request)",
"def process_response(self, request, response):\n return response",
"def process_response(self, request, response):\n return response",
"def BrowserFilterAsync(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')",
"def __call__(self):\n params, method = parse_xmlrpc_request(self.request)\n return xmlrpc_response(getattr(self,method)(*params))",
"def handler(self):\n\t\treturn self.handle_request",
"def _send_request(self):\n route_chosen = self.comboBox_route_list.currentText()\n route_id = route_chosen.split(',')[0] #to get the id of the route\n trip_headsign_chosen = self.comboBox_trip_headsign_list.currentText()\n stop_chosen = self.comboBox_stop_list.currentText()\n self.request(route_id, trip_headsign_chosen, stop_chosen)",
"def export_serveRequest(self,requestType):\n gLogger.info(\"RequestHandler.serveRequest: Attempting to serve request type\", requestType)\n try:\n res = requestDB.serveRequest(requestType)\n return res\n except Exception,x:\n errStr = \"RequestManagerHandler.serveRequest: Exception while serving request.\"\n gLogger.exception(errStr,requestType,lException=x)\n return S_ERROR(errStr)",
"def render_POST(self, request, query=None):\n # make a parser and parse the request\n parser = qp.QueryParser(request)\n if not query: query = request.content.read() \n try: \n # run the query locally\n d = parser.runquery(self.db, query)\n except Exception, e:\n log.err(\"Failing query: \" + str(query))\n log.err()\n setResponseCode(request, e, 400)\n return str(e)\n else:\n # and send the reply\n request.setHeader('Content-type', 'application/json')\n\n if not query.strip().startswith('apply'):\n # apply streams the output out itself\n d.addCallback(lambda reply: (request, reply))\n d.addCallback(self.send_reply)\n d.addErrback(lambda x: self.send_error(request, x))\n return server.NOT_DONE_YET"
] | [
"0.60810876",
"0.59270155",
"0.5860892",
"0.578528",
"0.575682",
"0.5711819",
"0.5620652",
"0.55773866",
"0.5518297",
"0.5466804",
"0.5417686",
"0.5411383",
"0.534138",
"0.53378296",
"0.5283023",
"0.5280902",
"0.52778715",
"0.5232238",
"0.52306527",
"0.52265555",
"0.5209146",
"0.52042294",
"0.51988274",
"0.51988274",
"0.51934093",
"0.5149318",
"0.5128653",
"0.5127765",
"0.51223314",
"0.5106918"
] | 0.70910263 | 0 |
Return WMS GetCapabilities or GetProjectSettings filtered by permissions. | def wms_getcapabilities(self, response, params, permission):
xml = response.text
if response.status_code == requests.codes.ok:
# parse capabilities XML
ElementTree.register_namespace('', 'http://www.opengis.net/wms')
ElementTree.register_namespace('qgs', 'http://www.qgis.org/wms')
ElementTree.register_namespace('sld', 'http://www.opengis.net/sld')
ElementTree.register_namespace(
'xlink', 'http://www.w3.org/1999/xlink'
)
root = ElementTree.fromstring(xml)
# use default namespace for XML search
# namespace dict
ns = {'ns': 'http://www.opengis.net/wms'}
# namespace prefix
np = 'ns:'
if not root.tag.startswith('{http://'):
# do not use namespace
ns = {}
np = ''
root_layer = root.find('%sCapability/%sLayer' % (np, np), ns)
if root_layer is not None:
# remove broken info format 'application/vnd.ogc.gml/3.1.1'
feature_info = root.find('.//%sGetFeatureInfo' % np, ns)
if feature_info is not None:
for format in feature_info.findall('%sFormat' % np, ns):
if format.text == 'application/vnd.ogc.gml/3.1.1':
feature_info.remove(format)
# filter and update layers by permission
permitted_layers = permission['public_layers']
queryable_layers = permission['queryable_layers']
for group in root_layer.findall('.//%sLayer/..' % np, ns):
for layer in group.findall('%sLayer' % np, ns):
layer_name = layer.find('%sName' % np, ns).text
if layer_name not in permitted_layers:
# remove not permitted layer
group.remove(layer)
else:
# update queryable
if layer_name in queryable_layers:
layer.set('queryable', '1')
else:
layer.set('queryable', '0')
# get permitted attributes for layer
permitted_attributes = permission['layers'].get(
layer_name, {}
)
# remove layer displayField if attribute not permitted
# (for QGIS GetProjectSettings)
display_field = layer.get('displayField')
if (display_field and
display_field not in permitted_attributes):
layer.attrib.pop('displayField')
# filter layer attributes by permission
# (for QGIS GetProjectSettings)
attributes = layer.find('%sAttributes' % np, ns)
if attributes is not None:
for attr in attributes.findall(
'%sAttribute' % np, ns
):
if attr.get('name') not in permitted_attributes:
# remove not permitted attribute
attributes.remove(attr)
# update queryable for root layer
if queryable_layers:
root_layer.set('queryable', '1')
else:
root_layer.set('queryable', '0')
# filter LayerDrawingOrder by permission
# (for QGIS GetProjectSettings)
layer_drawing_order = root.find(
'.//%sLayerDrawingOrder' % np, ns
)
if layer_drawing_order is not None:
layers = layer_drawing_order.text.split(',')
# remove not permitted layers
layers = [
l for l in layers if l in permitted_layers
]
layer_drawing_order.text = ','.join(layers)
# filter ComposerTemplates by permission
# (for QGIS GetProjectSettings)
templates = root.find(
'%sCapability/%sComposerTemplates' % (np, np), ns
)
if templates is not None:
permitted_templates = permission.get('print_templates', [])
for template in templates.findall(
'%sComposerTemplate' % np, ns
):
template_name = template.get('name')
if template_name not in permitted_templates:
# remove not permitted print template
templates.remove(template)
if not templates.find('%sComposerTemplate' % np, ns):
# remove ComposerTemplates if empty
root.find('%sCapability' % np, ns).remove(templates)
# write XML to string
xml = ElementTree.tostring(
root, encoding='utf-8', method='xml'
)
return Response(
xml,
content_type=response.headers['content-type'],
status=response.status_code
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def octopus_permissions_get(self, msg, args):\r\n return self.permissions.get_permissions()",
"def get_permissions(self):\n return self.settings[\"permissions\"]",
"def get_permissions():\n return config.get_cfg_storage(ID_PERMISSION)",
"def permissions(self) -> 'outputs.PermissionsResponse':\n return pulumi.get(self, \"permissions\")",
"def getcapabilities(self):\n reader = WFSCapabilitiesReader(self.version, auth=self.auth)\n return openURL(\n reader.capabilities_url(self.url), timeout=self.timeout,\n headers=self.headers, auth=self.auth\n )",
"def permissions(self):\n return [DSSWorkspacePermissionItem(permission) for permission in self.settings['permissions']]",
"def get_capabilities(params,defaults):\n cap = CapabilitiesController (params,defaults)\n return cap.get_capabilities()",
"async def fetch_permissions(self, condensed=False):\n\n logging.debug(\"Getting permissions (%scondensed)\" % (\n \"\" if condensed else \"not \"))\n\n if condensed:\n perms = await self.client.request.get(\n \"/auth/permissions\", params={\"condensed\": True})\n return perms[\"data\"]\n else:\n perms = await self.client.request.get(\"/auth/permissions\")\n return [BasePermission.build_permission(\n self.client, perm, self.loop) for perm in perms[\"data\"]]",
"def get_capabilities(self):\n\n service = self.__get_service()\n capability = self.__get_capability()\n contents = {\"service\" : service, \"capability\" : capability}\n return contents, self.params['format']",
"def get_capabilities(self, method='get'):\n self.client.getcapabilities()\n\n self._has_capabilities = True",
"def __get_capability(self):\n requests = self.__get_capability_request()\n exception = self.__get_capability_exception()\n layers = self.__get_capability_layer()\n \n capability = { \"requests\": requests,\n \"exception\" : exception,\n \"layers\" : layers}\n return capability",
"def permissions(self):\n return self.get_permissions()",
"def control_capabilities(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"control_capabilities\"), kwargs)",
"def get_cli_permissions():\n query = {\"type\": \"op\", \"cmd\": \"<show><cli><permissions></permissions></cli></show>\"}\n\n return __proxy__[\"panos.call\"](query)",
"def get_permissions(self):\n permissions = [IsAdminUser]\n return [permission() for permission in permissions]",
"def permissions(self) -> Optional[pulumi.Input['KeyVaultSpecAccessPoliciesPermissionsArgs']]:\n return pulumi.get(self, \"permissions\")",
"def get_permissions(self):\n if self.action in ['signup', 'login']:\n permissions = [AllowAny]\n return [permission() for permission in permissions]",
"def get_permissions(self):\n if self.action in ['signup', 'login']:\n permissions = [AllowAny]\n elif self.action in ['retrieve']:\n permissions = [IsAuthenticated, IsAccountOwner]\n else:\n permissions = [AllowAny]\n return [p() for p in permissions]",
"def permissions(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['DataSetResourcePermissionArgs']]]]:\n return pulumi.get(self, \"permissions\")",
"def get_capabilities(http_conn):\n parsed, conn = http_conn\n headers = {'Accept-Encoding': 'gzip'}\n conn.request('GET', parsed.path, '', headers)\n resp = conn.getresponse()\n body = resp.read()\n http_log((parsed.geturl(), 'GET',), {'headers': headers}, resp, body)\n if resp.status < 200 or resp.status >= 300:\n raise ClientException.from_response(\n resp, 'Capabilities GET failed', body)\n resp_headers = resp_header_dict(resp)\n return parse_api_response(resp_headers, body)",
"def get_capabilities(self):\n return Capabilities(javabridge.call(self.jobject, \"getCapabilities\", \"()Lweka/core/Capabilities;\"))",
"def permissions(self) -> pulumi.Output[Optional[Sequence['outputs.DataSetResourcePermission']]]:\n return pulumi.get(self, \"permissions\")",
"def permissions(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"permissions\")",
"def permissions(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"permissions\")",
"def capability(self):\n code, data, capabilities = (\n self.__send_command(\"CAPABILITY\", withcontent=True))\n if code == \"OK\":\n return capabilities\n return None",
"def get_permissions(self):\n if self.action in ['list', 'retrieve']:\n permission_classes = [IsAuthenticated]\n else:\n permission_classes = [IsAdminUser]\n return [permission() for permission in permission_classes]",
"def get_capabilities(self, config_section):\n get_opt = self.shishito_support.get_opt\n test_platform = self.shishito_support.test_platform\n if (test_platform == 'web'):\n # Get logging levels from config\n logging_driver = get_opt(config_section, 'logging_driver', default='WARNING').upper()\n logging_browser = get_opt(config_section, 'logging_browser', default='WARNING').upper()\n logging_performance = get_opt(config_section, 'logging_performance', default='WARNING').upper()\n\n capabilities = {\n 'browserName': get_opt(config_section, 'browser').lower(),\n 'version': get_opt(config_section, 'browser_version'),\n 'resolution': get_opt(config_section, 'resolution'),\n 'javascriptEnabled': True,\n 'acceptSslCerts': get_opt('accept_ssl_cert').lower() == 'true',\n 'goog:loggingPrefs': {'driver': logging_driver,\n 'browser': logging_browser,\n 'performance': logging_performance}\n }\n if (test_platform == 'mobile'):\n capabilities = {\n 'browserName': get_opt(config_section, 'browser').lower(),\n 'javascriptEnabled': True,\n 'acceptSslCerts': get_opt('accept_ssl_cert').lower() == 'true',\n }\n\n self.add_cmdline_arguments_to_browser(capabilities, config_section)\n self.add_extensions_to_browser(capabilities, config_section)\n self.add_experimental_option(capabilities, config_section)\n return capabilities",
"async def get_manipulation_permissions(self, requester: Requester,\n model: Model) -> Tuple[\n ManipulationPermissions, Dict[str, Any]]:\n raise NotImplementedError",
"def get_criterions(self, **kwargs):\n return self.get('criterions.json', **kwargs)",
"def get(self):\n try:\n response = requests.get(CONF.api.github_api_capabilities_url)\n LOG.debug(\"Response Status: %s / Used Requests Cache: %s\" %\n (response.status_code,\n getattr(response, 'from_cache', False)))\n if response.status_code == 200:\n regex = re.compile('^[0-9]{4}\\.[0-9]{2}\\.json$')\n capability_files = []\n for rfile in response.json():\n if rfile[\"type\"] == \"file\" and regex.search(rfile[\"name\"]):\n capability_files.append(rfile[\"name\"])\n return capability_files\n else:\n LOG.warning('Github returned non-success HTTP '\n 'code: %s' % response.status_code)\n pecan.abort(response.status_code)\n\n except requests.exceptions.RequestException as e:\n LOG.warning('An error occurred trying to get GitHub '\n 'repository contents: %s' % e)\n pecan.abort(500)"
] | [
"0.63442475",
"0.61894834",
"0.5825412",
"0.5782706",
"0.57521266",
"0.57215184",
"0.566947",
"0.566892",
"0.55879563",
"0.5579568",
"0.5542131",
"0.5524528",
"0.5521492",
"0.5516311",
"0.5491324",
"0.54859173",
"0.5396104",
"0.53960353",
"0.5392035",
"0.53709376",
"0.53560346",
"0.5332197",
"0.53198445",
"0.5306845",
"0.5299511",
"0.5289863",
"0.52807295",
"0.52806973",
"0.5264148",
"0.5257484"
] | 0.62657297 | 1 |
Return WMS GetFeatureInfo filtered by permissions. | def wms_getfeatureinfo(self, response, params, permission):
feature_info = response.text
if response.status_code == requests.codes.ok:
info_format = params.get('INFO_FORMAT', 'text/plain')
if info_format == 'text/plain':
feature_info = self.wms_getfeatureinfo_plain(
feature_info, permission
)
elif info_format == 'text/html':
feature_info = self.wms_getfeatureinfo_html(
feature_info, permission
)
elif info_format == 'text/xml':
feature_info = self.wms_getfeatureinfo_xml(
feature_info, permission
)
elif info_format == 'application/vnd.ogc.gml':
feature_info = self.wms_getfeatureinfo_gml(
feature_info, permission
)
# NOTE: application/vnd.ogc.gml/3.1.1 is broken in QGIS server
return Response(
feature_info,
content_type=response.headers['content-type'],
status=response.status_code
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wms_getfeatureinfo_plain(self, feature_info, permission):\n \"\"\"\n GetFeatureInfo results\n\n Layer 'Grundstuecke'\n Feature 1\n t_id = '1234'\n nbident = 'SO0123456789'\n nummer = '1234'\n ...\n \"\"\"\n if feature_info.startswith('GetFeatureInfo'):\n lines = []\n\n layer_pattern = re.compile(\"^Layer '(.+)'$\")\n attr_pattern = re.compile(\"^(.+) = .+$\")\n permitted_attributes = {}\n\n # filter feature attributes by permission\n for line in feature_info.splitlines():\n m = attr_pattern.match(line)\n if m is not None:\n # attribute line\n # check if layer attribute is permitted\n attr = m.group(1)\n if attr not in permitted_attributes:\n # skip not permitted attribute\n continue\n else:\n m = layer_pattern.match(line)\n if m is not None:\n # layer line\n # get permitted attributes for layer\n current_layer = m.group(1)\n permitted_attributes = self.permitted_info_attributes(\n current_layer, permission\n )\n\n # keep line\n lines.append(line)\n\n # join filtered lines\n feature_info = '\\n'.join(lines)\n\n return feature_info",
"def wms_getfeatureinfo_xml(self, feature_info, permission):\n ElementTree.register_namespace('', 'http://www.opengis.net/ogc')\n root = ElementTree.fromstring(feature_info)\n\n for layer in root.findall('./Layer'):\n # get permitted attributes for layer\n permitted_attributes = self.permitted_info_attributes(\n layer.get('name'), permission\n )\n\n for feature in layer.findall('Feature'):\n for attr in feature.findall('Attribute'):\n if attr.get('name') not in permitted_attributes:\n # remove not permitted attribute\n feature.remove(attr)\n\n # write XML to string\n return ElementTree.tostring(root, encoding='utf-8', method='xml')",
"def wms_getfeatureinfo_gml(self, feature_info, permission):\n ElementTree.register_namespace('gml', 'http://www.opengis.net/gml')\n ElementTree.register_namespace('qgs', 'http://qgis.org/gml')\n ElementTree.register_namespace('wfs', 'http://www.opengis.net/wfs')\n root = ElementTree.fromstring(feature_info)\n\n # namespace dict\n ns = {\n 'gml': 'http://www.opengis.net/gml',\n 'qgs': 'http://qgis.org/gml'\n }\n\n qgs_attr_pattern = re.compile(\"^{%s}(.+)\" % ns['qgs'])\n\n for feature in root.findall('./gml:featureMember', ns):\n for layer in feature:\n # get layer name from fid, as spaces are removed in tag name\n layer_name = '.'.join(layer.get('fid', '').split('.')[:-1])\n\n # get permitted attributes for layer\n permitted_attributes = self.permitted_info_attributes(\n layer_name, permission\n )\n\n for attr in layer.findall('*'):\n m = qgs_attr_pattern.match(attr.tag)\n if m is not None:\n # attribute tag\n attr_name = m.group(1)\n if attr_name not in permitted_attributes:\n # remove not permitted attribute\n layer.remove(attr)\n\n # write XML to string\n return ElementTree.tostring(\n root, encoding='utf-8', method='xml', short_empty_elements=False\n )",
"def permitted_info_attributes(self, info_layer_name, permission):\n # get WMS layer name for info result layer\n wms_layer_name = permission.get('feature_info_aliases', {}) \\\n .get(info_layer_name, info_layer_name)\n\n # return permitted attributes for layer\n return permission['layers'].get(wms_layer_name, {})",
"def wms_getfeatureinfo_html(self, feature_info, permission):\n # NOTE: info content is not valid XML, parse as text\n if feature_info.startswith('<HEAD>'):\n lines = []\n\n layer_pattern = re.compile(\n \"^<TR>.+>Layer<\\/TH><TD>(.+)<\\/TD><\\/TR>$\"\n )\n table_pattern = re.compile(\"^.*<TABLE\")\n attr_pattern = re.compile(\"^<TR><TH>(.+)<\\/TH><TD>.+</TD><\\/TR>$\")\n next_tr_is_feature = False\n permitted_attributes = {}\n\n for line in feature_info.splitlines():\n m = attr_pattern.match(line)\n if m is not None:\n # attribute line\n # check if layer attribute is permitted\n attr = m.group(1)\n if next_tr_is_feature:\n # keep 'Feature', filter subsequent attributes\n next_tr_is_feature = False\n elif attr not in permitted_attributes:\n # skip not permitted attribute\n continue\n elif table_pattern.match(line):\n # mark next tr as 'Feature'\n next_tr_is_feature = True\n else:\n m = layer_pattern.match(line)\n if m is not None:\n # layer line\n # get permitted attributes for layer\n current_layer = m.group(1)\n permitted_attributes = self.permitted_info_attributes(\n current_layer, permission\n )\n\n # keep line\n lines.append(line)\n\n # join filtered lines\n feature_info = '\\n'.join(lines)\n\n return feature_info",
"def get_features(self, request, **kwargs):\n raise NotImplementedError()",
"def wms_getcapabilities(self, response, params, permission):\n xml = response.text\n\n if response.status_code == requests.codes.ok:\n # parse capabilities XML\n ElementTree.register_namespace('', 'http://www.opengis.net/wms')\n ElementTree.register_namespace('qgs', 'http://www.qgis.org/wms')\n ElementTree.register_namespace('sld', 'http://www.opengis.net/sld')\n ElementTree.register_namespace(\n 'xlink', 'http://www.w3.org/1999/xlink'\n )\n root = ElementTree.fromstring(xml)\n\n # use default namespace for XML search\n # namespace dict\n ns = {'ns': 'http://www.opengis.net/wms'}\n # namespace prefix\n np = 'ns:'\n if not root.tag.startswith('{http://'):\n # do not use namespace\n ns = {}\n np = ''\n\n root_layer = root.find('%sCapability/%sLayer' % (np, np), ns)\n if root_layer is not None:\n # remove broken info format 'application/vnd.ogc.gml/3.1.1'\n feature_info = root.find('.//%sGetFeatureInfo' % np, ns)\n if feature_info is not None:\n for format in feature_info.findall('%sFormat' % np, ns):\n if format.text == 'application/vnd.ogc.gml/3.1.1':\n feature_info.remove(format)\n\n # filter and update layers by permission\n permitted_layers = permission['public_layers']\n queryable_layers = permission['queryable_layers']\n for group in root_layer.findall('.//%sLayer/..' % np, ns):\n for layer in group.findall('%sLayer' % np, ns):\n layer_name = layer.find('%sName' % np, ns).text\n if layer_name not in permitted_layers:\n # remove not permitted layer\n group.remove(layer)\n else:\n # update queryable\n if layer_name in queryable_layers:\n layer.set('queryable', '1')\n else:\n layer.set('queryable', '0')\n\n # get permitted attributes for layer\n permitted_attributes = permission['layers'].get(\n layer_name, {}\n )\n\n # remove layer displayField if attribute not permitted\n # (for QGIS GetProjectSettings)\n display_field = layer.get('displayField')\n if (display_field and\n display_field not in permitted_attributes):\n layer.attrib.pop('displayField')\n\n # filter layer attributes by permission\n # (for QGIS GetProjectSettings)\n attributes = layer.find('%sAttributes' % np, ns)\n if attributes is not None:\n for attr in attributes.findall(\n '%sAttribute' % np, ns\n ):\n if attr.get('name') not in permitted_attributes:\n # remove not permitted attribute\n attributes.remove(attr)\n\n # update queryable for root layer\n if queryable_layers:\n root_layer.set('queryable', '1')\n else:\n root_layer.set('queryable', '0')\n\n # filter LayerDrawingOrder by permission\n # (for QGIS GetProjectSettings)\n layer_drawing_order = root.find(\n './/%sLayerDrawingOrder' % np, ns\n )\n if layer_drawing_order is not None:\n layers = layer_drawing_order.text.split(',')\n # remove not permitted layers\n layers = [\n l for l in layers if l in permitted_layers\n ]\n layer_drawing_order.text = ','.join(layers)\n\n # filter ComposerTemplates by permission\n # (for QGIS GetProjectSettings)\n templates = root.find(\n '%sCapability/%sComposerTemplates' % (np, np), ns\n )\n if templates is not None:\n permitted_templates = permission.get('print_templates', [])\n for template in templates.findall(\n '%sComposerTemplate' % np, ns\n ):\n template_name = template.get('name')\n if template_name not in permitted_templates:\n # remove not permitted print template\n templates.remove(template)\n\n if not templates.find('%sComposerTemplate' % np, ns):\n # remove ComposerTemplates if empty\n root.find('%sCapability' % np, ns).remove(templates)\n\n # write XML to string\n xml = ElementTree.tostring(\n root, encoding='utf-8', 
method='xml'\n )\n\n return Response(\n xml,\n content_type=response.headers['content-type'],\n status=response.status_code\n )",
"def octopus_permissions_get(self, msg, args):\r\n return self.permissions.get_permissions()",
"def features(self) -> Optional[pulumi.Input['DevToolPortalFeatureSettingsArgs']]:\n return pulumi.get(self, \"features\")",
"def _getInfo(self):\n # args for Vimba call\n featureInfo = structs.VimbaFeatureInfo()\n\n # Vimba DLL will return an error code\n errorCode = VimbaDLL.featureInfoQuery(self._handle,\n self._name,\n byref(featureInfo),\n sizeof(featureInfo))\n if errorCode != 0:\n raise VimbaException(errorCode)\n\n return featureInfo",
"def readFeatures(self):\n\t\treturn self._fileSystem.readFeatures()",
"def compute_filtered_features(self, features):\n return [\n feature\n for feature in features\n if \"\".join(feature.qualifiers.get(\"is_edit\", \"false\")) != \"true\"\n ]",
"def get_feature_permission(request, feature, operation=None):\n feature_info = FEATURE_MAP.get(feature)\n if not feature_info:\n raise ValueError(\"The requested feature '%(feature)s' is unknown. \"\n \"Please make sure to specify a feature defined \"\n \"in FEATURE_MAP.\")\n\n # Check dashboard settings\n feature_config = feature_info.get('config')\n if feature_config:\n if not setting_utils.get_dict_config('OPENSTACK_NEUTRON_NETWORK',\n feature_config['name']):\n return False\n\n # Check policy\n feature_policies = feature_info.get('policies')\n if feature_policies:\n policy_name = feature_policies.get(operation)\n if not policy_name:\n raise ValueError(\"The 'operation' parameter for \"\n \"get_feature_permission '%(feature)s' \"\n \"is invalid. It should be one of %(allowed)s\"\n % {'feature': feature,\n 'allowed': ' '.join(feature_policies.keys())})\n role = (('network', policy_name),)\n if not policy.check(role, request):\n return False\n\n # Check if a required extension is enabled\n feature_extension = feature_info.get('extension')\n if feature_extension:\n try:\n return is_extension_supported(request, feature_extension)\n except Exception:\n LOG.info(\"Failed to check Neutron '%s' extension is not supported\",\n feature_extension)\n return False\n\n # If all checks are passed, now a given feature is allowed.\n return True",
"def supported_features(self):\n return self._support_flags",
"def supported_features(self):\n return self._support_flags",
"def supported_features(self):\n return self._support_flags",
"def supported_features(self):\n return self._support_flags",
"def get_features(self):\n return self._features",
"def specialFeatures(self):\r\n return self._specialFeatures",
"def special_features(self):\r\n return self._special_features",
"def getFeatures(self, state, action):\n util.raiseNotDefined()",
"def getFeatures(self, state, action):\n util.raiseNotDefined()",
"def getFeatures(self, state, action):\n util.raiseNotDefined()",
"def getFeatures(self, state, action):\n util.raiseNotDefined()",
"def get_who_features(self):\n return self.who_made_features",
"def get_feature_flags(self, account, signing_account=None):\n account = Account(account, hive_instance=self.hive)\n feature_flags = self._conveyor_method(account, signing_account,\n \"conveyor.get_feature_flags\",\n [account['name']])\n if \"result\" in feature_flags:\n return feature_flags[\"result\"]\n else:\n return feature_flags",
"def _get_features(self, session):\n feature_utils.qsr_feature_extractor( session, get_location_objects = feature_utils.get_location_objects_most_active )\n feature_utils.standardize_simple(session, self.config)\n\n # feature_utils.marker_feature_extractor( session, get_location_objects = feature_utils.get_location_objects_most_active )\n\n return session[SESSION_FEAT]",
"def findFeatures(self):\n\t\tpass",
"def supported_features(self):\n return SUPPORT_FLAGS",
"def supported_features(self):\n return SUPPORT_FLAGS"
] | [
"0.67674375",
"0.62862855",
"0.62192285",
"0.6204722",
"0.61557966",
"0.60812426",
"0.60116345",
"0.5949941",
"0.5930301",
"0.5890336",
"0.58115965",
"0.572291",
"0.5692847",
"0.5690536",
"0.5690536",
"0.5690536",
"0.5690536",
"0.56159514",
"0.5606485",
"0.55573076",
"0.55388355",
"0.55388355",
"0.55388355",
"0.55278903",
"0.5519559",
"0.5501582",
"0.548649",
"0.5467781",
"0.5467216",
"0.5467216"
] | 0.65387255 | 1 |
Parse feature info text and filter feature attributes by permission. | def wms_getfeatureinfo_plain(self, feature_info, permission):
"""
GetFeatureInfo results
Layer 'Grundstuecke'
Feature 1
t_id = '1234'
nbident = 'SO0123456789'
nummer = '1234'
...
"""
if feature_info.startswith('GetFeatureInfo'):
lines = []
layer_pattern = re.compile("^Layer '(.+)'$")
attr_pattern = re.compile("^(.+) = .+$")
permitted_attributes = {}
# filter feature attributes by permission
for line in feature_info.splitlines():
m = attr_pattern.match(line)
if m is not None:
# attribute line
# check if layer attribute is permitted
attr = m.group(1)
if attr not in permitted_attributes:
# skip not permitted attribute
continue
else:
m = layer_pattern.match(line)
if m is not None:
# layer line
# get permitted attributes for layer
current_layer = m.group(1)
permitted_attributes = self.permitted_info_attributes(
current_layer, permission
)
# keep line
lines.append(line)
# join filtered lines
feature_info = '\n'.join(lines)
return feature_info | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wms_getfeatureinfo_html(self, feature_info, permission):\n # NOTE: info content is not valid XML, parse as text\n if feature_info.startswith('<HEAD>'):\n lines = []\n\n layer_pattern = re.compile(\n \"^<TR>.+>Layer<\\/TH><TD>(.+)<\\/TD><\\/TR>$\"\n )\n table_pattern = re.compile(\"^.*<TABLE\")\n attr_pattern = re.compile(\"^<TR><TH>(.+)<\\/TH><TD>.+</TD><\\/TR>$\")\n next_tr_is_feature = False\n permitted_attributes = {}\n\n for line in feature_info.splitlines():\n m = attr_pattern.match(line)\n if m is not None:\n # attribute line\n # check if layer attribute is permitted\n attr = m.group(1)\n if next_tr_is_feature:\n # keep 'Feature', filter subsequent attributes\n next_tr_is_feature = False\n elif attr not in permitted_attributes:\n # skip not permitted attribute\n continue\n elif table_pattern.match(line):\n # mark next tr as 'Feature'\n next_tr_is_feature = True\n else:\n m = layer_pattern.match(line)\n if m is not None:\n # layer line\n # get permitted attributes for layer\n current_layer = m.group(1)\n permitted_attributes = self.permitted_info_attributes(\n current_layer, permission\n )\n\n # keep line\n lines.append(line)\n\n # join filtered lines\n feature_info = '\\n'.join(lines)\n\n return feature_info",
"def parse_feature(self, feature_key, lines):\n ...",
"def extract_features(tlc):\n text = clean_text(tlc['body'])\n fields = dict()\n # add features here #\n fields['Top_comment_word_count'] = len(text.split(' '))\n fields['Top_comment_text'] = text\n\n # Extract time-based features\n def get_day_of_week(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').weekday() + 1\n\n def get_day_of_month(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').day\n\n def get_time_of_day(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').hour\n time_local = time.localtime(tlc['created_utc'])\n time_local = time.strftime(\"%Y-%m-%d %H:%M:%S\", time_local)\n fields['Top_comment_day'] = get_day_of_month(time_local)\n fields['Top_comment_day_of_week'] = get_day_of_week(time_local)\n fields['Top_comment_hour'] = get_time_of_day(time_local)\n\n # Extract gender value\n gp = GenderPerformr()\n probs, _ = gp.predict(tlc['author'])\n # Rescale it from [0,1] to [-1,1]\n fields['Top_comment_author_gender_value'] = 2 * probs - 1\n\n # Extract percentage of mispellings\n check = SpellChecker(\"en_US\")\n tokenizer = get_tokenizer(\"en_US\")\n # Prevent the denominator from 0\n def weird_division(n, d):\n return n / d if d else 0\n\n def get_mispellings_percentage(text):\n mispelling_count = 0\n total_count = 0\n if text == 'nan':\n return total_count\n else:\n check.set_text(text)\n for err in check:\n mispelling_count = mispelling_count + 1\n for w in tokenizer(text):\n total_count = total_count + 1\n value = weird_division(mispelling_count, total_count)\n return value\n fields['Top_comment_mispellings'] = get_mispellings_percentage(text)\n\n # Get politeness, agreement, support scores, and rescale them from [1,5] to [-1,1]\n ar = Agreementr()\n pr = Politenessr()\n sr = Supportr()\n fields['Top_comment_agreement_value'] = 0.5*float(ar.predict([text]))-1.5\n fields['Top_comment_politeness_value'] = 0.5*float(pr.predict([text]))-1.5\n fields['Top_comment_support_value'] = 0.5*float(sr.predict([text]))-1.5\n\n # Get toxicity scores\n KEY = \"yourkey.txt\" # os.getenv(\"GOOGLE_API_KEY\")\n service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=KEY)\n\n def get_results(request_id, response, exception):\n toxicity_scores.append((request_id, response))\n\n toxicity_scores = []\n count = 0\n batch = service.new_batch_http_request(callback=get_results)\n analyze_request = {\n 'comment': {'text': text},\n \"requestedAttributes\": {\n \"TOXICITY\": {},\n \"SEVERE_TOXICITY\": {},\n \"ATTACK_ON_COMMENTER\": {}\n }\n }\n batch.add(service.comments().analyze(body=analyze_request), request_id=str(count))\n batch.execute()\n toxic_score = toxicity_scores[0][1]['attributeScores']['TOXICITY']['summaryScore']['value']\n attack_score = toxicity_scores[0][1]['attributeScores']['ATTACK_ON_COMMENTER']['summaryScore']['value']\n if toxic_score > 0.5:\n fields['Top_comment_untuned_toxicity'] = 1\n else:\n fields['Top_comment_untuned_toxicity'] = 0\n if toxic_score > 0.8 and attack_score > 0.5:\n fields['Top_comment_tuned_toxicity'] = 1\n else:\n fields['Top_comment_tuned_toxicity'] = 0\n # end of feature extractions #\n return fields",
"def _features_of(entry: _LexiconEntry) -> str:\n return entry[\"features\"]",
"def _get_apt_features(self, jdict):\n\n try:\n fdict_list = jdict['props']['homeDetails']['features']['attributes']\n features = []\n for fdict in fdict_list:\n # find the extra features \n try:\n value = fdict['formattedValue']\n try:\n key = fdict['formattedName']\n features.append(f'{key}:{value}')\n except:\n features.append(value)\n except:\n next\n # stick all the features together, seperated by |\n return features\n except:\n return None",
"def extract_other_feature_args(self, line):\n result = {'feature_type': line[2], 'indices': [int(line[3]), int(line[4])]}\n attribs = self.parse_attributes(line[8])\n result.update(attribs)\n return result",
"def extract_features_only(self, text):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.sentences = sentences\n \n n = len(sentences)\n locsSentStarts = [-1] * n\n curpt = 0\n for i in range(n):\n pos = text[curpt:].find(sentences[i])\n locsSentStarts[i] = pos + curpt\n curpt = locsSentStarts[i] + len(sentences[i])\n self.sentence_startPos = locsSentStarts\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n featList = [(feat.getType(), feat.getStartPos(), feat.getEndPos(), feat.getString()) for feat in featObjList]\n return featList",
"def _parse_features(self):\n for root in self.roots:\n for feature in root.iter('feature'):\n api = feature.attrib.get('api', '')\n feature_name = feature.attrib.get('name', '')\n feature_number = int(float(feature.attrib.get('number', '')) * 10.0)\n\n # filter by api\n if api != 'gl':\n continue\n\n for require in feature.iter('require'):\n require_profile = require.attrib.get('profile', '')\n if require_profile and require_profile != 'core':\n # filter by profile\n continue\n\n for enum in require.iter('enum'):\n enum_name = enum.attrib.get('name', '')\n self.enum_list.append(enum_name)\n self.enum_required_by_feature[enum_name].append({\n 'api': api,\n 'name': feature_name,\n 'number': feature_number,\n 'profile': require_profile\n })\n for command in require.iter('command'):\n command_name = command.attrib['name']\n self.command_list.append(command_name)\n self.command_required_by_feature[command_name].append({\n 'api': api,\n 'name': feature_name,\n 'number': feature_number,\n 'profile': require_profile\n })\n\n for remove in feature.iter('remove'):\n remove_profile = remove.attrib.get('profile', '')\n if require_profile and require_profile != 'core':\n # filter by profile\n continue\n\n for enum in remove.iter('enum'):\n enum_name = enum.attrib.get('name', '')\n self.enum_removed_by_feature[enum_name].append({\n 'api': api,\n 'name': feature_name,\n 'number': feature_number,\n 'profile': remove_profile\n })\n for command in remove.iter('command'):\n command_name = command.attrib['name']\n self.command_removed_by_feature[command_name].append({\n 'api': api,\n 'name': feature_name,\n 'number': feature_number,\n 'profile': remove_profile\n })",
"def _parseFeature(self, name, value=None):\n supported = self._parse([(name, value)])\n return supported.getFeature(name)",
"def feature_extraction(_data):\n # Find the digits in the given string Example - data='18-20' digits = '1820'\n digits = str(''.join(c for c in _data if c.isdigit()))\n # calculate the length of the string\n len_digits = len(digits)\n # splitting digits in to values example - digits = '1820' ages = [18, 20]\n ages = [int(digits[i:i + 2]) for i in range(0, len_digits, 2)]\n # checking for special character in the given data\n special_character = '.+-<>?'\n spl_char = ''.join([c for c in list(special_character) if c in _data])\n # handling decimal age data\n if len_digits == 3:\n spl_char = '.'\n age = \"\".join([str(ages[0]), '.', str(ages[1])])\n # normalizing\n age = int(float(age) - 0.5)\n ages = [age]\n # Finding the maximum, minimum, average age values\n max_age = 0\n min_age = 0\n mean_age = 0\n if len(ages):\n max_age = max(ages)\n min_age = min(ages)\n if len(ages) == 2:\n mean_age = int((max_age + min_age) / 2)\n else:\n mean_age = max_age\n # specially added for 18 years cases\n only_18 = 0\n is_y = 0\n if ages == [18]:\n only_18 = 1\n if 'y' in _data or 'Y' in _data:\n is_y = 1\n under_18 = 0\n if 1 < max_age < 18:\n under_18 = 1\n above_65 = 0\n if mean_age >= 65:\n above_65 = 1\n # verifying whether digit is found in the given string or not.\n # Example - data='18-20' digits_found=True data='????' digits_found=False\n digits_found = 1\n if len_digits == 1:\n digits_found = 1\n max_age, min_age, mean_age, only_18, is_y, above_65, under_18 = 0, 0, 0, 0, 0, 0, 0\n elif len_digits == 0:\n digits_found, max_age, min_age, mean_age, only_18, is_y, above_65, under_18 = -1, -1, -1, -1, -1, -1, -1, -1\n \n feature = {\n 'ages': tuple(ages),\n 'len(ages)': len(ages),\n 'spl_chr': spl_char,\n 'is_digit': digits_found,\n 'max_age': max_age,\n 'mean_age': mean_age,\n 'only_18': only_18,\n 'is_y': is_y,\n 'above_65': above_65,\n 'under_18': under_18\n }\n\n return feature",
"def compute_filtered_features(self, features):\n return [\n feature\n for feature in features\n if \"\".join(feature.qualifiers.get(\"is_edit\", \"false\")) != \"true\"\n ]",
"def parse_features(self, skip=...):\n ...",
"def parse_features(self, skip=...):\n ...",
"def apply_filter(input_file, output_file, features):\n lines = input_file.readlines()\n lines = list(map(clean, lines))\n\n for i in range(0, len(lines)):\n line = lines[i]\n feat = extract(line[\"features\"], features)\n output_line = line[\"rank\"] + \" \" + line[\"qid\"]\n for key in features:\n output_line += \" \" + str(key) + \":\" + str(feat[key])\n output_line += \" #\" + line[\"comment\"]\n output_file.write(output_line)",
"def permitted_info_attributes(self, info_layer_name, permission):\n # get WMS layer name for info result layer\n wms_layer_name = permission.get('feature_info_aliases', {}) \\\n .get(info_layer_name, info_layer_name)\n\n # return permitted attributes for layer\n return permission['layers'].get(wms_layer_name, {})",
"def parse(value: str):\n return [member for member in FilterMode if member.name == value][0]",
"def wms_getfeatureinfo_gml(self, feature_info, permission):\n ElementTree.register_namespace('gml', 'http://www.opengis.net/gml')\n ElementTree.register_namespace('qgs', 'http://qgis.org/gml')\n ElementTree.register_namespace('wfs', 'http://www.opengis.net/wfs')\n root = ElementTree.fromstring(feature_info)\n\n # namespace dict\n ns = {\n 'gml': 'http://www.opengis.net/gml',\n 'qgs': 'http://qgis.org/gml'\n }\n\n qgs_attr_pattern = re.compile(\"^{%s}(.+)\" % ns['qgs'])\n\n for feature in root.findall('./gml:featureMember', ns):\n for layer in feature:\n # get layer name from fid, as spaces are removed in tag name\n layer_name = '.'.join(layer.get('fid', '').split('.')[:-1])\n\n # get permitted attributes for layer\n permitted_attributes = self.permitted_info_attributes(\n layer_name, permission\n )\n\n for attr in layer.findall('*'):\n m = qgs_attr_pattern.match(attr.tag)\n if m is not None:\n # attribute tag\n attr_name = m.group(1)\n if attr_name not in permitted_attributes:\n # remove not permitted attribute\n layer.remove(attr)\n\n # write XML to string\n return ElementTree.tostring(\n root, encoding='utf-8', method='xml', short_empty_elements=False\n )",
"def extract_feats(word, nlp):\n feat_dict = {}\n feat_string = ''\n doc = nlp(word).to_dict()[0][0]\n if 'feats' in doc:\n for pair in doc['feats'].split('|'):\n feat, val = pair.split('=')\n feat_dict[feat] = val\n feat_string += feat + ': ' + val + ', '\n if feat_string:\n feat_string = ' (' + feat_string[:-2] + ')'\n return feat_dict, feat_string",
"def extract_critic_input(self, data):\n return data[1]",
"def wms_getfeatureinfo(self, response, params, permission):\n feature_info = response.text\n\n if response.status_code == requests.codes.ok:\n info_format = params.get('INFO_FORMAT', 'text/plain')\n if info_format == 'text/plain':\n feature_info = self.wms_getfeatureinfo_plain(\n feature_info, permission\n )\n elif info_format == 'text/html':\n feature_info = self.wms_getfeatureinfo_html(\n feature_info, permission\n )\n elif info_format == 'text/xml':\n feature_info = self.wms_getfeatureinfo_xml(\n feature_info, permission\n )\n elif info_format == 'application/vnd.ogc.gml':\n feature_info = self.wms_getfeatureinfo_gml(\n feature_info, permission\n )\n\n # NOTE: application/vnd.ogc.gml/3.1.1 is broken in QGIS server\n\n return Response(\n feature_info,\n content_type=response.headers['content-type'],\n status=response.status_code\n )",
"def _entry_features_annotation_is_valid(entry: _LexiconEntry) -> None:\n features = _features_of(entry)\n\n if not (features == \"~\" or _FEATURES_REGEX.fullmatch(features)):\n raise InvalidLexiconEntryError(\n \"Entry features annotation is invalid. Features need to be annotated\"\n \" as '+[Category_1=Value_x]...+[Category_n=Value_y].\")",
"def format_arg_features(features):\n\n features = features.replace(\" \", \"\")\n features = features.replace(\"[\", \"\")\n features = features.replace(\"]\", \"\")\n\n # Remove quotation and double quotation marks\n features = features.replace(\"'\", \"\")\n features = features.replace('\"', \"\")\n\n # Split by comma\n list_of_features = features.rsplit(\",\")\n\n return list_of_features",
"def read_feature_dict(path):\n feature_dict = []\n with open(path, 'r', encoding='utf-8') as dictfile:\n for line in dictfile:\n if line.lstrip(' \\t').startswith('#'):\n # This line is a comment line, ignore it\n continue\n else:\n # This line contains one or more tokens, split them up and wrap them in the format for VisaS POS files.\n tokens = line.rstrip(' \\t\\n').rstrip(' \\t').split()\n dict_tokens = \"\"\n for token in tokens:\n quantifier = \"\"\n if re.match(\"\\(.+\\)([?*+])\",token):\n quantifier = re.match(\"\\(.+\\)([?*+])\",token).group(1)\n token = token.lstrip('(').rstrip(')?*+')\n if '_' in token:\n if token.startswith('_'):\n # token starts with '_' and is a POS tag\n if quantifier:\n dict_tokens = dict_tokens + \"(?:(\\d+)\\.(\\d+):\\S+?_\" + token.lstrip('_').replace(\"(\",\"(?:\") + \" )\" + quantifier\n else:\n dict_tokens = dict_tokens + \"(\\d+)\\.(\\d+):\\S+?_\" + token.lstrip('_').replace(\"(\",\"(?:\") + \" \"\n else:\n try:\n # token is a lemma with POS tag attached, split the lemma and pos tag\n pos_token = token.split('_')\n if quantifier:\n dict_tokens = dict_tokens + \"(?:(\\d+)\\.(\\d+):\" + pos_token[0].replace(\"(\",\"(?:\") + \"_\" + pos_token[1].replace(\"(\",\"(?:\") + \" )\" + quantifier\n else:\n dict_tokens = dict_tokens + \"(\\d+)\\.(\\d+):\" + pos_token[0].replace(\"(\",\"(?:\") + \"_\" + pos_token[1].replace(\"(\",\"(?:\") + \" \"\n\n except IndexError:\n print(\"Warning! Invalid token found in line '\" + line + \"'\")\n elif token == '...':\n # ... is converted to one or more arbitrary tokens\n dict_tokens = dict_tokens + \"(?:(\\d+)\\.(\\d+):\\S+_\\S+? )+\"\n else:\n # token is a lemma without POS tag\n if quantifier:\n dict_tokens = dict_tokens + \"(?:(\\d+)\\.(\\d+):\" + token.replace(\"(\", \"(?:\") + \"_\\S+? )\" + quantifier\n else:\n dict_tokens = dict_tokens + \"(\\d+)\\.(\\d+):\" + token.replace(\"(\", \"(?:\") + \"_\\S+? \"\n if dict_tokens:\n feature_dict.append(dict_tokens)\n if len(feature_dict) is 0:\n print(\"Warning! No valid entries found in dictionary \" + path)\n return None\n else:\n return feature_dict",
"def get_features(feature_list, these_feature):\n features = {}\n def feat_filter(feature, this):\n try:\n mapper = lambda x, feat: filter(lambda y: feat in y, x.split(\" \"))[0]\n val = mapper(this, feature)\n if '+' in val:\n return TRUE\n return FALSE\n except:\n return UNDEF\n for feat in feature_list:\n features[feat] = feat_filter(feat, these_feature)\n return features",
"def get_features_from_file(self):\n f_list = []\n f = open(\"verifiability_features.txt\", \"r\")\n for line in f:\n f_list.append(line)\n self.features = f_list",
"def get_features_from_file(self):\n f_list = []\n f = open(\"verifiability_features.txt\", \"r\")\n for line in f:\n f_list.append(line)\n self.features = f_list",
"def parse_cds_features(self, handle, alphabet=..., tags2id=...):\n ...",
"def _parse_features(cls, node: OMNode) -> Dict[str, Dict[str, bool]]:\n features = {}\n for sectname in node:\n section = node[sectname]\n if not isinstance(section, dict) or '_types' not in section:\n continue\n features[sectname] = {}\n for opt in section:\n if not opt.startswith('has'):\n continue\n value = section[opt]\n if not isinstance(value, bool):\n continue\n option = opt[3:]\n features[sectname][option] = value\n return features",
"def _filter_features(\n record_batch: pa.RecordBatch,\n feature_allowlist: List[types.FeatureName]) -> pa.RecordBatch:\n columns_to_select = []\n column_names_to_select = []\n for feature_name in feature_allowlist:\n col = arrow_util.get_column(record_batch, feature_name, missing_ok=True)\n if col is None:\n continue\n columns_to_select.append(col)\n column_names_to_select.append(feature_name)\n return pa.RecordBatch.from_arrays(columns_to_select, column_names_to_select)",
"def get_text_features(text, word_features):\n words = word_tokenize(text)\n features = {}\n for w in word_features:\n features[w] = (w in words)\n\n return features"
] | [
"0.59919715",
"0.57230556",
"0.5480841",
"0.53591084",
"0.53377175",
"0.5277078",
"0.5272189",
"0.526255",
"0.5243017",
"0.52359664",
"0.5134618",
"0.5117117",
"0.5117117",
"0.51116365",
"0.5048336",
"0.5006495",
"0.49421442",
"0.4911873",
"0.48802274",
"0.48569468",
"0.48559645",
"0.48129547",
"0.48106182",
"0.48091385",
"0.4807208",
"0.4807208",
"0.4802508",
"0.4799106",
"0.47790375",
"0.47591156"
] | 0.6658838 | 0 |
Parse feature info HTML and filter feature attributes by permission. | def wms_getfeatureinfo_html(self, feature_info, permission):
# NOTE: info content is not valid XML, parse as text
if feature_info.startswith('<HEAD>'):
lines = []
layer_pattern = re.compile(
"^<TR>.+>Layer<\/TH><TD>(.+)<\/TD><\/TR>$"
)
table_pattern = re.compile("^.*<TABLE")
attr_pattern = re.compile("^<TR><TH>(.+)<\/TH><TD>.+</TD><\/TR>$")
next_tr_is_feature = False
permitted_attributes = {}
for line in feature_info.splitlines():
m = attr_pattern.match(line)
if m is not None:
# attribute line
# check if layer attribute is permitted
attr = m.group(1)
if next_tr_is_feature:
# keep 'Feature', filter subsequent attributes
next_tr_is_feature = False
elif attr not in permitted_attributes:
# skip not permitted attribute
continue
elif table_pattern.match(line):
# mark next tr as 'Feature'
next_tr_is_feature = True
else:
m = layer_pattern.match(line)
if m is not None:
# layer line
# get permitted attributes for layer
current_layer = m.group(1)
permitted_attributes = self.permitted_info_attributes(
current_layer, permission
)
# keep line
lines.append(line)
# join filtered lines
feature_info = '\n'.join(lines)
return feature_info | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wms_getfeatureinfo_plain(self, feature_info, permission):\n \"\"\"\n GetFeatureInfo results\n\n Layer 'Grundstuecke'\n Feature 1\n t_id = '1234'\n nbident = 'SO0123456789'\n nummer = '1234'\n ...\n \"\"\"\n if feature_info.startswith('GetFeatureInfo'):\n lines = []\n\n layer_pattern = re.compile(\"^Layer '(.+)'$\")\n attr_pattern = re.compile(\"^(.+) = .+$\")\n permitted_attributes = {}\n\n # filter feature attributes by permission\n for line in feature_info.splitlines():\n m = attr_pattern.match(line)\n if m is not None:\n # attribute line\n # check if layer attribute is permitted\n attr = m.group(1)\n if attr not in permitted_attributes:\n # skip not permitted attribute\n continue\n else:\n m = layer_pattern.match(line)\n if m is not None:\n # layer line\n # get permitted attributes for layer\n current_layer = m.group(1)\n permitted_attributes = self.permitted_info_attributes(\n current_layer, permission\n )\n\n # keep line\n lines.append(line)\n\n # join filtered lines\n feature_info = '\\n'.join(lines)\n\n return feature_info",
"def _parse_features(self):\n for root in self.roots:\n for feature in root.iter('feature'):\n api = feature.attrib.get('api', '')\n feature_name = feature.attrib.get('name', '')\n feature_number = int(float(feature.attrib.get('number', '')) * 10.0)\n\n # filter by api\n if api != 'gl':\n continue\n\n for require in feature.iter('require'):\n require_profile = require.attrib.get('profile', '')\n if require_profile and require_profile != 'core':\n # filter by profile\n continue\n\n for enum in require.iter('enum'):\n enum_name = enum.attrib.get('name', '')\n self.enum_list.append(enum_name)\n self.enum_required_by_feature[enum_name].append({\n 'api': api,\n 'name': feature_name,\n 'number': feature_number,\n 'profile': require_profile\n })\n for command in require.iter('command'):\n command_name = command.attrib['name']\n self.command_list.append(command_name)\n self.command_required_by_feature[command_name].append({\n 'api': api,\n 'name': feature_name,\n 'number': feature_number,\n 'profile': require_profile\n })\n\n for remove in feature.iter('remove'):\n remove_profile = remove.attrib.get('profile', '')\n if require_profile and require_profile != 'core':\n # filter by profile\n continue\n\n for enum in remove.iter('enum'):\n enum_name = enum.attrib.get('name', '')\n self.enum_removed_by_feature[enum_name].append({\n 'api': api,\n 'name': feature_name,\n 'number': feature_number,\n 'profile': remove_profile\n })\n for command in remove.iter('command'):\n command_name = command.attrib['name']\n self.command_removed_by_feature[command_name].append({\n 'api': api,\n 'name': feature_name,\n 'number': feature_number,\n 'profile': remove_profile\n })",
"def wms_getfeatureinfo_gml(self, feature_info, permission):\n ElementTree.register_namespace('gml', 'http://www.opengis.net/gml')\n ElementTree.register_namespace('qgs', 'http://qgis.org/gml')\n ElementTree.register_namespace('wfs', 'http://www.opengis.net/wfs')\n root = ElementTree.fromstring(feature_info)\n\n # namespace dict\n ns = {\n 'gml': 'http://www.opengis.net/gml',\n 'qgs': 'http://qgis.org/gml'\n }\n\n qgs_attr_pattern = re.compile(\"^{%s}(.+)\" % ns['qgs'])\n\n for feature in root.findall('./gml:featureMember', ns):\n for layer in feature:\n # get layer name from fid, as spaces are removed in tag name\n layer_name = '.'.join(layer.get('fid', '').split('.')[:-1])\n\n # get permitted attributes for layer\n permitted_attributes = self.permitted_info_attributes(\n layer_name, permission\n )\n\n for attr in layer.findall('*'):\n m = qgs_attr_pattern.match(attr.tag)\n if m is not None:\n # attribute tag\n attr_name = m.group(1)\n if attr_name not in permitted_attributes:\n # remove not permitted attribute\n layer.remove(attr)\n\n # write XML to string\n return ElementTree.tostring(\n root, encoding='utf-8', method='xml', short_empty_elements=False\n )",
"def _get_apt_features(self, jdict):\n\n try:\n fdict_list = jdict['props']['homeDetails']['features']['attributes']\n features = []\n for fdict in fdict_list:\n # find the extra features \n try:\n value = fdict['formattedValue']\n try:\n key = fdict['formattedName']\n features.append(f'{key}:{value}')\n except:\n features.append(value)\n except:\n next\n # stick all the features together, seperated by |\n return features\n except:\n return None",
"def wms_getfeatureinfo_xml(self, feature_info, permission):\n ElementTree.register_namespace('', 'http://www.opengis.net/ogc')\n root = ElementTree.fromstring(feature_info)\n\n for layer in root.findall('./Layer'):\n # get permitted attributes for layer\n permitted_attributes = self.permitted_info_attributes(\n layer.get('name'), permission\n )\n\n for feature in layer.findall('Feature'):\n for attr in feature.findall('Attribute'):\n if attr.get('name') not in permitted_attributes:\n # remove not permitted attribute\n feature.remove(attr)\n\n # write XML to string\n return ElementTree.tostring(root, encoding='utf-8', method='xml')",
"def extract_features(tlc):\n text = clean_text(tlc['body'])\n fields = dict()\n # add features here #\n fields['Top_comment_word_count'] = len(text.split(' '))\n fields['Top_comment_text'] = text\n\n # Extract time-based features\n def get_day_of_week(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').weekday() + 1\n\n def get_day_of_month(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').day\n\n def get_time_of_day(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').hour\n time_local = time.localtime(tlc['created_utc'])\n time_local = time.strftime(\"%Y-%m-%d %H:%M:%S\", time_local)\n fields['Top_comment_day'] = get_day_of_month(time_local)\n fields['Top_comment_day_of_week'] = get_day_of_week(time_local)\n fields['Top_comment_hour'] = get_time_of_day(time_local)\n\n # Extract gender value\n gp = GenderPerformr()\n probs, _ = gp.predict(tlc['author'])\n # Rescale it from [0,1] to [-1,1]\n fields['Top_comment_author_gender_value'] = 2 * probs - 1\n\n # Extract percentage of mispellings\n check = SpellChecker(\"en_US\")\n tokenizer = get_tokenizer(\"en_US\")\n # Prevent the denominator from 0\n def weird_division(n, d):\n return n / d if d else 0\n\n def get_mispellings_percentage(text):\n mispelling_count = 0\n total_count = 0\n if text == 'nan':\n return total_count\n else:\n check.set_text(text)\n for err in check:\n mispelling_count = mispelling_count + 1\n for w in tokenizer(text):\n total_count = total_count + 1\n value = weird_division(mispelling_count, total_count)\n return value\n fields['Top_comment_mispellings'] = get_mispellings_percentage(text)\n\n # Get politeness, agreement, support scores, and rescale them from [1,5] to [-1,1]\n ar = Agreementr()\n pr = Politenessr()\n sr = Supportr()\n fields['Top_comment_agreement_value'] = 0.5*float(ar.predict([text]))-1.5\n fields['Top_comment_politeness_value'] = 0.5*float(pr.predict([text]))-1.5\n fields['Top_comment_support_value'] = 0.5*float(sr.predict([text]))-1.5\n\n # Get toxicity scores\n KEY = \"yourkey.txt\" # os.getenv(\"GOOGLE_API_KEY\")\n service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=KEY)\n\n def get_results(request_id, response, exception):\n toxicity_scores.append((request_id, response))\n\n toxicity_scores = []\n count = 0\n batch = service.new_batch_http_request(callback=get_results)\n analyze_request = {\n 'comment': {'text': text},\n \"requestedAttributes\": {\n \"TOXICITY\": {},\n \"SEVERE_TOXICITY\": {},\n \"ATTACK_ON_COMMENTER\": {}\n }\n }\n batch.add(service.comments().analyze(body=analyze_request), request_id=str(count))\n batch.execute()\n toxic_score = toxicity_scores[0][1]['attributeScores']['TOXICITY']['summaryScore']['value']\n attack_score = toxicity_scores[0][1]['attributeScores']['ATTACK_ON_COMMENTER']['summaryScore']['value']\n if toxic_score > 0.5:\n fields['Top_comment_untuned_toxicity'] = 1\n else:\n fields['Top_comment_untuned_toxicity'] = 0\n if toxic_score > 0.8 and attack_score > 0.5:\n fields['Top_comment_tuned_toxicity'] = 1\n else:\n fields['Top_comment_tuned_toxicity'] = 0\n # end of feature extractions #\n return fields",
"def _get_features(self, soup):\n beds, baths, htype = None, None, None\n\n try:\n ppt_details = soup.find('div', class_='listing-info__items')\n features = ppt_details.find_all('dl', class_='listing-info__box-content')\n features = [feature.get_text() for feature in features]\n \n # try to identify the room type\n for feature in features:\n if 'beds' in feature.lower():\n beds = self._extract_num(feature)\n if 'baths' in feature.lower():\n baths = self._extract_num(feature)\n if (not 'beds' in feature.lower()) and \\\n (not 'baths' in feature.lower()) and \\\n (not 'half baths' in feature.lower()):\n htype = feature.strip()\n\n return beds, baths, htype\n except:\n return beds, baths, htype",
"def _features_of(entry: _LexiconEntry) -> str:\n return entry[\"features\"]",
"def extract_listing_features(soup, rules):\r\n features_dict = {}\r\n for feature in rules:\r\n try:\r\n features_dict[feature] = extract_element_data(soup, rules[feature])\r\n except:\r\n features_dict[feature] = 'empty'\r\n \r\n return features_dict",
"def _parseFeature(self, name, value=None):\n supported = self._parse([(name, value)])\n return supported.getFeature(name)",
"def parse_features(self, skip=...):\n ...",
"def parse_features(self, skip=...):\n ...",
"def extract_feature(self, article) :\n pass",
"def parse_feature(self, feature_key, lines):\n ...",
"def extract_features(self):\n self.extract_features_static()\n self.extract_features_dynamic()",
"def permitted_info_attributes(self, info_layer_name, permission):\n # get WMS layer name for info result layer\n wms_layer_name = permission.get('feature_info_aliases', {}) \\\n .get(info_layer_name, info_layer_name)\n\n # return permitted attributes for layer\n return permission['layers'].get(wms_layer_name, {})",
"def extract_attributes_html(url, request):\n func, args, kwargs = resolve(url)\n response = func(request, *args, **kwargs)\n response.render()\n\n soup = bs4.BeautifulSoup(response.content, 'lxml')\n details = soup.find(id=\"properties\")\n if details is None:\n raise ValueError('Content is of detail page is invalid')\n\n # Remove \"Add\" buttons\n for p in details('p'):\n if 'autohide' in p.get('class', ''):\n p.extract()\n # Remove Javascript\n for s in details('script'):\n s.extract()\n # Remove images (Appy.pod fails with them)\n for i in details('img'):\n i.replaceWith(i.get('title', ''))\n # Remove links (Appy.pod sometimes shows empty strings)\n for a in details('a'):\n a.replaceWith(a.text)\n # Prettify (ODT compat.) and convert unicode to XML entities\n cooked = details.prettify('ascii', formatter='html').decode()\n return cooked",
"def _parse_features(cls, node: OMNode) -> Dict[str, Dict[str, bool]]:\n features = {}\n for sectname in node:\n section = node[sectname]\n if not isinstance(section, dict) or '_types' not in section:\n continue\n features[sectname] = {}\n for opt in section:\n if not opt.startswith('has'):\n continue\n value = section[opt]\n if not isinstance(value, bool):\n continue\n option = opt[3:]\n features[sectname][option] = value\n return features",
"def extractFeatures(self, datum):\n abstract",
"def wms_getfeatureinfo(self, response, params, permission):\n feature_info = response.text\n\n if response.status_code == requests.codes.ok:\n info_format = params.get('INFO_FORMAT', 'text/plain')\n if info_format == 'text/plain':\n feature_info = self.wms_getfeatureinfo_plain(\n feature_info, permission\n )\n elif info_format == 'text/html':\n feature_info = self.wms_getfeatureinfo_html(\n feature_info, permission\n )\n elif info_format == 'text/xml':\n feature_info = self.wms_getfeatureinfo_xml(\n feature_info, permission\n )\n elif info_format == 'application/vnd.ogc.gml':\n feature_info = self.wms_getfeatureinfo_gml(\n feature_info, permission\n )\n\n # NOTE: application/vnd.ogc.gml/3.1.1 is broken in QGIS server\n\n return Response(\n feature_info,\n content_type=response.headers['content-type'],\n status=response.status_code\n )",
"def compute_filtered_features(self, features):\n return [\n feature\n for feature in features\n if \"\".join(feature.qualifiers.get(\"is_edit\", \"false\")) != \"true\"\n ]",
"def get_features(item, GP):\n contents_url = '%s/contents' % item['url']\n\n # scrape readme\n gf.get_readme_length(contents_url, GP)\n\n # scrape file-by-file stats\n digest_repo(contents_url, GP)\n\n # scrape commit history\n gf.get_repo_commit_history(item, GP)\n\n # scrape stargazers\n GP.n_stars = item['stargazers_count']\n\n # scrape forks\n GP.n_forks = item['forks_count']\n\n return GP",
"def extract_features_only(self, text):\n \n featurelist = []\n \n sentences = util.sentence_tokenize(text)\n taggedSentences = [] \n for sentnumber, sentence0 in enumerate(sentences):\n \n sentence = self.clean_text(sentence0)\n \n # tokenize each sentence to have a list of words to be processed\n tokens = nltk.word_tokenize(sentence)\n #run the above procedure\n sentence_to_parse = self.get_untagged(tokens)\n \n # Save tagged sentences for later computing of expose date\n taggedSentences.append(sentence_to_parse)\n \n #only if the cleaned sentence is NOT empty we parse it\n if sentence_to_parse!=[]:\n tree = self.cp.parse(sentence_to_parse)\n tree1 = self.cp1.parse(sentence_to_parse)\n \n# new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.node in self.st_filter])\n new_sentence_to_parse = ','.join([' '.join(nltk.tag.untag(subtree.leaves())) + ' ' for subtree in tree.subtrees() if subtree.label() in self.st_filter])\n\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(', ,', ',')\n #here we delete the dash and replace it with whitespace to convert post-vac to post vac\n new_sentence_to_parse = new_sentence_to_parse.replace(',', ', ')\n\n new_sentence_to_parse = nltk.word_tokenize(new_sentence_to_parse)\n\n #run the above procedure\n new_sentence_to_parse = self.get_untagged(new_sentence_to_parse)\n \n if new_sentence_to_parse!=[]:\n tree2 = self.cp.parse(new_sentence_to_parse)\n for subtree in tree2.subtrees():\n if subtree.label() in self.st_filter: \n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n \n for subtree in tree1.subtrees():\n if subtree.label() in self.labels_gram1:\n featString = self.massage_features(subtree)\n featurelist.append((subtree.label(), featString, sentnumber, subtree.leaves()))\n\n self.sentences = sentences\n \n n = len(sentences)\n locsSentStarts = [-1] * n\n curpt = 0\n for i in range(n):\n pos = text[curpt:].find(sentences[i])\n locsSentStarts[i] = pos + curpt\n curpt = locsSentStarts[i] + len(sentences[i])\n self.sentence_startPos = locsSentStarts\n \n featObjList = self.initialize_feature_obj_list(featurelist)\n \n featList = [(feat.getType(), feat.getStartPos(), feat.getEndPos(), feat.getString()) for feat in featObjList]\n return featList",
"def extract_one( html: str, fpath: Path ):\n # %%\n doc = BeautifulSoup( html, features='html.parser')\n\n ret = { 'linkedin_handle': fpath.name.split('.')[0] }\n _parse_top_card( ret, doc )\n # %%\n ret['about'] = _extract_about( doc )\n # if len(ret['about']) < 100 and ret['about'].find('ver más') > 0:\n # print( f\"\\nVer más detected: \\nabout:{ret['about']} fpath={fpath}\" )\n\n ret['about_stats'] = {'about_eng_ratio': _common_english_ratio(ret['about'])}\n # %%\n ret['work_experience'] = _parse_experiences( doc )\n ret['work_stats'] = calc_work_stats( ret['work_experience'])\n # %%\n ret['skills'] = proc_skills_section( doc )\n ret['education'] = _parse_education( doc )\n ret['education_stats'] = _education_stats( ret['education'])\n ret['accomplishments'] = _extract_accomplishments(doc)\n ret['profile_text_stats'] = profile_text_stats( doc )\n # %%\n return ret\n # %%",
"def extract_other_feature_args(self, line):\n result = {'feature_type': line[2], 'indices': [int(line[3]), int(line[4])]}\n attribs = self.parse_attributes(line[8])\n result.update(attribs)\n return result",
"def getLineInformation(html, reg=['(?<=<\\/h2><li>).*?<\\/a>', '<.*a.*?>', '查公交上八六八四cn']):\n tmp = re.findall(reg[0], html)[0]\n Info = re.sub(reg[1], '', tmp)\n info = re.sub(reg[2], '', Info)\n return info",
"def extract_basic_features(self):\n if self.basic_features:\n self.basic_features = {}\n\n for s in self.report.get(\"signatures\", []):\n name = s.get(\"name\", \"\")\n description = s.get(\"description\", \"\")\n if name:\n self.basic_features[name] = description\n continue\n if description:\n self.basic_features[hash(description)] = description",
"def feature_extraction(_data):\n # Find the digits in the given string Example - data='18-20' digits = '1820'\n digits = str(''.join(c for c in _data if c.isdigit()))\n # calculate the length of the string\n len_digits = len(digits)\n # splitting digits in to values example - digits = '1820' ages = [18, 20]\n ages = [int(digits[i:i + 2]) for i in range(0, len_digits, 2)]\n # checking for special character in the given data\n special_character = '.+-<>?'\n spl_char = ''.join([c for c in list(special_character) if c in _data])\n # handling decimal age data\n if len_digits == 3:\n spl_char = '.'\n age = \"\".join([str(ages[0]), '.', str(ages[1])])\n # normalizing\n age = int(float(age) - 0.5)\n ages = [age]\n # Finding the maximum, minimum, average age values\n max_age = 0\n min_age = 0\n mean_age = 0\n if len(ages):\n max_age = max(ages)\n min_age = min(ages)\n if len(ages) == 2:\n mean_age = int((max_age + min_age) / 2)\n else:\n mean_age = max_age\n # specially added for 18 years cases\n only_18 = 0\n is_y = 0\n if ages == [18]:\n only_18 = 1\n if 'y' in _data or 'Y' in _data:\n is_y = 1\n under_18 = 0\n if 1 < max_age < 18:\n under_18 = 1\n above_65 = 0\n if mean_age >= 65:\n above_65 = 1\n # verifying whether digit is found in the given string or not.\n # Example - data='18-20' digits_found=True data='????' digits_found=False\n digits_found = 1\n if len_digits == 1:\n digits_found = 1\n max_age, min_age, mean_age, only_18, is_y, above_65, under_18 = 0, 0, 0, 0, 0, 0, 0\n elif len_digits == 0:\n digits_found, max_age, min_age, mean_age, only_18, is_y, above_65, under_18 = -1, -1, -1, -1, -1, -1, -1, -1\n \n feature = {\n 'ages': tuple(ages),\n 'len(ages)': len(ages),\n 'spl_chr': spl_char,\n 'is_digit': digits_found,\n 'max_age': max_age,\n 'mean_age': mean_age,\n 'only_18': only_18,\n 'is_y': is_y,\n 'above_65': above_65,\n 'under_18': under_18\n }\n\n return feature",
"def get_filter(feature, value):\r\n return {\r\n 'gender': {'user__profile__gender': value},\r\n 'level_of_education': {'user__profile__level_of_education': value},\r\n }[feature]",
"def get_facts_and_features(content):\n facts_features = content.find(\"div\", {\"class\": \"ds-home-facts-and-features reso-facts-features sheety-facts-features\"})\n items_list = [li.get_text(strip=True) for uls in facts_features.find_all(\"ul\") for li in uls]\n item_keys = ['_'.join(parse_text(item, ':', 0).split()).lower() for item in items_list]\n item_values = [parse_text(item, ':', -1) for item in items_list]\n\n return dict(zip(item_keys, item_values))"
] | [
"0.6403695",
"0.57270837",
"0.5669662",
"0.5665216",
"0.53554523",
"0.5286516",
"0.5278501",
"0.5267335",
"0.52538925",
"0.5215828",
"0.51774246",
"0.51774246",
"0.51503915",
"0.5149752",
"0.51478523",
"0.50912863",
"0.50403017",
"0.50143975",
"0.49901897",
"0.49863157",
"0.4967227",
"0.49412602",
"0.49323285",
"0.4881218",
"0.4836475",
"0.48299894",
"0.48167914",
"0.47919044",
"0.4766727",
"0.47392786"
] | 0.7270897 | 0 |
Parse feature info XML and filter feature attributes by permission. | def wms_getfeatureinfo_xml(self, feature_info, permission):
ElementTree.register_namespace('', 'http://www.opengis.net/ogc')
root = ElementTree.fromstring(feature_info)
for layer in root.findall('./Layer'):
# get permitted attributes for layer
permitted_attributes = self.permitted_info_attributes(
layer.get('name'), permission
)
for feature in layer.findall('Feature'):
for attr in feature.findall('Attribute'):
if attr.get('name') not in permitted_attributes:
# remove not permitted attribute
feature.remove(attr)
# write XML to string
return ElementTree.tostring(root, encoding='utf-8', method='xml') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _parse_features(self):\n for root in self.roots:\n for feature in root.iter('feature'):\n api = feature.attrib.get('api', '')\n feature_name = feature.attrib.get('name', '')\n feature_number = int(float(feature.attrib.get('number', '')) * 10.0)\n\n # filter by api\n if api != 'gl':\n continue\n\n for require in feature.iter('require'):\n require_profile = require.attrib.get('profile', '')\n if require_profile and require_profile != 'core':\n # filter by profile\n continue\n\n for enum in require.iter('enum'):\n enum_name = enum.attrib.get('name', '')\n self.enum_list.append(enum_name)\n self.enum_required_by_feature[enum_name].append({\n 'api': api,\n 'name': feature_name,\n 'number': feature_number,\n 'profile': require_profile\n })\n for command in require.iter('command'):\n command_name = command.attrib['name']\n self.command_list.append(command_name)\n self.command_required_by_feature[command_name].append({\n 'api': api,\n 'name': feature_name,\n 'number': feature_number,\n 'profile': require_profile\n })\n\n for remove in feature.iter('remove'):\n remove_profile = remove.attrib.get('profile', '')\n if require_profile and require_profile != 'core':\n # filter by profile\n continue\n\n for enum in remove.iter('enum'):\n enum_name = enum.attrib.get('name', '')\n self.enum_removed_by_feature[enum_name].append({\n 'api': api,\n 'name': feature_name,\n 'number': feature_number,\n 'profile': remove_profile\n })\n for command in remove.iter('command'):\n command_name = command.attrib['name']\n self.command_removed_by_feature[command_name].append({\n 'api': api,\n 'name': feature_name,\n 'number': feature_number,\n 'profile': remove_profile\n })",
"def wms_getfeatureinfo_gml(self, feature_info, permission):\n ElementTree.register_namespace('gml', 'http://www.opengis.net/gml')\n ElementTree.register_namespace('qgs', 'http://qgis.org/gml')\n ElementTree.register_namespace('wfs', 'http://www.opengis.net/wfs')\n root = ElementTree.fromstring(feature_info)\n\n # namespace dict\n ns = {\n 'gml': 'http://www.opengis.net/gml',\n 'qgs': 'http://qgis.org/gml'\n }\n\n qgs_attr_pattern = re.compile(\"^{%s}(.+)\" % ns['qgs'])\n\n for feature in root.findall('./gml:featureMember', ns):\n for layer in feature:\n # get layer name from fid, as spaces are removed in tag name\n layer_name = '.'.join(layer.get('fid', '').split('.')[:-1])\n\n # get permitted attributes for layer\n permitted_attributes = self.permitted_info_attributes(\n layer_name, permission\n )\n\n for attr in layer.findall('*'):\n m = qgs_attr_pattern.match(attr.tag)\n if m is not None:\n # attribute tag\n attr_name = m.group(1)\n if attr_name not in permitted_attributes:\n # remove not permitted attribute\n layer.remove(attr)\n\n # write XML to string\n return ElementTree.tostring(\n root, encoding='utf-8', method='xml', short_empty_elements=False\n )",
"def wms_getfeatureinfo_html(self, feature_info, permission):\n # NOTE: info content is not valid XML, parse as text\n if feature_info.startswith('<HEAD>'):\n lines = []\n\n layer_pattern = re.compile(\n \"^<TR>.+>Layer<\\/TH><TD>(.+)<\\/TD><\\/TR>$\"\n )\n table_pattern = re.compile(\"^.*<TABLE\")\n attr_pattern = re.compile(\"^<TR><TH>(.+)<\\/TH><TD>.+</TD><\\/TR>$\")\n next_tr_is_feature = False\n permitted_attributes = {}\n\n for line in feature_info.splitlines():\n m = attr_pattern.match(line)\n if m is not None:\n # attribute line\n # check if layer attribute is permitted\n attr = m.group(1)\n if next_tr_is_feature:\n # keep 'Feature', filter subsequent attributes\n next_tr_is_feature = False\n elif attr not in permitted_attributes:\n # skip not permitted attribute\n continue\n elif table_pattern.match(line):\n # mark next tr as 'Feature'\n next_tr_is_feature = True\n else:\n m = layer_pattern.match(line)\n if m is not None:\n # layer line\n # get permitted attributes for layer\n current_layer = m.group(1)\n permitted_attributes = self.permitted_info_attributes(\n current_layer, permission\n )\n\n # keep line\n lines.append(line)\n\n # join filtered lines\n feature_info = '\\n'.join(lines)\n\n return feature_info",
"def wms_getfeatureinfo_plain(self, feature_info, permission):\n \"\"\"\n GetFeatureInfo results\n\n Layer 'Grundstuecke'\n Feature 1\n t_id = '1234'\n nbident = 'SO0123456789'\n nummer = '1234'\n ...\n \"\"\"\n if feature_info.startswith('GetFeatureInfo'):\n lines = []\n\n layer_pattern = re.compile(\"^Layer '(.+)'$\")\n attr_pattern = re.compile(\"^(.+) = .+$\")\n permitted_attributes = {}\n\n # filter feature attributes by permission\n for line in feature_info.splitlines():\n m = attr_pattern.match(line)\n if m is not None:\n # attribute line\n # check if layer attribute is permitted\n attr = m.group(1)\n if attr not in permitted_attributes:\n # skip not permitted attribute\n continue\n else:\n m = layer_pattern.match(line)\n if m is not None:\n # layer line\n # get permitted attributes for layer\n current_layer = m.group(1)\n permitted_attributes = self.permitted_info_attributes(\n current_layer, permission\n )\n\n # keep line\n lines.append(line)\n\n # join filtered lines\n feature_info = '\\n'.join(lines)\n\n return feature_info",
"def _read_attributes(root):\n output_list = []\n for _, value in enumerate(root[0][2]):\n attr = Attribute(value)\n output_list.append(attr)\n return output_list",
"def permitted_info_attributes(self, info_layer_name, permission):\n # get WMS layer name for info result layer\n wms_layer_name = permission.get('feature_info_aliases', {}) \\\n .get(info_layer_name, info_layer_name)\n\n # return permitted attributes for layer\n return permission['layers'].get(wms_layer_name, {})",
"def _parse_features(cls, node: OMNode) -> Dict[str, Dict[str, bool]]:\n features = {}\n for sectname in node:\n section = node[sectname]\n if not isinstance(section, dict) or '_types' not in section:\n continue\n features[sectname] = {}\n for opt in section:\n if not opt.startswith('has'):\n continue\n value = section[opt]\n if not isinstance(value, bool):\n continue\n option = opt[3:]\n features[sectname][option] = value\n return features",
"def _get_apt_features(self, jdict):\n\n try:\n fdict_list = jdict['props']['homeDetails']['features']['attributes']\n features = []\n for fdict in fdict_list:\n # find the extra features \n try:\n value = fdict['formattedValue']\n try:\n key = fdict['formattedName']\n features.append(f'{key}:{value}')\n except:\n features.append(value)\n except:\n next\n # stick all the features together, seperated by |\n return features\n except:\n return None",
"def _parseFeature(self, name, value=None):\n supported = self._parse([(name, value)])\n return supported.getFeature(name)",
"def parse_features(self, skip=...):\n ...",
"def parse_features(self, skip=...):\n ...",
"def parse_feature(self, feature_key, lines):\n ...",
"def wms_getcapabilities(self, response, params, permission):\n xml = response.text\n\n if response.status_code == requests.codes.ok:\n # parse capabilities XML\n ElementTree.register_namespace('', 'http://www.opengis.net/wms')\n ElementTree.register_namespace('qgs', 'http://www.qgis.org/wms')\n ElementTree.register_namespace('sld', 'http://www.opengis.net/sld')\n ElementTree.register_namespace(\n 'xlink', 'http://www.w3.org/1999/xlink'\n )\n root = ElementTree.fromstring(xml)\n\n # use default namespace for XML search\n # namespace dict\n ns = {'ns': 'http://www.opengis.net/wms'}\n # namespace prefix\n np = 'ns:'\n if not root.tag.startswith('{http://'):\n # do not use namespace\n ns = {}\n np = ''\n\n root_layer = root.find('%sCapability/%sLayer' % (np, np), ns)\n if root_layer is not None:\n # remove broken info format 'application/vnd.ogc.gml/3.1.1'\n feature_info = root.find('.//%sGetFeatureInfo' % np, ns)\n if feature_info is not None:\n for format in feature_info.findall('%sFormat' % np, ns):\n if format.text == 'application/vnd.ogc.gml/3.1.1':\n feature_info.remove(format)\n\n # filter and update layers by permission\n permitted_layers = permission['public_layers']\n queryable_layers = permission['queryable_layers']\n for group in root_layer.findall('.//%sLayer/..' % np, ns):\n for layer in group.findall('%sLayer' % np, ns):\n layer_name = layer.find('%sName' % np, ns).text\n if layer_name not in permitted_layers:\n # remove not permitted layer\n group.remove(layer)\n else:\n # update queryable\n if layer_name in queryable_layers:\n layer.set('queryable', '1')\n else:\n layer.set('queryable', '0')\n\n # get permitted attributes for layer\n permitted_attributes = permission['layers'].get(\n layer_name, {}\n )\n\n # remove layer displayField if attribute not permitted\n # (for QGIS GetProjectSettings)\n display_field = layer.get('displayField')\n if (display_field and\n display_field not in permitted_attributes):\n layer.attrib.pop('displayField')\n\n # filter layer attributes by permission\n # (for QGIS GetProjectSettings)\n attributes = layer.find('%sAttributes' % np, ns)\n if attributes is not None:\n for attr in attributes.findall(\n '%sAttribute' % np, ns\n ):\n if attr.get('name') not in permitted_attributes:\n # remove not permitted attribute\n attributes.remove(attr)\n\n # update queryable for root layer\n if queryable_layers:\n root_layer.set('queryable', '1')\n else:\n root_layer.set('queryable', '0')\n\n # filter LayerDrawingOrder by permission\n # (for QGIS GetProjectSettings)\n layer_drawing_order = root.find(\n './/%sLayerDrawingOrder' % np, ns\n )\n if layer_drawing_order is not None:\n layers = layer_drawing_order.text.split(',')\n # remove not permitted layers\n layers = [\n l for l in layers if l in permitted_layers\n ]\n layer_drawing_order.text = ','.join(layers)\n\n # filter ComposerTemplates by permission\n # (for QGIS GetProjectSettings)\n templates = root.find(\n '%sCapability/%sComposerTemplates' % (np, np), ns\n )\n if templates is not None:\n permitted_templates = permission.get('print_templates', [])\n for template in templates.findall(\n '%sComposerTemplate' % np, ns\n ):\n template_name = template.get('name')\n if template_name not in permitted_templates:\n # remove not permitted print template\n templates.remove(template)\n\n if not templates.find('%sComposerTemplate' % np, ns):\n # remove ComposerTemplates if empty\n root.find('%sCapability' % np, ns).remove(templates)\n\n # write XML to string\n xml = ElementTree.tostring(\n root, encoding='utf-8', 
method='xml'\n )\n\n return Response(\n xml,\n content_type=response.headers['content-type'],\n status=response.status_code\n )",
"def compute_filtered_features(self, features):\n return [\n feature\n for feature in features\n if \"\".join(feature.qualifiers.get(\"is_edit\", \"false\")) != \"true\"\n ]",
"def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}",
"def extract_features(self):\n self.extract_features_static()\n self.extract_features_dynamic()",
"def parse_xml(infile, tags, skip_empties=False, verbose=False):\n # output feature vector in map form\n feature_vector = {}\n # get the xml root\n tree = ET.parse(infile)\n root = tree.getroot()\n\n # get the patient information node in the xml\n patient_info = None\n for c in root:\n print(c.tag)\n if c.tag.endswith('patient'):\n patient_info = c\n break\n\n if not patient_info:\n print(\"No patient info found on file [{}]\".format(infile))\n exit(1)\n\n for x in patient_info.iter():\n key = re.sub(\"\\{.*\\}\", \"\", x.tag.strip())\n if x.text == None:\n x.text = \"\"\n value = x.text.strip()\n # skip empties\n if skip_empties:\n if not value: continue\n # use the entry if it's in the input tagset or no such set is defined\n if (tags and key not in tags):\n continue\n if key in feature_vector:\n i = 0\n while \"{}_{}\".format(key,i) in feature_vector: i+=1\n newkey = \"{}_{}\".format(key,i)\n print(\"Key {} already in vector, renaming new instance to {}\".format(key, newkey))\n key = newkey\n feature_vector[key] = value\n if verbose:\n for i, (key, value) in enumerate(feature_vector.items()):\n print(\"{}/{} : [{}] : [{}]\".format(i, len(feature_vector), key, value))\n return feature_vector",
"def wms_getfeatureinfo(self, response, params, permission):\n feature_info = response.text\n\n if response.status_code == requests.codes.ok:\n info_format = params.get('INFO_FORMAT', 'text/plain')\n if info_format == 'text/plain':\n feature_info = self.wms_getfeatureinfo_plain(\n feature_info, permission\n )\n elif info_format == 'text/html':\n feature_info = self.wms_getfeatureinfo_html(\n feature_info, permission\n )\n elif info_format == 'text/xml':\n feature_info = self.wms_getfeatureinfo_xml(\n feature_info, permission\n )\n elif info_format == 'application/vnd.ogc.gml':\n feature_info = self.wms_getfeatureinfo_gml(\n feature_info, permission\n )\n\n # NOTE: application/vnd.ogc.gml/3.1.1 is broken in QGIS server\n\n return Response(\n feature_info,\n content_type=response.headers['content-type'],\n status=response.status_code\n )",
"def has_wfs_member(self, xml):\n service = self.metadata.service\n version = service.service_type.version\n if version == OGCServiceVersionEnum.V_1_0_0.value:\n return len([child for child in xml.getroot() if child.tag.endswith('featureMember')]) != 1\n if version == OGCServiceVersionEnum.V_1_1_0.value:\n return len([child for child in xml.getroot() if child.tag.endswith('featureMember')]) != 1\n if version == OGCServiceVersionEnum.V_2_0_0.value:\n return len([child for child in xml.getroot() if child.tag.endswith('member')]) != 1\n if version == OGCServiceVersionEnum.V_2_0_2.value:\n return len([child for child in xml.getroot() if child.tag.endswith('member')]) != 1",
"def extract_other_feature_args(self, line):\n result = {'feature_type': line[2], 'indices': [int(line[3]), int(line[4])]}\n attribs = self.parse_attributes(line[8])\n result.update(attribs)\n return result",
"def extract_feature(self, article) :\n pass",
"def handle_attributes_features(\n\ttffeatures,\n\tvalfeatures,\n\tattributes,\n\tdefault_value=DEFAULT_VAL_IF_NOT_EXIST,\n\tfeature_separator=FEATURE_SEPARATOR\n\t):\n\tfor (key, val) in attributes.items():\n\t\tif (isinstance(val, dict)):\n\t\t\tfor (inner_key, inner_val) in val.items():\n\t\t\t\tfeature_name = ATTRIBUTES + feature_separator + key + feature_separator + inner_key\n\n\t\t\t\tif is_boolean_set(inner_val):\n\t\t\t\t\t# inner feature is true/false feature\n\t\t\t\t\ttffeatures.append(feature_name)\n\t\t\t\telse:\n\t\t\t\t\t# inner feature is val feature\n\t\t\t\t\tinner_val.add(default_value)\n\t\t\t\t\tvalfeatures[feature_name] = inner_val\n\t\telse:\n\t\t\tfeature_name = ATTRIBUTES + feature_separator + key\n\n\t\t\tif is_boolean_set(val):\n\t\t\t\t# feature is true/false feature\n\t\t\t\ttffeatures.append(feature_name)\n\t\t\telse:\n\t\t\t\t# feature is val feature\n\t\t\t\tval.add(default_value)\n\t\t\t\tvalfeatures[feature_name] = val",
"def extractFeatures(self, datum):\n abstract",
"def get_features_from_file(self):\n f_list = []\n f = open(\"verifiability_features.txt\", \"r\")\n for line in f:\n f_list.append(line)\n self.features = f_list",
"def get_features_from_file(self):\n f_list = []\n f = open(\"verifiability_features.txt\", \"r\")\n for line in f:\n f_list.append(line)\n self.features = f_list",
"def findFeatures(self):\n\t\tpass",
"def _parse_cabling_xml(self, xml):\n parseString(xml, self._cabling_handler)\n return self._cabling_handler.get_data()",
"def readFeatures(self):\n\t\treturn self._fileSystem.readFeatures()",
"def _features_of(entry: _LexiconEntry) -> str:\n return entry[\"features\"]",
"def get_ea_attributes(path, logger):\n reattrib = None\n try:\n reattrib = requests.get(PAYLOAD['url'] + \"extensibleattributedef?\",\n auth=(PAYLOAD['username'],\n PAYLOAD['password']),\n verify=False)\n reattrib.raise_for_status()\n except requests.exceptions.ConnectionError as eaerrt:\n logger.error(\"Can't reach IPAM! Check your VPN or Local Access, %s\",\n eaerrt)\n exit()\n except requests.exceptions.HTTPError as eahrrt:\n logger.error(\"Check your credentials! %s\", eahrrt)\n exit()\n\n rutfeattrib = reattrib.content.decode('utf-8')\n rjsoneattrib = json.loads(rutfeattrib)\n eattl = []\n for att in rjsoneattrib:\n for key, value in att.items():\n if key == 'name':\n eattl.append(value)\n eattl.sort()\n pickle.dump(eattl, open(path, \"wb\"))"
] | [
"0.62178385",
"0.6131267",
"0.5999587",
"0.5670946",
"0.52712256",
"0.5198279",
"0.5161996",
"0.51611716",
"0.5016191",
"0.4958712",
"0.4958712",
"0.49565393",
"0.48952377",
"0.48616266",
"0.4837101",
"0.48286694",
"0.48118487",
"0.47954863",
"0.47357622",
"0.47289896",
"0.4679851",
"0.46645984",
"0.46588936",
"0.46522778",
"0.46522778",
"0.4647388",
"0.46426687",
"0.4636439",
"0.46153075",
"0.4588742"
] | 0.6285287 | 0 |
Parse feature info GML and filter feature attributes by permission. | def wms_getfeatureinfo_gml(self, feature_info, permission):
ElementTree.register_namespace('gml', 'http://www.opengis.net/gml')
ElementTree.register_namespace('qgs', 'http://qgis.org/gml')
ElementTree.register_namespace('wfs', 'http://www.opengis.net/wfs')
root = ElementTree.fromstring(feature_info)
# namespace dict
ns = {
'gml': 'http://www.opengis.net/gml',
'qgs': 'http://qgis.org/gml'
}
qgs_attr_pattern = re.compile("^{%s}(.+)" % ns['qgs'])
for feature in root.findall('./gml:featureMember', ns):
for layer in feature:
# get layer name from fid, as spaces are removed in tag name
layer_name = '.'.join(layer.get('fid', '').split('.')[:-1])
# get permitted attributes for layer
permitted_attributes = self.permitted_info_attributes(
layer_name, permission
)
for attr in layer.findall('*'):
m = qgs_attr_pattern.match(attr.tag)
if m is not None:
# attribute tag
attr_name = m.group(1)
if attr_name not in permitted_attributes:
# remove not permitted attribute
layer.remove(attr)
# write XML to string
return ElementTree.tostring(
root, encoding='utf-8', method='xml', short_empty_elements=False
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def wms_getfeatureinfo_plain(self, feature_info, permission):\n \"\"\"\n GetFeatureInfo results\n\n Layer 'Grundstuecke'\n Feature 1\n t_id = '1234'\n nbident = 'SO0123456789'\n nummer = '1234'\n ...\n \"\"\"\n if feature_info.startswith('GetFeatureInfo'):\n lines = []\n\n layer_pattern = re.compile(\"^Layer '(.+)'$\")\n attr_pattern = re.compile(\"^(.+) = .+$\")\n permitted_attributes = {}\n\n # filter feature attributes by permission\n for line in feature_info.splitlines():\n m = attr_pattern.match(line)\n if m is not None:\n # attribute line\n # check if layer attribute is permitted\n attr = m.group(1)\n if attr not in permitted_attributes:\n # skip not permitted attribute\n continue\n else:\n m = layer_pattern.match(line)\n if m is not None:\n # layer line\n # get permitted attributes for layer\n current_layer = m.group(1)\n permitted_attributes = self.permitted_info_attributes(\n current_layer, permission\n )\n\n # keep line\n lines.append(line)\n\n # join filtered lines\n feature_info = '\\n'.join(lines)\n\n return feature_info",
"def wms_getfeatureinfo_html(self, feature_info, permission):\n # NOTE: info content is not valid XML, parse as text\n if feature_info.startswith('<HEAD>'):\n lines = []\n\n layer_pattern = re.compile(\n \"^<TR>.+>Layer<\\/TH><TD>(.+)<\\/TD><\\/TR>$\"\n )\n table_pattern = re.compile(\"^.*<TABLE\")\n attr_pattern = re.compile(\"^<TR><TH>(.+)<\\/TH><TD>.+</TD><\\/TR>$\")\n next_tr_is_feature = False\n permitted_attributes = {}\n\n for line in feature_info.splitlines():\n m = attr_pattern.match(line)\n if m is not None:\n # attribute line\n # check if layer attribute is permitted\n attr = m.group(1)\n if next_tr_is_feature:\n # keep 'Feature', filter subsequent attributes\n next_tr_is_feature = False\n elif attr not in permitted_attributes:\n # skip not permitted attribute\n continue\n elif table_pattern.match(line):\n # mark next tr as 'Feature'\n next_tr_is_feature = True\n else:\n m = layer_pattern.match(line)\n if m is not None:\n # layer line\n # get permitted attributes for layer\n current_layer = m.group(1)\n permitted_attributes = self.permitted_info_attributes(\n current_layer, permission\n )\n\n # keep line\n lines.append(line)\n\n # join filtered lines\n feature_info = '\\n'.join(lines)\n\n return feature_info",
"def _parse_features(self):\n for root in self.roots:\n for feature in root.iter('feature'):\n api = feature.attrib.get('api', '')\n feature_name = feature.attrib.get('name', '')\n feature_number = int(float(feature.attrib.get('number', '')) * 10.0)\n\n # filter by api\n if api != 'gl':\n continue\n\n for require in feature.iter('require'):\n require_profile = require.attrib.get('profile', '')\n if require_profile and require_profile != 'core':\n # filter by profile\n continue\n\n for enum in require.iter('enum'):\n enum_name = enum.attrib.get('name', '')\n self.enum_list.append(enum_name)\n self.enum_required_by_feature[enum_name].append({\n 'api': api,\n 'name': feature_name,\n 'number': feature_number,\n 'profile': require_profile\n })\n for command in require.iter('command'):\n command_name = command.attrib['name']\n self.command_list.append(command_name)\n self.command_required_by_feature[command_name].append({\n 'api': api,\n 'name': feature_name,\n 'number': feature_number,\n 'profile': require_profile\n })\n\n for remove in feature.iter('remove'):\n remove_profile = remove.attrib.get('profile', '')\n if require_profile and require_profile != 'core':\n # filter by profile\n continue\n\n for enum in remove.iter('enum'):\n enum_name = enum.attrib.get('name', '')\n self.enum_removed_by_feature[enum_name].append({\n 'api': api,\n 'name': feature_name,\n 'number': feature_number,\n 'profile': remove_profile\n })\n for command in remove.iter('command'):\n command_name = command.attrib['name']\n self.command_removed_by_feature[command_name].append({\n 'api': api,\n 'name': feature_name,\n 'number': feature_number,\n 'profile': remove_profile\n })",
"def wms_getfeatureinfo_xml(self, feature_info, permission):\n ElementTree.register_namespace('', 'http://www.opengis.net/ogc')\n root = ElementTree.fromstring(feature_info)\n\n for layer in root.findall('./Layer'):\n # get permitted attributes for layer\n permitted_attributes = self.permitted_info_attributes(\n layer.get('name'), permission\n )\n\n for feature in layer.findall('Feature'):\n for attr in feature.findall('Attribute'):\n if attr.get('name') not in permitted_attributes:\n # remove not permitted attribute\n feature.remove(attr)\n\n # write XML to string\n return ElementTree.tostring(root, encoding='utf-8', method='xml')",
"def parse_feature(self, feature_key, lines):\n ...",
"def _get_apt_features(self, jdict):\n\n try:\n fdict_list = jdict['props']['homeDetails']['features']['attributes']\n features = []\n for fdict in fdict_list:\n # find the extra features \n try:\n value = fdict['formattedValue']\n try:\n key = fdict['formattedName']\n features.append(f'{key}:{value}')\n except:\n features.append(value)\n except:\n next\n # stick all the features together, seperated by |\n return features\n except:\n return None",
"def parse_features(self, skip=...):\n ...",
"def parse_features(self, skip=...):\n ...",
"def get_filter(feature, value):\r\n return {\r\n 'gender': {'user__profile__gender': value},\r\n 'level_of_education': {'user__profile__level_of_education': value},\r\n }[feature]",
"def extract_features(tlc):\n text = clean_text(tlc['body'])\n fields = dict()\n # add features here #\n fields['Top_comment_word_count'] = len(text.split(' '))\n fields['Top_comment_text'] = text\n\n # Extract time-based features\n def get_day_of_week(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').weekday() + 1\n\n def get_day_of_month(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').day\n\n def get_time_of_day(text):\n return datetime.datetime.strptime(text, '%Y-%m-%d %H:%M:%S').hour\n time_local = time.localtime(tlc['created_utc'])\n time_local = time.strftime(\"%Y-%m-%d %H:%M:%S\", time_local)\n fields['Top_comment_day'] = get_day_of_month(time_local)\n fields['Top_comment_day_of_week'] = get_day_of_week(time_local)\n fields['Top_comment_hour'] = get_time_of_day(time_local)\n\n # Extract gender value\n gp = GenderPerformr()\n probs, _ = gp.predict(tlc['author'])\n # Rescale it from [0,1] to [-1,1]\n fields['Top_comment_author_gender_value'] = 2 * probs - 1\n\n # Extract percentage of mispellings\n check = SpellChecker(\"en_US\")\n tokenizer = get_tokenizer(\"en_US\")\n # Prevent the denominator from 0\n def weird_division(n, d):\n return n / d if d else 0\n\n def get_mispellings_percentage(text):\n mispelling_count = 0\n total_count = 0\n if text == 'nan':\n return total_count\n else:\n check.set_text(text)\n for err in check:\n mispelling_count = mispelling_count + 1\n for w in tokenizer(text):\n total_count = total_count + 1\n value = weird_division(mispelling_count, total_count)\n return value\n fields['Top_comment_mispellings'] = get_mispellings_percentage(text)\n\n # Get politeness, agreement, support scores, and rescale them from [1,5] to [-1,1]\n ar = Agreementr()\n pr = Politenessr()\n sr = Supportr()\n fields['Top_comment_agreement_value'] = 0.5*float(ar.predict([text]))-1.5\n fields['Top_comment_politeness_value'] = 0.5*float(pr.predict([text]))-1.5\n fields['Top_comment_support_value'] = 0.5*float(sr.predict([text]))-1.5\n\n # Get toxicity scores\n KEY = \"yourkey.txt\" # os.getenv(\"GOOGLE_API_KEY\")\n service = discovery.build('commentanalyzer', 'v1alpha1', developerKey=KEY)\n\n def get_results(request_id, response, exception):\n toxicity_scores.append((request_id, response))\n\n toxicity_scores = []\n count = 0\n batch = service.new_batch_http_request(callback=get_results)\n analyze_request = {\n 'comment': {'text': text},\n \"requestedAttributes\": {\n \"TOXICITY\": {},\n \"SEVERE_TOXICITY\": {},\n \"ATTACK_ON_COMMENTER\": {}\n }\n }\n batch.add(service.comments().analyze(body=analyze_request), request_id=str(count))\n batch.execute()\n toxic_score = toxicity_scores[0][1]['attributeScores']['TOXICITY']['summaryScore']['value']\n attack_score = toxicity_scores[0][1]['attributeScores']['ATTACK_ON_COMMENTER']['summaryScore']['value']\n if toxic_score > 0.5:\n fields['Top_comment_untuned_toxicity'] = 1\n else:\n fields['Top_comment_untuned_toxicity'] = 0\n if toxic_score > 0.8 and attack_score > 0.5:\n fields['Top_comment_tuned_toxicity'] = 1\n else:\n fields['Top_comment_tuned_toxicity'] = 0\n # end of feature extractions #\n return fields",
"def permitted_info_attributes(self, info_layer_name, permission):\n # get WMS layer name for info result layer\n wms_layer_name = permission.get('feature_info_aliases', {}) \\\n .get(info_layer_name, info_layer_name)\n\n # return permitted attributes for layer\n return permission['layers'].get(wms_layer_name, {})",
"def wms_getfeatureinfo(self, response, params, permission):\n feature_info = response.text\n\n if response.status_code == requests.codes.ok:\n info_format = params.get('INFO_FORMAT', 'text/plain')\n if info_format == 'text/plain':\n feature_info = self.wms_getfeatureinfo_plain(\n feature_info, permission\n )\n elif info_format == 'text/html':\n feature_info = self.wms_getfeatureinfo_html(\n feature_info, permission\n )\n elif info_format == 'text/xml':\n feature_info = self.wms_getfeatureinfo_xml(\n feature_info, permission\n )\n elif info_format == 'application/vnd.ogc.gml':\n feature_info = self.wms_getfeatureinfo_gml(\n feature_info, permission\n )\n\n # NOTE: application/vnd.ogc.gml/3.1.1 is broken in QGIS server\n\n return Response(\n feature_info,\n content_type=response.headers['content-type'],\n status=response.status_code\n )",
"def extract_features(self):\n self.extract_features_static()\n self.extract_features_dynamic()",
"def compute_filtered_features(self, features):\n return [\n feature\n for feature in features\n if \"\".join(feature.qualifiers.get(\"is_edit\", \"false\")) != \"true\"\n ]",
"def get_features(item, GP):\n contents_url = '%s/contents' % item['url']\n\n # scrape readme\n gf.get_readme_length(contents_url, GP)\n\n # scrape file-by-file stats\n digest_repo(contents_url, GP)\n\n # scrape commit history\n gf.get_repo_commit_history(item, GP)\n\n # scrape stargazers\n GP.n_stars = item['stargazers_count']\n\n # scrape forks\n GP.n_forks = item['forks_count']\n\n return GP",
"def _parse_features(cls, node: OMNode) -> Dict[str, Dict[str, bool]]:\n features = {}\n for sectname in node:\n section = node[sectname]\n if not isinstance(section, dict) or '_types' not in section:\n continue\n features[sectname] = {}\n for opt in section:\n if not opt.startswith('has'):\n continue\n value = section[opt]\n if not isinstance(value, bool):\n continue\n option = opt[3:]\n features[sectname][option] = value\n return features",
"def _features_of(entry: _LexiconEntry) -> str:\n return entry[\"features\"]",
"def wms_getcapabilities(self, response, params, permission):\n xml = response.text\n\n if response.status_code == requests.codes.ok:\n # parse capabilities XML\n ElementTree.register_namespace('', 'http://www.opengis.net/wms')\n ElementTree.register_namespace('qgs', 'http://www.qgis.org/wms')\n ElementTree.register_namespace('sld', 'http://www.opengis.net/sld')\n ElementTree.register_namespace(\n 'xlink', 'http://www.w3.org/1999/xlink'\n )\n root = ElementTree.fromstring(xml)\n\n # use default namespace for XML search\n # namespace dict\n ns = {'ns': 'http://www.opengis.net/wms'}\n # namespace prefix\n np = 'ns:'\n if not root.tag.startswith('{http://'):\n # do not use namespace\n ns = {}\n np = ''\n\n root_layer = root.find('%sCapability/%sLayer' % (np, np), ns)\n if root_layer is not None:\n # remove broken info format 'application/vnd.ogc.gml/3.1.1'\n feature_info = root.find('.//%sGetFeatureInfo' % np, ns)\n if feature_info is not None:\n for format in feature_info.findall('%sFormat' % np, ns):\n if format.text == 'application/vnd.ogc.gml/3.1.1':\n feature_info.remove(format)\n\n # filter and update layers by permission\n permitted_layers = permission['public_layers']\n queryable_layers = permission['queryable_layers']\n for group in root_layer.findall('.//%sLayer/..' % np, ns):\n for layer in group.findall('%sLayer' % np, ns):\n layer_name = layer.find('%sName' % np, ns).text\n if layer_name not in permitted_layers:\n # remove not permitted layer\n group.remove(layer)\n else:\n # update queryable\n if layer_name in queryable_layers:\n layer.set('queryable', '1')\n else:\n layer.set('queryable', '0')\n\n # get permitted attributes for layer\n permitted_attributes = permission['layers'].get(\n layer_name, {}\n )\n\n # remove layer displayField if attribute not permitted\n # (for QGIS GetProjectSettings)\n display_field = layer.get('displayField')\n if (display_field and\n display_field not in permitted_attributes):\n layer.attrib.pop('displayField')\n\n # filter layer attributes by permission\n # (for QGIS GetProjectSettings)\n attributes = layer.find('%sAttributes' % np, ns)\n if attributes is not None:\n for attr in attributes.findall(\n '%sAttribute' % np, ns\n ):\n if attr.get('name') not in permitted_attributes:\n # remove not permitted attribute\n attributes.remove(attr)\n\n # update queryable for root layer\n if queryable_layers:\n root_layer.set('queryable', '1')\n else:\n root_layer.set('queryable', '0')\n\n # filter LayerDrawingOrder by permission\n # (for QGIS GetProjectSettings)\n layer_drawing_order = root.find(\n './/%sLayerDrawingOrder' % np, ns\n )\n if layer_drawing_order is not None:\n layers = layer_drawing_order.text.split(',')\n # remove not permitted layers\n layers = [\n l for l in layers if l in permitted_layers\n ]\n layer_drawing_order.text = ','.join(layers)\n\n # filter ComposerTemplates by permission\n # (for QGIS GetProjectSettings)\n templates = root.find(\n '%sCapability/%sComposerTemplates' % (np, np), ns\n )\n if templates is not None:\n permitted_templates = permission.get('print_templates', [])\n for template in templates.findall(\n '%sComposerTemplate' % np, ns\n ):\n template_name = template.get('name')\n if template_name not in permitted_templates:\n # remove not permitted print template\n templates.remove(template)\n\n if not templates.find('%sComposerTemplate' % np, ns):\n # remove ComposerTemplates if empty\n root.find('%sCapability' % np, ns).remove(templates)\n\n # write XML to string\n xml = ElementTree.tostring(\n root, encoding='utf-8', 
method='xml'\n )\n\n return Response(\n xml,\n content_type=response.headers['content-type'],\n status=response.status_code\n )",
"def _extract_features(self):\n # print(os.getpid())\n return {n:self._extract_feature(f) for (n,f) in self.features.items()}",
"def extract_other_feature_args(self, line):\n result = {'feature_type': line[2], 'indices': [int(line[3]), int(line[4])]}\n attribs = self.parse_attributes(line[8])\n result.update(attribs)\n return result",
"def get_features(feature_list, these_feature):\n features = {}\n def feat_filter(feature, this):\n try:\n mapper = lambda x, feat: filter(lambda y: feat in y, x.split(\" \"))[0]\n val = mapper(this, feature)\n if '+' in val:\n return TRUE\n return FALSE\n except:\n return UNDEF\n for feat in feature_list:\n features[feat] = feat_filter(feat, these_feature)\n return features",
"def _parseFeature(self, name, value=None):\n supported = self._parse([(name, value)])\n return supported.getFeature(name)",
"def apply_filter(input_file, output_file, features):\n lines = input_file.readlines()\n lines = list(map(clean, lines))\n\n for i in range(0, len(lines)):\n line = lines[i]\n feat = extract(line[\"features\"], features)\n output_line = line[\"rank\"] + \" \" + line[\"qid\"]\n for key in features:\n output_line += \" \" + str(key) + \":\" + str(feat[key])\n output_line += \" #\" + line[\"comment\"]\n output_file.write(output_line)",
"def extractFeatures(self, datum):\n abstract",
"def feature_extraction(_data):\n # Find the digits in the given string Example - data='18-20' digits = '1820'\n digits = str(''.join(c for c in _data if c.isdigit()))\n # calculate the length of the string\n len_digits = len(digits)\n # splitting digits in to values example - digits = '1820' ages = [18, 20]\n ages = [int(digits[i:i + 2]) for i in range(0, len_digits, 2)]\n # checking for special character in the given data\n special_character = '.+-<>?'\n spl_char = ''.join([c for c in list(special_character) if c in _data])\n # handling decimal age data\n if len_digits == 3:\n spl_char = '.'\n age = \"\".join([str(ages[0]), '.', str(ages[1])])\n # normalizing\n age = int(float(age) - 0.5)\n ages = [age]\n # Finding the maximum, minimum, average age values\n max_age = 0\n min_age = 0\n mean_age = 0\n if len(ages):\n max_age = max(ages)\n min_age = min(ages)\n if len(ages) == 2:\n mean_age = int((max_age + min_age) / 2)\n else:\n mean_age = max_age\n # specially added for 18 years cases\n only_18 = 0\n is_y = 0\n if ages == [18]:\n only_18 = 1\n if 'y' in _data or 'Y' in _data:\n is_y = 1\n under_18 = 0\n if 1 < max_age < 18:\n under_18 = 1\n above_65 = 0\n if mean_age >= 65:\n above_65 = 1\n # verifying whether digit is found in the given string or not.\n # Example - data='18-20' digits_found=True data='????' digits_found=False\n digits_found = 1\n if len_digits == 1:\n digits_found = 1\n max_age, min_age, mean_age, only_18, is_y, above_65, under_18 = 0, 0, 0, 0, 0, 0, 0\n elif len_digits == 0:\n digits_found, max_age, min_age, mean_age, only_18, is_y, above_65, under_18 = -1, -1, -1, -1, -1, -1, -1, -1\n \n feature = {\n 'ages': tuple(ages),\n 'len(ages)': len(ages),\n 'spl_chr': spl_char,\n 'is_digit': digits_found,\n 'max_age': max_age,\n 'mean_age': mean_age,\n 'only_18': only_18,\n 'is_y': is_y,\n 'above_65': above_65,\n 'under_18': under_18\n }\n\n return feature",
"def get_feature_permission(request, feature, operation=None):\n feature_info = FEATURE_MAP.get(feature)\n if not feature_info:\n raise ValueError(\"The requested feature '%(feature)s' is unknown. \"\n \"Please make sure to specify a feature defined \"\n \"in FEATURE_MAP.\")\n\n # Check dashboard settings\n feature_config = feature_info.get('config')\n if feature_config:\n if not setting_utils.get_dict_config('OPENSTACK_NEUTRON_NETWORK',\n feature_config['name']):\n return False\n\n # Check policy\n feature_policies = feature_info.get('policies')\n if feature_policies:\n policy_name = feature_policies.get(operation)\n if not policy_name:\n raise ValueError(\"The 'operation' parameter for \"\n \"get_feature_permission '%(feature)s' \"\n \"is invalid. It should be one of %(allowed)s\"\n % {'feature': feature,\n 'allowed': ' '.join(feature_policies.keys())})\n role = (('network', policy_name),)\n if not policy.check(role, request):\n return False\n\n # Check if a required extension is enabled\n feature_extension = feature_info.get('extension')\n if feature_extension:\n try:\n return is_extension_supported(request, feature_extension)\n except Exception:\n LOG.info(\"Failed to check Neutron '%s' extension is not supported\",\n feature_extension)\n return False\n\n # If all checks are passed, now a given feature is allowed.\n return True",
"def extract_features(input):\n # TODO: sort before selecting the last item in list\n \n disease = gender = bmi = age = step = activity_week = activity_day \\\n = sleep = heartbeat = bp_diastolic = bp_systolic \\\n = weight = height = None\n \n if \"userinfo\" in input:\n \n userinfo = input[\"userinfo\"]\n \n if \"age\" in userinfo:\n age = userinfo[\"age\"]\n if \"height\" in userinfo:\n height = userinfo[\"height\"]\n if \"gender\" in userinfo:\n gender = userinfo[\"gender\"]\n if \"weight\" in userinfo:\n if \"weight\" in userinfo:\n weights = userinfo[\"weight\"]\n prev_date = None\n for item in weights:\n if \"date\" in item and \"value\" in item:\n if prev_date is None or item[\"date\"] > prev_date:\n weight = item[\"value\"]\n prev_date = item[\"date\"]\n \n if height and weight: bmi = height / weight\n \n disease = []\n if \"hypertension\" in userinfo and userinfo[\"hypertension\"]: \n disease = disease + [\"hypertension\"]\n if \"diabetes\" in userinfo and userinfo[\"diabetes\"]:\n disease = disease + [\"diabetes\"]\n if \"insomnia\" in userinfo and userinfo[\"insomnia\"]:\n disease = disease + [\"insomnia\"]\n if \"cardio\" in userinfo and userinfo[\"cardio\"]:\n disease = disease + [\"cardio\"]\n \n if \"bloodPressures\" in input:\n bloodPressures = input[\"bloodPressures\"]\n prev_date = None\n for item in bloodPressures:\n if \"date\" in item and \"systolic\" in item and \"diastolic\" in item:\n if prev_date is None or _datetime(item[\"date\"]) > prev_date:\n bp_systolic = item[\"systolic\"]\n bp_diastolic = item[\"diastolic\"]\n prev_date = _datetime(item[\"date\"])\n\n\n if \"heartBeats\" in input:\n heartBeats = input[\"heartBeats\"]\n prev_date = None\n for item in heartBeats:\n if \"date\" in item and \"count\" in item:\n if prev_date is None or _datetime(item[\"date\"]) > prev_date:\n heartbeat = item[\"count\"]\n prev_date = _datetime(item[\"date\"])\n \n if \"sleep\" in input:\n sleeps = input[\"sleep\"]\n prev_date = None\n for item in sleeps:\n if \"date\" in item and \"minutesAsleep\" in item:\n if prev_date is None or _datetime(item[\"date\"]) > prev_date:\n sleep = item[\"minutesAsleep\"]\n prev_date = _datetime(item[\"date\"])\n \n if \"activities\" in input:\n activities = input[\"activities\"]\n prev_date = None\n for item in activities:\n if \"date\" in item and \"duration\" in item:\n if prev_date is None or _datetime(item[\"date\"]) > prev_date:\n activity_day = item[\"duration\"]\n prev_date = _datetime(item[\"date\"])\n \n # filter the week\n# week_ago = datetime.today() - datetime.timedelta(7)\n# activity_week = sum([activity[\"duration\"] for activity in \\\n# input[\"activities\"] if _datetime(activity[\"date\"]) > week_ago])\n \n features = {\n \"bp systolic\": bp_systolic,\n \"bp diastolic\": bp_diastolic,\n \"heartbeat\": heartbeat,\n \"sleep\": sleep,\n \"activity day\": activity_day,\n \"activity week\": activity_week,\n \"step\": step,\n \"age\": age,\n \"bmi\": bmi,\n \"gender\": gender,\n \"disease\": disease\n }\n return features",
"def _entry_features_annotation_is_valid(entry: _LexiconEntry) -> None:\n features = _features_of(entry)\n\n if not (features == \"~\" or _FEATURES_REGEX.fullmatch(features)):\n raise InvalidLexiconEntryError(\n \"Entry features annotation is invalid. Features need to be annotated\"\n \" as '+[Category_1=Value_x]...+[Category_n=Value_y].\")",
"def extract_features_from_mme(patient: MmeRequest) -> List:\n features = [clean_feature_ids(feature.id) for feature in patient.features if feature.observed == Observed.yes]\n diseases = [clean_feature_ids(disease.id) for disease in patient.disorders]\n genes = [clean_feature_ids(genomic_feature.gene.id) for genomic_feature in patient.genomicFeatures]\n if features:\n return features\n elif diseases:\n return diseases\n elif genes:\n return genes",
"def getFeatureInfo(self,feature):\n geomRef = feature.GetGeometryRef()\n nameIndex = feature.GetFieldIndex(\"OBJNAM\")\n featureName = \"NO OBJNAM\"\n if(nameIndex != -1 and feature.GetFieldAsString(nameIndex) != \"\" ):\n featureName = feature.GetFieldAsString(nameIndex)\n featureInfo = (featureName, feature.GetFID(), geomRef.GetX(), geomRef.GetY())\n # rospy.loginfo(featureInfo)\n return featureInfo"
] | [
"0.6403832",
"0.6113277",
"0.60985994",
"0.5698372",
"0.5627288",
"0.56187725",
"0.5485077",
"0.5485077",
"0.5412043",
"0.53983635",
"0.5309526",
"0.525838",
"0.5250037",
"0.52406436",
"0.52171934",
"0.52121466",
"0.51926285",
"0.51715875",
"0.5127526",
"0.51179856",
"0.50818855",
"0.5032366",
"0.5002166",
"0.49827072",
"0.4955054",
"0.49465525",
"0.49370465",
"0.4919517",
"0.49190068",
"0.48911187"
] | 0.6419726 | 0 |
Get permitted attributes for a feature info result layer. | def permitted_info_attributes(self, info_layer_name, permission):
# get WMS layer name for info result layer
wms_layer_name = permission.get('feature_info_aliases', {}) \
.get(info_layer_name, info_layer_name)
# return permitted attributes for layer
return permission['layers'].get(wms_layer_name, {}) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_attributes(self):\n\n endpoint = self._get_api_endpoint() + '/attributes'\n results = self.tq.get(endpoint, withp='attribute')\n if 'data' not in results:\n return {}\n\n return results['data']\n # tr = {}\n # for attribute in results['data']:\n # tr[attribute['attribute']['name']] = attribute['value']\n # return tr",
"def getAttributes(self):\n pass",
"def attributes(self) -> pulumi.Output[Optional[Mapping[str, Any]]]:\n return pulumi.get(self, \"attributes\")",
"def attrib(self) -> Any:\n return self.attributes",
"def capability_attributes(self) -> Mapping[str, Any] | None:\n if state_class := self.state_class:\n return {ATTR_STATE_CLASS: state_class}\n\n if options := self.options:\n return {ATTR_OPTIONS: options}\n\n return None",
"def getAttributes(self):\n return self.attributes",
"def getAttributes(self):\n return self.attributes",
"def attributes(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"attributes\")",
"def attributes(self) -> Optional[pulumi.Input[Mapping[str, Any]]]:\n return pulumi.get(self, \"attributes\")",
"def get_attributes(self):\n return self.attributes",
"def get_attributes(self) -> Dict[str, str]:\n pass",
"def wms_getfeatureinfo_plain(self, feature_info, permission):\n \"\"\"\n GetFeatureInfo results\n\n Layer 'Grundstuecke'\n Feature 1\n t_id = '1234'\n nbident = 'SO0123456789'\n nummer = '1234'\n ...\n \"\"\"\n if feature_info.startswith('GetFeatureInfo'):\n lines = []\n\n layer_pattern = re.compile(\"^Layer '(.+)'$\")\n attr_pattern = re.compile(\"^(.+) = .+$\")\n permitted_attributes = {}\n\n # filter feature attributes by permission\n for line in feature_info.splitlines():\n m = attr_pattern.match(line)\n if m is not None:\n # attribute line\n # check if layer attribute is permitted\n attr = m.group(1)\n if attr not in permitted_attributes:\n # skip not permitted attribute\n continue\n else:\n m = layer_pattern.match(line)\n if m is not None:\n # layer line\n # get permitted attributes for layer\n current_layer = m.group(1)\n permitted_attributes = self.permitted_info_attributes(\n current_layer, permission\n )\n\n # keep line\n lines.append(line)\n\n # join filtered lines\n feature_info = '\\n'.join(lines)\n\n return feature_info",
"def attributes(self) -> Optional[Mapping[str, Sequence[str]]]:\n return pulumi.get(self, \"attributes\")",
"def attributes(self) -> pulumi.Input[Sequence[pulumi.Input[str]]]:\n return pulumi.get(self, \"attributes\")",
"def get_attributes(cls):\r\n return []",
"def describe_account_attributes():\n pass",
"def attributes(self):\n return self.problem.attributes",
"def _get_feature_attributes(self) -> dict:\n srs = pd.Series(dir(self))\n srs = srs[\n (~srs.str.startswith('_'))\n & (~srs.str.contains('as_'))\n & (srs != 'putin')\n & (srs != 'takeout')\n & (srs != 'intermediate_accesses')\n & (srs != 'geometry')\n & (srs != 'has_a_point')\n & (srs != 'centroid')\n ]\n srs = srs[srs.apply(lambda p: not hasattr(getattr(self, p), '__call__'))]\n return {key: getattr(self, key) for key in srs}",
"def get_attributes(cls):\n return cls._attributes",
"def get_attribute_list(self):\n return self.dp.get_attribute_list()",
"def GetAttributes(self):\r\n\r\n return self._attr",
"def resource_attributes(self) -> Optional[pulumi.Input['ResourceAttributesArgs']]:\n return pulumi.get(self, \"resource_attributes\")",
"def resource_attributes(self) -> Optional[pulumi.Input['ResourceAttributesArgs']]:\n return pulumi.get(self, \"resource_attributes\")",
"def get_info(self):\n self.exists = self.check_subscr()\n return self.attrs",
"def getAllAttributes(self, limit=None):\n return self.getAttributeRange(limit=limit)",
"def test_get_attributes(self):\n pass",
"def get_attributes(cls, entity):\n return entity.category.facts.all()",
"def __get_capability(self):\n requests = self.__get_capability_request()\n exception = self.__get_capability_exception()\n layers = self.__get_capability_layer()\n \n capability = { \"requests\": requests,\n \"exception\" : exception,\n \"layers\" : layers}\n return capability",
"def attributeInfo(*args, allAttributes: bool=True, bool: bool=True, enumerated: bool=True,\n hidden: bool=True, inherited: bool=True, internal: bool=True, leaf: bool=True,\n logicalAnd: bool=True, multi: bool=True, short: bool=True, userInterface:\n bool=True, writable: bool=True, type: AnyStr=\"\", **kwargs)->List[AnyStr]:\n pass",
"def get_attributes(self):\n return dict(self.attributes) # return the attributes"
] | [
"0.63910294",
"0.6372006",
"0.62900674",
"0.6062062",
"0.6054912",
"0.6051476",
"0.6051476",
"0.6046516",
"0.6046516",
"0.6044955",
"0.603735",
"0.58969605",
"0.57916987",
"0.5715229",
"0.5692241",
"0.56833255",
"0.5683263",
"0.5661067",
"0.5644788",
"0.5635615",
"0.563487",
"0.5609475",
"0.5609475",
"0.5576654",
"0.5531759",
"0.5525466",
"0.5512204",
"0.55013853",
"0.5501229",
"0.5499718"
] | 0.76015323 | 0 |
Create a dictionary with a mix of activation functions as keys and, as values, a list of the paths where their results.json files are located. | def create_path_dict(save_path):
act_fn = [sorted(['relu', 'antirelu', 'identity', 'tanh', 'sigmoid']),
sorted(['relu', 'antirelu', 'identity', 'sigmoid']),
sorted(['relu', 'antirelu', 'identity', 'tanh']),
sorted(['relu', 'antirelu', 'sigmoid', 'tanh']),
sorted(['relu', 'identity', 'sigmoid', 'tanh']),
sorted(['antirelu', 'identity', 'sigmoid', 'tanh']),
['relu'],
['sigmoid'],
['tanh'],
['antirelu'],
['None']]
# ['identity']]
act_fn = ['_'.join(act) for act in act_fn]
path_dict = defaultdict(list)
for (filepath, dirname, filename) in os.walk(save_path):
if 'results.json' in filename:
for act in act_fn:
temp = filepath.split('/')
if act == temp[-1] or act == temp[-2]:
path_dict[act].append(filepath)
print(path_dict)
return path_dict | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _build_functions_list():\n return {\"ec2-sg\": _build_ec2_mapping_from_sg,\n \"ec2-resources\": _build_ec2_mapping_from_resources,\n \"rds-sg\": _build_rds_mapping_from_sg,\n \"rds-resources\": _build_rds_mapping_from_resources,\n \"elbv2-sg\": _build_elbv2_mapping_from_sg,\n \"elbv2-resources\": _build_elbv2_mapping_from_resources}",
"def get_outputs(self, input_dict: Dict) -> Dict[str, np.ndarray]:\n activation_values = self.session.run(self.activation_names, input_dict)\n return dict(zip(self.sanitized_activation_names, activation_values))",
"def outputActivationFunctions(self):\n\n\t\treturn self._outputActivationFunctions",
"def _get_output_dictionary(self):\n\n return_dictionary = {}\n\n for output_path in self.provided_outputs:\n return_dictionary[output_path.full_path] = self.get_value(output_path)\n\n return return_dictionary",
"def apply(self, func):\n f = lambda path: func(GrowthSimulation.load(path))\n return {i: f(p) for i, p in self.simulation_paths.items()}",
"def collect_data(base='.'):\n files = glob(join(base, 'accuracy_*.json'))\n data = {}\n for file in files:\n fields = basename(file).split('_')\n fields = fields[1:-2]\n accuracy = load(open(file))\n weights = load(open(file.replace('accuracy', 'weights')))\n for model in accuracy.keys():\n # add both accuracy and weights\n key = tuple(fields + [model])\n value = accuracy[model] + weights[model]\n data[key] = value\n return data",
"def make_func_dict(self):\n\t\tindex = 0\n\t\twhile index < len(self.tokens):\n\t\t\tif self.tokens[index] == '\\\\': #Lambda\n\t\t\t\t#Every lambda looks like this:\n\t\t\t\t#(\\ (param1:type1, ...) : return_type\n\t\t\t\t# expression)\n\n\t\t\t\t#That expression can then be used as a function\n\t\t\t\t#i.e. ( (\\(...):type (...)) param1 param2 ...)\n\t\t\t\t# calls the lambda\n\n\t\t\t\t#Parentheses around entire function\n\t\t\t\ti = self.tokens.match_paren(index - 1)\n\n\t\t\t\t#Create unique function name\n\t\t\t\tfunc_name = 'f%d' % self.func_count\n\n\t\t\t\t# function body\n\t\t\t\tself.func_dict[func_name] = self.tokens[index-1:i+1].get_joined()\n\t\t\t\tself.func_count += 1\n\n\t\t\tindex += 1\n\n\t\treturn self.func_dict",
"def assign_functions(self):\n # get function declarations from json string\n self.functions = self.definitions.get(\"functions\", [])\n\n # generate function declaration in header file\n header = cls.header_from_function_name_and_args(\n _func[\"name\"], _func[\"args\"]\n )\n\n _functions = OrderedDict()\n for func in self.functions:\n _name = func[\"name\"]\n _type = func[\"type\"]\n _args = func[\"args\"]\n _deriv = self.get_derivatives(func.get(\"deriv\", []))\n _functions[_name] = {\n \"name\": _name,\n \"type\": _type,\n \"args\": _args,\n \"deriv\": _deriv,\n }\n self._functions = _functions",
"def _keys_for_activation(language, version):\n language = language.upper()\n version = version.upper().replace(\".\", \"_\")\n return ActivationKeys(\"_POLYSQUARE_ACTIVATED_{}_{}\".format(language,\n version),\n \"_POLYSQUARE_DEACTIVATED_%s_%s_{key}\" % (language,\n version),\n \"_POLYSQUARE_INSERTED_%s_%s_{key}\" % (language,\n version))",
"def get_funcs(self,var):\n fname = (var+\".p\") \n pickle_path = os.path.join(CWD_PATH,self.join_path,self.pick_path,fname)\n [coef,powers,intercept,mins,maxes] = pickle.load(open(pickle_path,'rb'))\n \n # The 3 function variables you need to-recreate this model & the min & max to set this in the environment.\n out = {'coef': coef, 'powers':powers,'intercept':intercept}\n return out, mins, maxes",
"def data():\n print (\"&\")\n res = {}\n\t\n # Load Data\n with open(DATA_PATH_TRAIN, 'rb') as f:\n data = pickle.load(f)\n\t\t\n for d in data:\n for j in range(len(d)):\n if not d[j][\"addinfo\"][\"path\"] in res:\n res[d[j][\"addinfo\"][\"path\"]] = {}\n d[j][\"environment\"][\"text\"] = d[j][\"addinfo\"][\"text\"]\n res[d[j][\"addinfo\"][\"path\"]][d[j][\"addinfo\"][\"line\"]] = d[j][\"environment\"]\n\t \t\n with open(DATA_PATH_TEST, 'rb') as f:\n data = pickle.load(f)\n\t\t\n for d in data:\n for j in range(len(d)):\n if not d[j][\"addinfo\"][\"path\"] in res:\n res[d[j][\"addinfo\"][\"path\"]] = {}\n d[j][\"environment\"][\"text\"] = d[j][\"addinfo\"][\"text\"]\n res[d[j][\"addinfo\"][\"path\"]][d[j][\"addinfo\"][\"line\"]] = d[j][\"environment\"]\n\t\t\t\n with open('tasks/env/data/data.json', 'w') as outfile:\n json.dump(res, outfile)",
"def create_dicts():\n load_data_for_dict('data/atis/train/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/valid/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/test/seq.in', 'data/atis/voc/vocabulary.json') \n load_data_for_dict('data/atis/train/seq.out', 'data/atis/voc/slot_vocabulary.json')",
"def _create_model_out_dictkeys():\r\n model_names = []\r\n result_keys = []\r\n for model_name in model_zoo._ModelZooUrls.CONFIG_PATH_TO_URL_SUFFIX.keys():\r\n try:\r\n print(model_name, \":\")\r\n select_model(model_name)\r\n result = get_features_by_image_path(\"./sample.jpg\")\r\n model_names.append(model_name)\r\n result_keys.append(list(result.keys()))\r\n except RuntimeError as t:\r\n print(t)\r\n\r\n pd.DataFrame(list(zip(model_names, result_keys))).to_csv(\"d2_model_out_dictkeys.csv\")",
"def get_features(self):\n features = {}\n for i in self.binaries:\n features[i] = self.binaries[i].features\n return features",
"def acquisitions(self):\r\n\r\n acquisitions_dict = {}\r\n for key in self.files:\r\n if key != 'OR_KA08_2_2': \r\n print(self.files[key])\r\n matlab_file = scipy.io.loadmat(self.files[key])\r\n if len(self.files[key])>41:\r\n vibration_data=matlab_file[self.files[key][19:38]]['Y'][0][0][0][6][2]\r\n else:\r\n vibration_data=matlab_file[self.files[key][19:37]]['Y'][0][0][0][6][2]\r\n\r\n acquisitions_dict[key] = vibration_data[0]\r\n\r\n acquisitions_data = {}\r\n acquisitions_data['conditions'] = self.conditions\r\n acquisitions_data['dirdest'] = self.dirdest\r\n acquisitions_data['acquisitions'] = acquisitions_dict\r\n\r\n return acquisitions_data",
"def _build_lut(self):\n lut = {}\n for k, v in self.paths.items(): # k:v, <zipfile#>:<path of the extracted archive>\n for _, _, filenames in tf.io.gfile.walk(v):\n for fileName in filenames:\n lut[fileName] = os.path.join(v, 'images', fileName)\n return lut",
"def initialiseActivationFunctions(self):\n\n\t\t###uniform for output units\n\t\tif self._outputActivationFunctions == None or self._outputActivationDerivatives == None:\t\n\t\n\t\t\tself._outputActivationFunctions = []\n\t\t\tself._outputActivationDerivatives = []\n\n\t\t\tactFunc = lambda x : x\n\t\t\tdActFunc = lambda x : 1.0\n\t\n\t\t\tfor i in range(self.nOutputs):\n\t\t\t\t\n\t\t\t\tself._outputActivationFunctions.append(actFunc)\n\t\t\t\tself._outputActivationDerivatives.append(dActFunc)\n\n\t\t\tself._outputActivationFunctions = np.array(self._outputActivationFunctions)\n\t\t\tself._outputActivationDerivatives = np.array(self._outputActivationDerivatives)\n\t\t\t\n\n\t\tif self._hiddenActivationFunctions == None or self._hiddenActivationDerivatives == None:\n\n\t\t\tself._hiddenActivationFunctions = []\n\t\t\tself._hiddenActivationDerivatives = []\n\n\t\t\tfor i in range(self.nHiddenLayers):\n\n\t\t\t\tfTemp = []\n\t\t\t\tdTemp = []\n\t\t\t\t\n\t\t\t\t#Make the default sigmoid the one suggested in LeCun et al 1998\n\t\t\t\ttwist = 0.01\n\t\t\t\ta = 1.7159\n\t\t\t\tc = 2.0/3.0\n\n\t\t\t\tactFunc = lambda x : a*np.tanh(c*x) + twist*x\n\t\t\t\tdActFunc = lambda x : twist + a*c*(1.0 - (np.tanh(c*x)**2.0))\n\n#\t\t\t\tactFunc = lambda x : np.tanh(x)\n#\t\t\t\tdActFunc = lambda x : 1.0 - np.tanh(x)**2.0\n\n\t\t\t\t#plus all of the bias\n\t\t\t\tfor j in range(self.nUnitsPerLayer+1):\n\t\t\t\t\t\n\t\t\t\t\tfTemp.append(actFunc)\n\t\t\t\t\tdTemp.append(dActFunc)\n\t\t\t\t\n\t\t\t\tself._hiddenActivationFunctions.append(fTemp)\n\t\t\t\tself._hiddenActivationDerivatives.append(dTemp)\n\t\t\t\n\t\t\tself._hiddenActivationFunctions = np.array(self._hiddenActivationFunctions)\n\t\t\tself._hiddenActivationDerivatives = np.array(self._hiddenActivationDerivatives)",
"def get_saveables(self):\n\n saveables = dict()\n saveables['encoder'] = self.encoder\n saveables['decoder'] = self.decoder\n saveables['optim'] = self.optim\n return saveables",
"def generate(self) -> Dict[str, Any]:\n raise NotImplementedError",
"def get_activation_function(func_name):\n return {\n 'linear': lambda x: x,\n 'relu': lambda x: x * (x > 0),\n 'elu': lambda x: x * (x >= 0) + (T.exp(x) - 1) * (x < 0),\n 'softmax': T.nnet.softmax,\n 'tanh': T.tanh,\n 'log_softmax': log_softmax,\n 'sigmoid': T.nnet.sigmoid\n }[func_name]",
"def load_encoders():\n\n encoders = {}\n\n # Pclass\n pclass_encoder = LabelBinarizer()\n\n with open(os.path.join('encoders', 'pclass_encoder.json'),\n 'r', encoding='utf8', errors='ignore') as infile:\n pclass_encoder.classes_ = json.load(infile)\n encoders['pclass_encoder'] = pclass_encoder\n\n # Sex\n sex_encoder = LabelBinarizer()\n\n with open(os.path.join('encoders', 'sex_encoder.json'),\n 'r', encoding='utf8', errors='ignore') as infile:\n sex_encoder.classes_ = json.load(infile)\n encoders['sex_encoder'] = sex_encoder\n\n # Age\n age_encoder = LabelBinarizer()\n age_encoder.classes_ = list(range(10))\n\n with open(os.path.join('encoders', 'age_bins.json'),\n 'r', encoding='utf8', errors='ignore') as infile:\n age_bins = json.load(infile)\n encoders['age_bins'] = age_bins\n encoders['age_encoder'] = age_encoder\n\n # Siblings/Spouses Aboard\n siblings_spouses_aboard_encoder = LabelBinarizer()\n\n with open(os.path.join('encoders', 'siblings_spouses_aboard_encoder.json'),\n 'r', encoding='utf8', errors='ignore') as infile:\n siblings_spouses_aboard_encoder.classes_ = json.load(infile)\n encoders['siblings_spouses_aboard_encoder'] = siblings_spouses_aboard_encoder\n\n # Parents/Children Aboard\n parents_children_aboard_encoder = LabelBinarizer()\n\n with open(os.path.join('encoders', 'parents_children_aboard_encoder.json'),\n 'r', encoding='utf8', errors='ignore') as infile:\n parents_children_aboard_encoder.classes_ = json.load(infile)\n encoders['parents_children_aboard_encoder'] = parents_children_aboard_encoder\n\n # Fare\n fare_encoder = LabelBinarizer()\n fare_encoder.classes_ = list(range(10))\n\n with open(os.path.join('encoders', 'fare_bins.json'),\n 'r', encoding='utf8', errors='ignore') as infile:\n fare_bins = json.load(infile)\n encoders['fare_bins'] = fare_bins\n encoders['fare_encoder'] = fare_encoder\n\n # Target Field: Survived\n survived_encoder = LabelEncoder()\n\n with open(os.path.join('encoders', 'survived_encoder.json'),\n 'r', encoding='utf8', errors='ignore') as infile:\n survived_encoder.classes_ = np.array(json.load(infile))\n encoders['survived_encoder'] = survived_encoder\n\n return encoders",
"def outputs(self):\n return {\"path_to_mapping_json\": File_IO(\n self.node.outputs[0])}",
"def to_json(self) -> Dict:\n return {\"function\": self.function.__name__, \"kwargs\": self.kwargs_to_json()}",
"def _make_modules(is_train):\n return {\n 'conversion': functools.partial(\n conversion, is_train=is_train, is_extrapolation=False),\n 'time': functools.partial(time, is_train=is_train),\n }",
"def generate_tools_list():\n out = {}\n\n # Set BETYDB_LOCAL_CACHE_FOLDER = /tools directory\n print(\"Dumping BETY experiments file into \"+os.environ.get('BETYDB_LOCAL_CACHE_FOLDER', \"/home/extractor/\"))\n #dump_experiments()\n\n toollist = [\n \"bin2tif.py\",\n \"nrmac.py\",\n \"canopyCover.py\",\n \"fieldmosaic.py\",\n \"submit_clowder.py\",\n \"submit_bety.py\",\n \"submit_geo.py\",\n \"bety_experiments.json\"\n ]\n\n print(\"Including /tools directory files\")\n for t in toollist:\n #tool_daxf = create_daxf(t, os.path.join(\"tests/workflow/workflow-pilot/workflow_terra/tools\", t))\n tool_daxf = create_daxf(t, os.path.join(os.getcwd(), \"tools\", t))\n # Use filename as dict key in case we need it as input later\n out[t] = tool_daxf\n\n sensor_metadata_list = [\n \"ua-mac/sensor-metadata/sensors/stereo/sensor_fixed_metadata.json\",\n \"ua-mac/sensor-metadata/sensors/flirIrCamera/sensor_fixed_metadata.json\",\n \"ua-mac/sensor-metadata/sensors/scanner3D/sensor_fixed_metadata.json\",\n \"ua-mac/sensor-metadata/sensors/VNIR/sensor_fixed_metadata.json\",\n \"ua-mac/sensor-metadata/sensors/scanalyzer/sensor_fixed_metadata.json\"\n ]\n print(\"Including sensor fixed metadata\")\n for s in sensor_metadata_list:\n sensor_metadata_daxf = create_daxf(s, os.path.join(sites_dir, s))\n # Use '$SENSOR_fixed' as dict key in case we need it as input later\n out[s.split(\"/\")[-2]+\"_fixed\"] = sensor_metadata_daxf\n\n return out",
"def get_func_lookup():\n return {\n \"randomstr\": randomstr,\n \"random\": random,\n \"sha256\": sha256,\n \"ed25519\": ed25519_private_key,\n \"rsa\": rsa_private_key,\n \"rsapublic\": rsa_public_key,\n \"publickey\": public_key,\n \"reveal\": reveal,\n \"loweralphanum\": loweralphanum,\n \"basicauth\": basicauth,\n }",
"def _get_result_paths(self,data):\n return {'output':ResultPath(Path=data['output_path'],IsWritten=True)}",
"def save_conv_output(activations, name):\n def get_activation(m, i, o):\n activations[name] = F.relu(o).data.cpu().numpy()\n\n return get_activation",
"def _parse_additional_json(dir_name: Path) -> Dict[str, Any]:\n additional_json = {}\n for filename in dir_name.glob(\"*.json*\"):\n key = filename.name.split(\".\")[0]\n if key not in (\"custodian\", \"transformations\"):\n additional_json[key] = loadfn(filename, cls=None)\n return additional_json",
"def get_file_operations() -> dict:\n\n from FileWrangler.fileops.CompletelyReplace import CompletelyReplaceUIOperation\n from FileWrangler.fileops.Separator import SeparatorUIOperation\n from FileWrangler.fileops.PatternFinding import PatternExtractingUIOperation\n from FileWrangler.fileops.PathComponents import PathComponentsUIOperation\n operations = [\n CompletelyReplaceUIOperation(),\n SeparatorUIOperation(),\n PatternExtractingUIOperation(),\n PathComponentsUIOperation()\n ]\n return {x.name: x for x in operations}"
] | [
"0.5794404",
"0.57442963",
"0.5732238",
"0.566426",
"0.5630192",
"0.5596989",
"0.55111015",
"0.5485543",
"0.5448509",
"0.5376371",
"0.5322838",
"0.5299788",
"0.52848226",
"0.5236001",
"0.5222747",
"0.5220277",
"0.5215815",
"0.52142113",
"0.52097386",
"0.520471",
"0.51961166",
"0.51858896",
"0.51846987",
"0.5179289",
"0.5173727",
"0.51670367",
"0.51432735",
"0.5139698",
"0.5125148",
"0.51245207"
] | 0.6205458 | 0 |
Minimizes window to tray (statusicon) | def minimize_to_tray(self, widget, event, data=None):
if event.changed_mask & gtk.gdk.WINDOW_STATE_ICONIFIED:
if event.new_window_state & gtk.gdk.WINDOW_STATE_ICONIFIED:
logging.debug("minimize to tray")
self.window_state = self.check_window_state()
self.statusicon.set_visible(True)
self.window.hide_all() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def set_minimize_to_tray(self):\r\n\t\tself.statusicon = gtk.StatusIcon()\r\n\t\ticon_path = os.path.realpath(\".\" + \"\\\\icon.png\")\r\n\t\tself.statusicon = load_status_icon(icon_path, 128, 128, gtk.STOCK_GOTO_TOP)\r\n\t\tself.statusicon.set_tooltip(\"Claimtracker\")\r\n\t\tself.statusicon.connect(\"activate\", self.return_from_tray)\r\n\t\tself.window.connect(\"window-state-event\", self.minimize_to_tray)\r\n\t\tself.statusicon.set_visible(False)",
"def return_from_tray(self, event):\r\n\t\tlogging.debug(\"return from tray\")\r\n\t\tif self.window_state: # Was the window previously maximized?\r\n\t\t\tself.window.maximize()\r\n\t\tself.window.show_all()\r\n\t\tself.window.present()\r\n\t\tself.statusicon.set_visible(False)",
"def on_closing():\n if not app.is_minimize_to_system_tray.get():\n app.save_setting()\n app.destroy()\n else:\n app.withdraw()\n plus_image = os.path.join(\"data\", \"plus.gif\")\n image = Image.open(plus_image)\n menus = menu(item(\"Quit\", quit_window), item(\"Show\", show_window, default=True))\n icon = pystray.Icon(\"name\", image, \"My System Tray Icon\", menus)\n icon.run()",
"def minimize_app(appname,ui):\r\n ui=ui\r\n ui.doDefault_on_obj('Minimize', False, 'button') \r\n time.sleep(WAIT)",
"def on_window1_window_state_event(self, widget, event, *user_params):\n\t\tif not self.settings[\"min_icon\"]: return\n\t\tif (event.changed_mask == gtk.gdk.WINDOW_STATE_ICONIFIED):\t\t\t\t\t\t\t# minimize button clicked\n\t\t\tif ( (event.new_window_state == gtk.gdk.WINDOW_STATE_ICONIFIED) or\n\t\t\t (event.new_window_state == gtk.gdk.WINDOW_STATE_ICONIFIED | gtk.gdk.WINDOW_STATE_MAXIMIZED) ):\t# going to iconify\n\t\t\t\t#self.window1.iconify()\t\t\t# for smooth change with compiz\n\t\t\t\t#while gtk.events_pending():\r\n\t\t\t\t#\tgtk.main_iteration()\r\n\t\t\t\tself.stateico.set_visible(True)\n\t\t\t\tself.window1.set_property('visible', False)",
"def onMinimize(self, event):\n # if self.IsIconized():\n # self.Hide()\n self.Hide()",
"def activate_statusbar_icon_close():\n pass",
"def activate_statusbar_icon_mode():\n pass",
"def onMinimize(self, eventDict = None):\n self.mainWindow.minimize()",
"def show_window(icon, item):\n icon.stop()\n app.after(0, app.deiconify())",
"def start_tray_app(speed_test):\n app = QtWidgets.QApplication(sys.argv)\n app.setQuitOnLastWindowClosed(False)\n app.setApplicationName(\"Internet Speed Test\")\n w = QtWidgets.QWidget()\n tray_icon = SystemTrayIcon(QtGui.QIcon(r'icon.png'), w, speed_test)\n tray_icon.show()\n sys.exit(app.exec_())",
"def create_tray_icon(self):\n tray_icon = gtk.StatusIcon()\n tray_icon.set_from_file(AFM_LOGO_PATH)\n tray_icon.connect('popup-menu', self.popup_menu)\n tray_icon.set_tooltip('Audio Failure Monitor')\n tray_icon.set_visible(True)\n return tray_icon",
"def trayagent(forwho,trayclickedfunction):\r\n\r\n tray = QSystemTrayIcon(forwho.windowIcon(), forwho)\r\n forwho.connect(tray,SIGNAL(\"activated(QSystemTrayIcon::ActivationReason)\"),trayclickedfunction)\r\n tray.show()\r\n return tray",
"def ev_windowminimized(self, event: WindowEvent) -> None:",
"def open_status_tray(cr):\r\n ui = ui_utils.UI_Handler()\r\n ui.start_ui_root(cr)\r\n logging.info(\"Opening status tray\")\r\n ui.doDefault_on_obj(STATUS_TRAY_REGEXP, True, role='button')\r\n list=ui.get_name_role_list()\r\n return ui",
"def on_action_5_triggered(self):\n # TODO: not implemented yet\n print('最小化')\n self.showMinimized()",
"def minimize(self):\n lib.SDL_MinimizeWindow(self._ptr)",
"def ev_windowminimized(self, event: tcod.event.WindowEvent) -> T | None:",
"def create_trayicon(self):\n self.trayicon = Gtk.StatusIcon()\n self.trayicon.connect('activate', self.on_trayicon_activate)\n self.trayicon.connect('popup-menu', self.on_trayicon_popup_menu)\n self.trayicon.set_tooltip_text(_(\"Hotot: Click to Active.\"))\n self.trayicon_pixbuf[0] = GdkPixbuf.Pixbuf.new_from_file(\n utils.get_ui_object('image/ic24_hotot_mono_light.svg'))\n self.trayicon_pixbuf[1] = GdkPixbuf.Pixbuf.new_from_file(\n utils.get_ui_object('image/ic24_hotot_mono_light_blink.svg'))\n self.trayicon.set_from_pixbuf(self.trayicon_pixbuf[0])\n self.trayicon.set_visible(True)",
"def set_visible(self):\n\t\tself.hide()\n\t\tself.__sys_tray_icon.setVisible(True)",
"def create_system_tray_icon(self):\n\t\tself.__sys_tray_icon = SystemTrayIcon()\n\t\tself.__sys_tray_icon.setVisible(False)\n\t\tself.__sys_tray_icon.show_app.connect(self.show)\n\t\tself.__sys_tray_icon.close_app.connect(self.exit_app)",
"def Minimize(self):\r\n \r\n return self.SetFlag(self.optionMinimized, True)",
"def iconify(self):\n if self.active:\n self.master.withdraw()\n self.active = False",
"def maximize_option():\n Width=MaxWidth\n Height=MaxHeight - WinTitle -WinBorder\n PosX=LeftPadding\n PosY=TopPadding\n move_active(PosX,PosY,Width,Height)\n raise_window(\":ACTIVE:\")",
"def show_desktop(dsk_session: WebDriver):\n dsk_session.find_element_by_class_name(\"TrayShowDesktopButtonWClass\").click()",
"def show_window_background():\n \n window = win32gui.FindWindow(MINECRAFT_CLASS_NAME, MINECRAFT_TITLE + MINECRAFT_VERSION)\n win32gui.SetForegroundWindow(window)\n win32gui.BringWindowToTop(window)",
"def maximize_app( appname,ui):\r\n ui=ui\r\n ui.doDefault_on_obj(appname, False, 'button')",
"def minimize(self):\n\t\tself.__window.minimize()\n\t\tself.update_minimization()\n\t\treturn",
"def alarm(self, event):\r\n\r\n # top left corner of top level window\r\n x1_coordinate, y1_coordinate = self.winfo_rootx(), self.winfo_rooty()\r\n\r\n # bottom right corner of top level window\r\n x2_coordinate = x1_coordinate + self.winfo_width()\r\n y2_coordinate = y1_coordinate + self.winfo_height()\r\n if not (x1_coordinate < event.x_root < x2_coordinate and\r\n y1_coordinate < event.y_root < y2_coordinate):\r\n self.attributes(\"-alpha\", 0.1)\r\n self.bell()\r\n self.after(100, lambda: self.attributes(\"-alpha\", 1))",
"def activated(self, icon):\n self.statusicon.set_from_stock(gtk.STOCK_PRINT)\n self.statusicon.set_tooltip(\"FolderWatch\")\n subprocess.call([self._command], shell=True)"
] | [
"0.84101045",
"0.69568986",
"0.67686915",
"0.6701629",
"0.6687722",
"0.6662098",
"0.66309386",
"0.6508112",
"0.6386676",
"0.6386318",
"0.63793576",
"0.6371748",
"0.63305694",
"0.62862784",
"0.6282958",
"0.6187243",
"0.61576545",
"0.61076534",
"0.6025659",
"0.60221696",
"0.59649545",
"0.59358513",
"0.5925895",
"0.5914274",
"0.5843751",
"0.5826082",
"0.579857",
"0.57938725",
"0.5782969",
"0.5745685"
] | 0.78925043 | 1 |
Check that bucket has been selected | def _check_queryable(self):
if not self._bucket:
raise Exception('Bucket has not been selected') | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def is_control_panel_bucket(name, bucket):\n is_valid = name.startswith(\"alpha-app-\")\n if name.startswith(\"dev-\"):\n # Ignore this since it's a bucket associated with the dev instance.\n is_valid = False\n elif not is_valid:\n try:\n tags = bucket.Tagging()\n tags.load()\n for obj in tags.tag_set:\n if (\n obj[\"Key\"] == \"buckettype\"\n and obj[\"Value\"] == \"datawarehouse\"\n ):\n is_valid = True\n break\n except ClientError as ex:\n if ex.response.get(\"Error\", {}).get(\"Code\") != \"NoSuchTagSet\":\n raise\n # It wasn't possible to get the bucket's tags.\n is_valid = False\n logger.info(f\"{is_valid} - Bucket {name} is associated with CP\")\n return is_valid",
"def has_bucket_access(self, bucket, user_id):\n msg = \"has_bucket_access not implemented\"\n raise NotImplementedError(msg)",
"def test_bucket_availability(self):\n s3 = boto3.resource('s3')\n bucket = s3.Bucket(app.config['S3_PHOTO_BUCKET'])\n exists = True\n try:\n s3.meta.client.head_bucket(Bucket=app.config['S3_PHOTO_BUCKET'])\n self.assertEqual(exists, True)\n except botocore.exceptions.ClientError as e:\n # If a client error is thrown, then check that it was a 404 error.\n # If it was a 404 error, then the bucket does not exist.\n error_code = e.response['Error']['Code']\n if error_code == '404':\n exists = False\n self.assertEqual(exists, True, msg='Bucket is not exist!')",
"def exists(self):\r\n return bool(self.bucket.lookup(self.name))",
"def _bucket_exists(self):\n try:\n self.resource.meta.client.head_bucket(Bucket=self.bucketname)\n except botocore.exceptions.ClientError as error:\n # If a client error is thrown, then check that it was a 404 error.\n # If it was a 404 error, then the bucket does not exist.\n error_code = int(error.response['Error']['Code'])\n if error_code == 404:\n raise LookupError(\"Bucket '%s' does not exist\", self.bucketname)\n else:\n # maybe a permissions issue\n raise error\n return True",
"def HasSelection(self):\n sel = super(EditraBaseStc, self).GetSelection()\n return sel[0] != sel[1]",
"def requires_selection(self) -> bool:\n return True",
"def test_get_bucket(self):\n pass",
"def check_combobox_selection(self, value):\n if self.sender() == self.cmbDepReqAction:\n if value != 0: self.cmbDepInstState.setCurrentIndex(0)\n elif self.sender() == self.cmbDepInstState:\n if value != 0: self.cmbDepReqAction.setCurrentIndex(0)\n elif self.sender() == self.cmbPropType:\n if value == 1:\n self.inpPropVal.setText(\"\")\n self.inpPropDef.setText(\"\")\n self.datamapper_properties.addMapping(self.cmbPropDef, 6)\n self.datamapper_properties.removeMapping(self.inpPropDef)\n self.cmbPropMulti.setCurrentIndex(0)\n self.cmbPropEdit.setCurrentIndex(0)\n self.inpPropVal.setEnabled(False)\n self.inpPropDef.setEnabled(False)\n self.cmbPropMulti.setEnabled(False)\n self.cmbPropEdit.setEnabled(False)\n self.cmbPropDef.setEnabled(True)\n self.cmbPropDef.setCurrentIndex(0)\n else:\n self.datamapper_properties.addMapping(self.inpPropDef, 6)\n self.datamapper_properties.removeMapping(self.cmbPropDef)\n self.inpPropVal.setEnabled(True)\n self.inpPropDef.setEnabled(True)\n self.cmbPropMulti.setEnabled(True)\n self.cmbPropEdit.setEnabled(True)\n self.cmbPropDef.setEnabled(False)",
"def is_ecChoose(self):\n return len(self.ecChoose_list) > 0",
"def was_used(self):\r\n return self.circ_chosen != 0",
"def test_s3_bucket_exists(self) -> None:\n if self.prod_env:\n bucket_name = 'saints-xctf-credentials-prod'\n else:\n bucket_name = 'saints-xctf-credentials-dev'\n\n s3_bucket = self.s3.list_objects(Bucket=bucket_name)\n return s3_bucket.get('Name') == bucket_name",
"def try_to_select_gear_for_upgrade(self):\n if self.emulator.is_ui_element_on_screen(self.ui['CUSTOM_GEAR_CHANGE_OPTION']):\n self.emulator.click_button(self.ui['CUSTOM_GEAR_1'].button)\n return not self.emulator.is_ui_element_on_screen(self.ui['CUSTOM_GEAR_CHANGE_OPTION'])",
"def test_bucket_by_id_is_returned_on_get_request(self):\n with self.client:\n token = self.get_user_token()\n # Create a Bucket\n response = self.client.post(\n '/bucketlists',\n data=json.dumps(dict(name='Travel')),\n headers=dict(Authorization='Bearer ' + token),\n content_type='application/json'\n )\n # Test Bucket creation\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 201)\n self.assertTrue(data['status'], 'success')\n self.assertTrue(data['name'], 'Travel')\n response = self.client.get(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token)\n )\n data = json.loads(response.data.decode())\n self.assertEqual(response.status_code, 200)\n self.assertTrue(data['status'] == 'success')\n self.assertTrue(data['bucket']['name'] == 'travel')\n self.assertIsInstance(data['bucket'], dict)\n self.assertTrue(response.content_type == 'application/json')",
"def select( self ):\n self._has_change = True",
"def test_is_empty_single(single_bucket): # pylint: disable=redefined-outer-name\n assert single_bucket.is_empty() is False",
"def IsSelected(self):\r\n\r\n return self._hasHilight != 0",
"def is_in_cmd(self):\r\n return self.select_cmd is not None",
"def testEnableBucketTool(self):\r\n self.bucketFill.enableBucketTool()\r\n myMessage = 'Unable to enable the bucketfill map tool'\r\n assert self.bucketFill.bucketFillAction.isEnabled(), myMessage",
"def is_selected(self):\n return NSCSpecIO().read()[\"profile\"] == self.path.stem",
"def bucket_exists(self, bucket_name):\n self.s3_resource.Bucket(bucket_name)\n exists = True\n try:\n self.s3_resource.meta.client.head_bucket(Bucket=bucket_name)\n except botocore.exceptions.ClientError as e:\n # If a client error is thrown, then check that it was a 404 error.\n # If it was a 404 error, then the bucket does not exist.\n error_code = e.response['Error']['Code']\n if error_code == '404' or error_code == '403':\n exists = False\n return exists",
"def check_buck(bucket, tabular=False):\n expected_keys = [u'index_count', u'views_count', u'items', u'mutations',\n u'tombstones', u'fts_count', u'analytics_count', u'size', u'name']\n self.assertTrue(set(expected_keys).issubset(bucket.keys()))\n\n index_count, views_count, items, mutations, tombstones, fts_count, \\\n analytics_count, size, name = [bucket[key] for key in expected_keys]\n\n # Check bucket name\n self.assertTrue(name in expected_bucks)\n # Check bucket size\n self.assertTrue(size >= 0)\n # Check bucket items\n self.assertTrue(items in [0, self.num_items])",
"def complete_bucket(self, bucket_id):\n url = self.prism_endpoint + \"/wBuckets/\" + bucket_id + \"/complete\"\n\n headers = {\n \"Authorization\": \"Bearer \" + self.bearer_token,\n \"Content-Type\": \"application/json\",\n }\n\n data = {}\n\n r = requests.post(url, headers=headers, data=json.dumps(data))\n\n if r.status_code == 201:\n logging.info(\"Successfully completed the bucket\")\n else:\n logging.warning(\"HTTP Error {}\".format(r.status_code))",
"def cbTriggered(self, value):\n global selectedCheckboxes\n selectedCheckboxes=value",
"def findBucket(conn, bucketName):\n for cand in conn.get_all_buckets():\n if cand.name == bucketName:\n return cand\n return None",
"def findBucket(conn, bucketName):\n for cand in conn.get_all_buckets():\n if cand.name == bucketName:\n return cand\n return None",
"def test_id_of_bucket_to_be_edited_does_not_exist(self):\n with self.client:\n # Get an auth token\n token = self.get_user_token()\n # Update the bucket name\n res = self.client.put(\n '/bucketlists/1',\n headers=dict(Authorization='Bearer ' + token),\n data=json.dumps(dict(name='Adventure')),\n content_type='application/json'\n )\n data = json.loads(res.data.decode())\n self.assertEqual(res.status_code, 404)\n self.assertTrue(res.content_type == 'application/json')\n self.assertTrue(data['status'] == 'failed')\n self.assertTrue(data['message'] == 'The Bucket with Id 1 does not exist')",
"def test_read_bucket(self):\n post_data = self.post_a_bucket()\n self.assertEqual(post_data.status_code, 201)\n result_of_get_method = self.client().get('/bucketlists/',\n headers=dict(Authorization='Bearer '\n + self.token())\n )\n self.assertEqual(result_of_get_method.status_code, 200)\n self.assertIn('Climb the Himalayas', str(result_of_get_method.data))",
"def is_clickable(self, tile):\n return \"match\" not in self.canvas.gettags(tile) and \\\n \"selected\" not in self.canvas.gettags(tile) and \\\n len(self.canvas.find_withtag(\"selected\")) < 2",
"def _is_selected ( self, object ):\n if hasattr(object, 'model_selection') \\\n and object.model_selection is not None:\n return True\n return False"
] | [
"0.60982263",
"0.59170085",
"0.5550665",
"0.55480874",
"0.5523471",
"0.5507626",
"0.5479987",
"0.54688084",
"0.5426117",
"0.53947604",
"0.5357052",
"0.53375906",
"0.5332464",
"0.5312817",
"0.5307807",
"0.53009874",
"0.5283366",
"0.52795064",
"0.52631134",
"0.5257124",
"0.52528805",
"0.52504784",
"0.5249137",
"0.5236254",
"0.52223647",
"0.52223647",
"0.51892185",
"0.5188973",
"0.5187567",
"0.51868874"
] | 0.7113258 | 0 |
Check that file exists in bucket | def check_file_exists_in_bucket(self, file):
self._check_queryable()
try:
self._client.get_object(Bucket=self._bucket, Key=file)
except self._client.exceptions.NoSuchKey:
raise Exception('File {} not found in bucket {}'.format(file, self._bucket)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def file_exists(path):\n if path.startswith('gs://'):\n return gcsio.GcsIO().exists(path)\n else:\n return os.path.exists(path)",
"def test_bucket_availability(self):\n s3 = boto3.resource('s3')\n bucket = s3.Bucket(app.config['S3_PHOTO_BUCKET'])\n exists = True\n try:\n s3.meta.client.head_bucket(Bucket=app.config['S3_PHOTO_BUCKET'])\n self.assertEqual(exists, True)\n except botocore.exceptions.ClientError as e:\n # If a client error is thrown, then check that it was a 404 error.\n # If it was a 404 error, then the bucket does not exist.\n error_code = e.response['Error']['Code']\n if error_code == '404':\n exists = False\n self.assertEqual(exists, True, msg='Bucket is not exist!')",
"def _file_exists_in_s3(client, key):\n try:\n obj = client.head_object(Bucket=bucket, Key=key)\n return obj[\"ContentLength\"]\n except ClientError as exc:\n if exc.response[\"Error\"][\"Code\"] != \"404\":\n raise",
"def exists_on_s3(self, name, file_name):\n path = \"cluster/\" + name + \"/\" + file_name\n bucket = self.s3.get_bucket(self.__secrets_bucket__)\n\n try:\n response = bucket.get_key(path)\n except Exception as e:\n print \"[-] Error\"\n print e\n return\n\n if response:\n return True\n\n return False",
"def check_gcloud_blob_exists(filename, bucket_name=\"bts-ml-data\") -> bool:\n bucket = storage_client.get_bucket(bucket_name)\n return bucket.blob(filename).exists(storage_client)",
"def does_bucket_exists( bucket_name ):\n bucket_exists_status = { 'status':False, 'error_message':'' }\n try:\n s3 = boto3.resource('s3')\n s3.meta.client.head_bucket( Bucket = bucket_name )\n bucket_exists_status['status'] = True\n except ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n bucket_exists_status['status'] = False\n bucket_exists_status['error_message'] = str(e)\n else:\n # logger.error('ERROR: {0}'.format( str(e) ) )\n bucket_exists_status['status'] = False\n bucket_exists_status['error_message'] = str(e)\n return bucket_exists_status",
"def path_exists(bucket, path):\n bucket = get_bucket(bucket)\n return bool(bucket.get_key(path, validate=True))",
"def _bucket_exists(self):\n try:\n self.resource.meta.client.head_bucket(Bucket=self.bucketname)\n except botocore.exceptions.ClientError as error:\n # If a client error is thrown, then check that it was a 404 error.\n # If it was a 404 error, then the bucket does not exist.\n error_code = int(error.response['Error']['Code'])\n if error_code == 404:\n raise LookupError(\"Bucket '%s' does not exist\", self.bucketname)\n else:\n # maybe a permissions issue\n raise error\n return True",
"def exists(self):\r\n return bool(self.bucket.lookup(self.name))",
"def bucket_exists(self, bucket_name):\n self.s3_resource.Bucket(bucket_name)\n exists = True\n try:\n self.s3_resource.meta.client.head_bucket(Bucket=bucket_name)\n except botocore.exceptions.ClientError as e:\n # If a client error is thrown, then check that it was a 404 error.\n # If it was a 404 error, then the bucket does not exist.\n error_code = e.response['Error']['Code']\n if error_code == '404' or error_code == '403':\n exists = False\n return exists",
"def s3_bucket_exists(session, name):\n client = session.client('s3')\n resp = client.list_buckets()\n for bucket in resp['Buckets']:\n if bucket['Name'] == name:\n return True\n\n return False",
"def __check(s3client, key, bucket_name):\n try:\n s3client.head_object(Bucket=bucket_name, Key=key)\n except ClientError as e:\n return int(e.response['Error']['Code']) != 404\n return True",
"def url_exists(url):\r\n from urllib import parse\r\n res = parse.urlparse(url)\r\n if res.scheme == 'gs':\r\n # blob_name has no '/' prefix\r\n bucket_name, blob_name = res.netloc, res.path[1:]\r\n from google.cloud import storage\r\n storage_client = storage.Client()\r\n bucket = storage_client.get_bucket(bucket_name)\r\n blob = bucket.blob(blob_name)\r\n return blob.exists()\r\n else:\r\n return os.path.exists(res.path)",
"def s3_key_exists(key, bucket):\n s3 = boto3.resource('s3')\n try:\n s3.Object(bucket, key).load()\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == \"404\":\n # The object does not exist.\n return False\n else:\n # Something else has gone wrong.\n raise e\n else:\n # The object does exist.\n return True",
"def does_bucket_exist(args):\n\n args.suppress_verify_output = True\n if verify(args) != 0:\n # restore stdout\n sys.stdout = sys.__stdout__\n print(\"Config file not valid, please use the verify function to debug\")\n return 1\n\n sanitised_group = args.group.replace('/', '-')\n\n with open(args.file, \"r\") as f:\n config_json = json.load(f)\n\n for group in config_json[\"groups\"]:\n if group[\"name\"] == sanitised_group:\n for bucket in group[\"buckets\"]:\n if bucket[\"name\"] == args.bucket:\n return 0\n break\n\n return 1",
"def does_bucket_exist(\n bucket_name: str, region: str = \"us-east-1\", session: Optional[boto3.Session] = None\n) -> bool:\n s3_resource = _get_resource(session, region)\n try:\n s3_resource.meta.client.head_bucket(Bucket=bucket_name)\n return True\n except ClientError as exc:\n if exc.response[\"Error\"][\"Message\"] == \"Not Found\":\n LOGGER.info('bucket \"%s\" does not exist', bucket_name)\n return False\n if exc.response[\"Error\"][\"Message\"] == \"Forbidden\":\n LOGGER.exception(\n 'access denied for bucket \"%s\" (permissions?)', bucket_name\n )\n raise\n return False",
"def exists(bucket: str, key: str) -> bool:\n try:\n client().head_object(Bucket=bucket, Key=key)\n except botocore.client.ClientError:\n return False\n else:\n return True",
"def path_exists(path):\n if path.startswith('gs://'):\n command = 'gsutil ls {path}'.format(path=path)\n elif path.startswith('s3://'):\n command = 'awscli s3 ls {path}'.format(path=path)\n else:\n return os.path.exists(path)\n\n return run_quick(command, echo=False).returncode == 0",
"def test_s3_bucket_exists(self) -> None:\n if self.prod_env:\n bucket_name = 'saints-xctf-credentials-prod'\n else:\n bucket_name = 'saints-xctf-credentials-dev'\n\n s3_bucket = self.s3.list_objects(Bucket=bucket_name)\n return s3_bucket.get('Name') == bucket_name",
"def test_exists_cache() -> None:\n s3_client = boto3.client(\"s3\", region_name=\"us-east-1\")\n s3_client.create_bucket(Bucket=\"example-bucket\")\n\n # Object should not exist.\n assert not File(\"s3://example-bucket/a\").exists()\n assert File(\"s3://example-bucket/a\").get_hash() == \"cb7880ecc11723b8b8cad37f6b5160251d7a765e\"\n\n # Update object outside of s3fs.\n s3_client.put_object(Body=b\"hello\", Bucket=\"example-bucket\", Key=\"a\")\n\n # Using the normal s3fs exists(), the existance check would be cached and\n # would now return an incorrect result.\n\n # However, File.exists() avoids using the s3fs cache and gives the correct result.\n # The hash should update as well.\n assert File(\"s3://example-bucket/a\").exists()\n assert File(\"s3://example-bucket/a\").get_hash() == \"ea438dc20234f0226736d407d7caba13f7e3c49e\"\n\n # Directory should not exist.\n assert not Dir(\"s3://example-bucket/dir/\").exists()\n\n # Update object outside of s3fs.\n s3_client.put_object(Body=b\"hello\", Bucket=\"example-bucket\", Key=\"dir/a\")\n\n # Directory should now exist.\n assert Dir(\"s3://example-bucket/dir/\").exists()",
"def bucket_exists(bucket: BucketLocation) -> bool:\n s3_client = boto3.client(\"s3\")\n response = s3_client.list_buckets()\n\n return bucket.name in response[\"Buckets\"]",
"def _key_exists_in_bucket(log, s3_conn, bucket_name, key_name):\n b = s3_conn.get_bucket(bucket_name, validate=False)\n k = Key(b, key_name)\n log.debug(\"Checking if key '%s' exists in bucket '%s'\" % (\n key_name, bucket_name))\n try:\n return k.exists()\n except S3ResponseError, e:\n log.error(\"Failed to checkf if file '%s' exists in bucket '%s': %s\" %\n (key_name, bucket_name, e))\n return False",
"def blob_exists(blob_url):\n blob = storage.Object.from_url(blob_url)\n blobex = blob.exists()\n return blobex",
"def gsutil_file_exists(self, path):\n try:\n self.m.gsutil(['ls', path])\n except self.m.step.StepFailure: # pragma: no cover\n return False\n return True",
"def is_file_exists(self):\n pass",
"async def has(path: str) -> bool:\n _ = path.strip('/').split('/')\n bucket = _[0]\n key = '/'.join(_[1:])\n async with _create_client() as client:\n try:\n await client.head_object(Bucket=bucket, Key=key)\n return True\n except ClientError:\n return False",
"def bucket_exists(gs_client, test_bucket):\n bucket = gs_client.conn.bucket(test_bucket)\n if not bucket.exists():\n gs_client.conn.create_bucket(test_bucket, predefined_acl=\"project-private\")\n yield gs_client",
"def check_file_exist(self):\n return False",
"def exists(path, bucket=os.getenv('RV_DEFAULT_S3_BUCKET')):\n matches = list_objects(path=path,\n bucket=bucket,\n include_prefix=True)\n if path in matches:\n return True\n return False",
"def s3_object_exists(s3_path: Path) -> bool:\n\n s3_args, unknown = get_s3_args().parse_known_args()\n s3_client = get_s3_client(s3_args)\n log = get_logger(\"s3_object_exists\")\n\n try:\n s3_client.get_object(Bucket=s3_args.s3_bucket, Key=str(s3_path))\n return True\n except s3_client.exceptions.NoSuchKey:\n return False"
] | [
"0.7652608",
"0.76365834",
"0.76209396",
"0.7487296",
"0.7461448",
"0.7449851",
"0.7337285",
"0.73094934",
"0.7305053",
"0.7244931",
"0.72251844",
"0.7200634",
"0.7111337",
"0.70806926",
"0.7041153",
"0.7033018",
"0.70185465",
"0.70138866",
"0.6980778",
"0.6973387",
"0.6955886",
"0.69431853",
"0.68979764",
"0.6892586",
"0.68411535",
"0.68064",
"0.6793446",
"0.6792113",
"0.6781893",
"0.66986483"
] | 0.82178646 | 0 |
Get list of S3 locations from timebase pattern parts with possible lag. | def get_S3_paths_with_lag(*paths, **kwargs):
lag = kwargs.get('lag', '01:00')
lag_as_date = datetime.datetime.strptime(lag, '%H:%M')
lag_as_delta = datetime.timedelta(hours=lag_as_date.hour, minutes=lag_as_date.minute, seconds=lag_as_date.second)
times = [
datetime.datetime.utcnow(),
datetime.datetime.utcnow() - lag_as_delta
]
result = [
particular_time.strftime(S3.join_bucket_paths(*paths))
for particular_time in times
]
return result | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def collect_backups(self, bucketname, prefix):\n backups = []\n \n bucket = self.conn.get_bucket(bucketname)\n\n logger.info(\"Scanning for backups: s3://%s/%s\", bucketname, prefix)\n\n for entry in natsort([key.name for key in bucket.list(prefix)]):\n # Check for a time stamp in the directory entry's name.\n match = TIMESTAMP_PATTERN.search(entry)\n if match:\n # Make sure the entry matches the given include/exclude patterns.\n if self.exclude_list and any(fnmatch.fnmatch(entry, p) for p in self.exclude_list):\n logger.debug(\"Excluded %r (it matched the exclude list).\", entry)\n elif self.include_list and not any(fnmatch.fnmatch(entry, p) for p in self.include_list):\n logger.debug(\"Excluded %r (it didn't match the include list).\", entry)\n else:\n backups.append(S3Backup(\n pathname=entry,\n timestamp=datetime.datetime(*(int(group, 10) for group in match.groups('0'))),\n ))\n else:\n logger.debug(\"Failed to match time stamp in filename: %s\", entry)\n if backups:\n logger.info(\"Found %i timestamped backups in %s.\", len(backups), bucket)\n return sorted(backups)",
"def fetch_s3_keys_by_regex_pattern(s3_bucket, s3_directory, pattern):\n bucket_contents = s3_bucket.list(s3_directory)\n return [key for key in bucket_contents if pattern.search(key.name)]",
"def parse_s3_uri(URIs):\n buckets, keys = [], []\n for URI in URIs:\n uri_path = path.normpath(URI).split(\"/\")\n buckets.append(uri_path[1])\n keys.append(uri_path[2:])\n\n return buckets, keys",
"def task_get_time_slices(\n self, timestamp: datetime = None\n ) -> List[Tuple[datetime, datetime]]:\n total_streams: int = self._config[\"graph_streams\"]\n\n t_now: datetime = (\n timestamp.replace(microsecond=0)\n if timestamp is not None\n else datetime.utcnow().replace(microsecond=0)\n )\n\n t_lag: timedelta = timedelta(seconds=self._config[\"graph_timelag\"])\n t_sec: timedelta = timedelta(seconds=1)\n t_delta: timedelta = timedelta(seconds=self._config[\"graph_stream_frame\"])\n\n frame_end: datetime = t_now - t_lag - t_sec\n frame_start: datetime = frame_end + t_sec - t_delta * total_streams\n\n self._logger.info(\n \"Split [%s - %s] into %s slices\",\n frame_start.isoformat(),\n frame_end.isoformat(),\n total_streams,\n )\n\n result: List[Tuple[datetime, datetime]] = []\n\n for i in range(total_streams):\n slice_start: datetime = frame_end + t_sec - t_delta * (i + 1)\n slice_end: datetime = frame_end - t_delta * i\n\n result.append((slice_start, slice_end))\n\n return result",
"def get_file_list(\n self,\n file_regex = r'.*'):\n s3Contents = []\n #Use list_objects_v2 via kwargs since there could be\n #more than 1000 objects (single return limit)\n kwargs = {'Bucket': self.bucket, 'Prefix':self.key}\n while True:\n try:\n resp = self.s3.list_objects_v2(**kwargs)\n except:\n resp = None\n self.logger.error('Unable to reach s3 bucket')\n sys.exit(1)\n if resp.get(\"Contents\"):\n try:\n f_regex = re.compile(file_regex)\n #python 3.8+ required for walrus operator\n s3Contents += [f['Key'] for f in resp['Contents'] if (match := re.search(f_regex, f['Key']))]\n except Exception as e:\n self.logger.exception(e)\n self.logger.error('failed to filter s3 folder. Bucket: %s and location: %s',\n self.bucket,\n self.key)\n sys.exit(1)\n try:\n kwargs['ContinuationToken'] = resp['NextContinuationToken']\n except KeyError:\n break\n if not s3Contents:\n self.logger.warning(\n 'No files were returned from s3 bucket: %s and location: %s filtering by %s',\n self.bucket,\n self.key,\n file_regex)\n return s3Contents",
"def list_objects(self, s3_prefix_path):\n bucket_name, prefix = S3Util.get_bucket_and_key(s3_prefix_path)\n bucket = self.s3_resource.Bucket(bucket_name)\n return [\"s3://\" + bucket_name + \"/\" + key.key for key in bucket.objects.filter(Prefix=prefix)]",
"def extract_paths(measures: List['UserMeasure']) -> List['GwPoint']:\n\n path: List['GwPoint'] = []\n measures = sorted(measures, key = lambda k: k.timestamp)\n (src, dest) = find_endpoints(measures)\n dest_index = 0\n while 'D' not in (src, dest): # Loop until the end of the file is reached\n for m in measures[dest_index:]:\n dest_index += 1\n if m.zone == dest:\n break\n src_index = dest_index\n for m in reversed(measures[:dest_index]):\n src_index -= 1\n if m.zone == src:\n break\n dag = to_DAG(measures[src_index:dest_index])\n for d in dag.list:\n path.append(GwPoint(\n d.id,\n d.lac,\n d.find_gw_match().azimuth,\n d.find_gw_match().longitude,\n d.find_gw_match().latitude,\n d.zone,\n d.timestamp\n ))\n src_index = dest_index\n (src, dest) = find_endpoints(measures[src_index:])\n return path",
"def objs_with_prefix(bucket, log_type, query_time):\n prefix = get_prefix(log_type, query_time)\n # S3 guarantees to return objects in ascending key order based on the UTF-8\n # binary representation of the key. Unfortunately the server-side filtering\n # is quite limited; we can't specify the sort order or the sort key.\n objs = list(bucket.objects.filter(Prefix=prefix))\n logging.info('Found %s files with prefix %s',\n 'no' if not objs else len(objs), prefix)\n return objs",
"def get_regions_in_partition(self, prefix=None, delimiter='/'):\n if prefix is None:\n prefix = self.s3_path\n else:\n prefix = self._strip_slashes(prefix)\n\n query_params = {\n 'Bucket': self.s3_bucket,\n 'Prefix': prefix + '/',\n 'Delimiter': delimiter\n }\n\n # We currently should be able to get all regions in a single request\n # TODO: Fail if we get a next token - there's more to this prefix than meets the eye\n region_list = []\n response = self.s3_client.list_objects_v2(**query_params)\n for c_prefix in response.get('CommonPrefixes', []):\n region = self._extract_region_from_prefix(c_prefix)\n if region:\n region_list.append(region)\n\n return region_list",
"def blob_generator(bucket_name, pattern):\n cloud_bucket = get_gcsbucket(bucket_name)\n for blob in cloud_bucket.objects():\n if blob.key.endswith(pattern):\n yield blob.uri",
"def getFiles(self, state=None, s3uri_prefix=None):\n self.log.info(\"getFiles\")\n downloads = []\n keys = list(self.downloads.keys())\n keys.sort()\n for key in keys:\n download = self.downloads[key]\n if not state or (state and download['state'] == state):\n print(download)\n s3uri = download['s3_uri']\n if s3uri_prefix is None or s3uri.startswith(s3uri_prefix):\n item = {}\n for k in ('local_filepath', 'size', 'state', 's3_time',\n 's3_date', 's3_uri'):\n item[k] = download[k]\n downloads.append(item)\n\n return downloads",
"def get_image_list(pattern='alpes_%d.jpg', start=0):\n image_list = []\n k = start\n while path.exists(pattern % k):\n image_list.append(pattern % k)\n k += 1\n return image_list",
"def list_sorted_files(uuid, basepath=None):\n if basepath is None:\n basepath = get_basepath()\n if 's3://' in basepath:\n return s3wrangler.list_objects(basepath + 'ephys/' + uuid + '/derived/kilosort2/')\n else:\n # return glob.glob(os.path.join(basepath, f'ephys/{uuid}/derived/kilosort2/*'))\n return glob.glob(basepath + f'ephys/{uuid}/derived/kilosort2/*')",
"def list_available_tiles(prefix):\n\n files = []\n generator = hls_container_client.list_blobs(name_starts_with=prefix)\n for blob in generator:\n files.append(blob.name)\n return files",
"def get_s3_file_names(s3_prefix_path):\n\n # parse s3 path for bucket name and prefix\n regex = r\"s3://([\\w._-]+)/([\\w./_-]+)\"\n m = re.match(regex, s3_prefix_path)\n s3bucket_name = m.group(1)\n s3prefix = m.group(2)\n\n # Get s3 bucket handle\n s3 = boto3.resource('s3')\n s3bucket = s3.Bucket(s3bucket_name)\n\n # Get all file names in the `s3bucket` with the prefix `s3prefix`\n files = []\n for object in s3bucket.objects.filter(Prefix=s3prefix):\n path_to_file = os.path.join(\"s3://%s\" % s3bucket_name, object.key)\n files.append(path_to_file)\n\n return files",
"def get_matching_s3_keys(bucket, prefix=\"\", suffix=\"\"):\n for obj in get_matching_s3_objects(bucket, prefix, suffix):\n yield obj[\"Key\"]\n\n def download_froms3(myfile, env='prod'):\n # session = boto3.Session(profile_name=PROFILE)\n boto_s3_session = boto3.Session(profile_name=env)\n s3 = boto_s3_session.resource('s3')\n s3client = boto_s3_session.client('s3', region_name='eu-west-2')\n try:\n file_name = unquote(myfile.split('/')[-1])\n oparse = urlparse(myfile, allow_fragments=False)\n print(oparse)\n S3_SRC_BUCKET_NAME = oparse.netloc\n key = oparse.path[1:]\n download_path = '{0}{1}'.format(BASE_PATH, file_name)\n print(f'Downloading from {S3_SRC_BUCKET_NAME} , {key} to {download_path} ')\n # s3.Bucket(S3_SRC_BUCKET_NAME).download_file(key, download_path)\n # s3.Bucket(S3_SRC_BUCKET_NAME).download_file(file_name, download_path)\n s3client.download_file(S3_SRC_BUCKET_NAME, key, download_path)\n print('File Downloaded')\n except botocore.exceptions.ClientError as err:\n if err.response['Error']['Code'] == \"404\":\n print(\"The object does not exist.\", err)\n else:\n # raise\n error = str(err)\n print(error)\n\n return myfile",
"def get_gzipped_s3_objects_from_dict(session, event):\n return get_s3_objects_from_dict(\n session, event, default_unzip_s3_object_handler_function\n )",
"def get_tar_files(self, loc_id):\n if self.storage_type == 's3':\n conn = boto3.client('s3')\n bucket_name = self.source_path.split('//')[1].split('/')[0]\n prefix = self.source_path.split(bucket_name+'/')[1]+'/'+loc_id\n s3_result = conn.list_objects_v2(Bucket=bucket_name, Prefix=prefix, Delimiter=\"/\")\n if 'Contents' not in s3_result:\n # print(s3_result)\n return []\n\n tar_files = []\n for key in s3_result['Contents']:\n tar_files.append(key['Key'])\n\n else:\n file_path = os.path.join(self.source_path, loc_id)\n tar_files = [os.path.join(file_path, f)\n for f in os.listdir(file_path) if 'tar' in f]\n return tar_files",
"def get_prev_starts(self):\r\n path_starts = []\r\n if self.prev_arr_list is None:\r\n return []\r\n \r\n for dpath in self.prev_arr_list:\r\n path_starts.append(dpath.path[0])\r\n return path_starts",
"def generate_items_in_bucket(\n bucket: BucketLocation, prefix: Optional[str] = None\n) -> Generator[ObjectLocation, None, None]:\n s3 = boto3.client(\"s3\")\n\n kwargs = {\"Bucket\": bucket.name}\n\n if prefix is not None:\n kwargs[\"Prefix\"] = prefix\n\n while True:\n response = s3.list_objects(**kwargs)\n\n for obj in response[\"Contents\"]:\n yield ObjectLocation(key=obj[\"Key\"], bucket=bucket)\n\n # The S3 API is paginated, returning up to 1000 keys at a time.\n # Pass the continuation token into the next response, until we\n # reach the final page (when this field is missing).\n try:\n kwargs[\"ContinuationToken\"] = response[\"NextContinuationToken\"]\n except KeyError:\n break",
"def list(self, prefix=\"\"):\n try:\n list_rep = self.client.listdir(self.bucket + \"/\" + prefix)\n for i in list_rep:\n # Remove preceding bucket name and potential leading slash from returned key value\n i = i.replace(self.bucket, \"\").replace('tar', 'wsp.sz')\n if i[0] == '/': i = i[1:]\n yield i\n except pyhdfs.HdfsFileNotFoundException:\n pass",
"def extract_locations(dataset_location):\n # Define an empty list to store file locations\n filename_array = []\n # Add file locations to the list\n for filename in glob(str(dataset_location) + '*.jpg'):\n filename_array.append(filename)\n # Sort the list and return it\n filename_array.sort()\n return filename_array",
"def list_s3_files(bucket, prefix):\n \n s3 = boto3.client('s3')\n\n if type(prefix) != list:\n prefix = [prefix]\n \n # Loop over prefixes:\n file_list = []\n for p in prefix:\n \n # Load one prefix:\n response = s3.list_objects_v2(Bucket=bucket, Prefix=p)\n if response['KeyCount'] > 0:\n file_list = file_list + [d['Key'] for d in response['Contents']]\n while response['IsTruncated']:\n response = s3.list_objects_v2(Bucket=bucket, Prefix=p, StartAfter=file_list[-1])\n file_list = file_list + [d['Key'] for d in response['Contents']] \n \n return file_list",
"def find_google_cloud_storage_file_names(bucket, prefix=''):\n return list(bucket.list_blobs(prefix=prefix))",
"def get_record_urls(split):\n\n stream = os.popen(f'gsutil ls {WAYMO_DATASET_BUCKET}/{split}')\n urls = list(filter(None, stream.read().split('\\n')))\n return urls",
"def extract_locations(spoiler_logs):\n locations = []\n \n for log in spoiler_logs: # loop to check the spoiler log with try-except\n try:\n locations.append(log['locations'])\n except KeyError:\n print(\"Invalid spoiler log ...\")\n return locations",
"def get_bucketlist():\n pass",
"def list_object_paths_in_s3(s3_prefix: Path) -> Generator[Path]:\n\n s3_args, unknown = get_s3_args().parse_known_args()\n s3_client = get_s3_client(s3_args)\n log = get_logger(\"list_object_paths_in_s3\")\n\n resp = s3_client.list_objects_v2(Bucket=s3_args.s3_bucket, Prefix=str(s3_prefix))\n\n if \"Contents\" not in resp:\n raise NoS3DataError(f\"No data at prefix {s3_prefix}\")\n\n while True:\n yield from (Path(obj[\"Key\"]) for obj in resp[\"Contents\"])\n\n if resp[\"IsTruncated\"]:\n continuation_key = resp[\"NextContinuationToken\"]\n resp = s3_client.list_objects_v2(\n Bucket=s3_args.s3_bucket,\n Prefix=str(s3_prefix),\n ContinuationToken=continuation_key,\n )\n else:\n break",
"def getFilesAtStamp(self, timestamp):\n\t\tout = []\n\t\tfor stream_name in self.stamps_by_stream.keys():\n\t\t\tts_index = bisect.bisect_right(self.stamps_by_stream[stream_name], timestamp)-1\n\t\t\tif ts_index < 0:\n\t\t\t\tcontinue\n\t\t\ttuple_ts = self.streams[stream_name].keys()\n\t\t\ttuple_ts.sort()\n\t\t\tout.append(self.streams[stream_name][tuple_ts[ts_index]])\n\t\treturn out",
"def list_s3(bucket, prefix, ext=None):\n s3 = boto3.resource('s3')\n s3_bucket = s3.Bucket(bucket)\n\n if ext:\n ext = '.' + ext.lstrip('.')\n else:\n ext = ''\n\n for item in s3_bucket.objects.filter(Prefix=prefix):\n key = item.key\n if not key.endswith(ext):\n continue\n\n yield key"
] | [
"0.5553879",
"0.5373015",
"0.52267677",
"0.50609094",
"0.5051911",
"0.5023184",
"0.50068414",
"0.49989265",
"0.4967261",
"0.49669662",
"0.4930386",
"0.48923874",
"0.4880217",
"0.48494568",
"0.4843274",
"0.48396668",
"0.48355362",
"0.4834737",
"0.48299235",
"0.48269066",
"0.48221627",
"0.48208332",
"0.48153675",
"0.4809755",
"0.4809748",
"0.4803143",
"0.48003837",
"0.47709432",
"0.47594655",
"0.47527206"
] | 0.6611707 | 0 |
Set positioning of Pinky's eyes based on PacMan's coordinates | def update_eyes(self, up_down_part, left_right_part):
if up_down_part and abs(up_down_part) > 5:
y = up_down_part/abs(up_down_part)
else:
y = 0
if left_right_part and abs(left_right_part) > 5:
x = left_right_part/abs(left_right_part)
else:
x = 0
self.looking = (x, y) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setPosition(self):\n # determine posX, posY for battle\n (x1,y1) = globals.battlemapQuadrants[self.systemGrid]\n self.posX = x1+self.setX\n self.posY = y1+self.setY",
"def set_new_location(self, xPos, yPos):",
"def set_pos(self, x, y, orien):\n self.pos_x = x\n self.pos_y = y\n self.orientation = orien",
"def pos_image(image, x,y):\n image.anchor_x = x\n image.anchor_y = y",
"def setDesiredPosition(self, x, y):\n (self.setX, self.setY) = (x , y)",
"def drawEyes(win, winW, winH):\n# leftEye = Oval(Point(300-120-40, 300-80-20), Point(300-120+40, 300-80+20))\n leftEye = Oval(Point(winW/2-winW/5-winW/15, winH/2-winH/7.5-winH/30),\n Point(winW/2-winW/5+winW/15, winH/2-winH/7.5+winH/30))\n leftEye.setFill(\"white\")\n leftEye.setOutline(\"black\")\n leftEye.draw(win)\n leftIris = Circle(Point(winW/2-winW/5, winH/2-winH/7.5), winH/40)\n leftIris.setOutline(\"black\")\n leftIris.setFill(\"darkcyan\")\n leftIris.draw(win)\n leftPupil = Circle(Point(winW/2-winW/5, winH/2-winH/7.5), winH/120)\n leftPupil.setOutline(\"black\")\n leftPupil.setFill(\"black\")\n leftPupil.draw(win)\n rightEye = leftEye.clone()\n rightEye.move(winW/2-winW/10,0)\n rightEye.draw(win)\n rightIris = leftIris.clone()\n rightIris.move(winW/2-winW/10,0)\n rightIris.draw(win)\n rightPupil = leftPupil.clone()\n rightPupil.move(winW/2-winW/10,0)\n rightPupil.draw(win)",
"def setPose(self, eye, look_at, up):\n eye = np.asarray(eye)\n look_at = np.asarray(look_at)\n up = np.asarray(up)\n\n self.eye = eye\n self.z = (eye - look_at)/np.linalg.norm(eye - look_at)\n self.x = (np.cross(up, self.z))/np.linalg.norm(np.cross(up, self.z))\n self.y = np.cross(self.z, self.x)",
"def set_position( self, posx, posy ):\n\n self.__foodx = posx\n self.__foody = posy",
"def positioning(self):\n pass",
"def position(self, x, y):\n self.x = x \n self.y = y\n self.pos[0] = x \n self.pos[1] = y",
"def reset_position(self): \n self.rect.x = 400\n self.rect.y = 400\n \n # Specifies the Player's spawnpoint as maze_arrangement[8][8], representing\n # the tile in the center of the maze \n self.__minotaur_x = 8\n self.__minotaur_y = 8",
"def set_position(self, az_pos, el_pos):\n raise NotImplementedError()",
"def setPosition(self):\n self.data['pos-x'] = \"%s\" % self.x()\n self.data['pos-y'] = \"%s\" % self.y()",
"def adjust_position(self):\n\n # Adjust position for x-axis\n r = self.rect.x % 30\n if r != 0:\n if r <= 16:\n x = self.rect.x - r\n else:\n x = self.rect.x + (30 - r)\n\n else:\n x = self.rect.x\n\n # Adjust position for y-axis\n r = self.rect.y % 30\n if r != 0:\n if r <= 16:\n y = self.rect.y - r\n else:\n y = self.rect.y + (30 - r)\n else:\n y = self.rect.y\n\n return x, y",
"def set_position(self, x, y):\n self.position.x = x\n self.position.y = y\n self.rect.topleft = x, y",
"def set_robot_pos(self):\n\t\tx,y,z = self.geo2desiredENU(self.curr_lat, self.curr_lon, self.gpsAlt)\n\t\tself.robot_msg.point.x = x\n\t\tself.robot_msg.point.y = y\n\t\tself.robot_msg.point.z = z",
"def prep_robot_location(self):\n x = int(self.robot.odo_x)\n y = int(self.robot.odo_y)\n o = round(self.robot.odo_o, 2)\n location_str = f\"Location (X,Y,O): {str(x)}, {str(y)}, {str(o)}\"\n # Prepare the image and positions it on the screen\n self.location_image = self.font.render(location_str, True, self.text_color, self.bg_color)\n self.location_rect = self.location_image.get_rect()\n self.location_rect.left = self.action_rect.left\n self.location_rect.top = self.action_rect.bottom + self.line_gap",
"def set_position( self ):\n\t\tscreen_rect = self.get_preview_window_screen_rect( )\n\n\t\twhile screen_rect.Intersects( self.GetScreenRect( ) ):\n\t\t\tpos = self.GetPosition( )\n\t\t\tself.SetPosition( ( pos[ 0 ] - 2, pos[ 1 ] + 2 ) )",
"def setzePosition(self, x, y):\n self.zielX = x\n self.zielY = y",
"def crouched_position(mp):\n joints = ['LHipPitch', 'RHipPitch', 'LKneePitch', 'RKneePitch']\n ankles = ['LAnklePitch', 'RAnklePitch']\n\n joint_angles = [-0.6074221134185791,\n -0.4356980323791504,\n 1.6413381099700928,\n 1.5739259719848633]\n\n ankle_angles = [-0.9403839111328125, -1.0461461544036865]\n\n # actuation\n mp.setAngles(joints, joint_angles, 0.1)\n time.sleep(0.420)\n mp.setAngles(ankles, ankle_angles, 0.1)",
"def SetPacman():\n\tfg=0\n\tglobal xcoP\n\tglobal ycoP\n\twhile fg!=1:\n\t\ta=random.randint(0,15)\n\t\tb=random.randint(0,35)\n\t\tif(board[a][b]=='.'):\n\t\t\tboard[a][b]='P'\n\t\t\txcoP=a\n\t\t\tycoP=b\n\t\t\tfg=1",
"def set_position(self, x, y):\n self.geometry('%s%s' % (x, y))",
"def setPosition(position):",
"def setPosition(*args):",
"def setPosition(*args):",
"def setPosition(*args):",
"def setPosition(*args):",
"def setPosition(*args):",
"def setPosition(*args):",
"def setPosition(*args):"
] | [
"0.65328586",
"0.6508323",
"0.6434197",
"0.64220273",
"0.63514304",
"0.6293591",
"0.62664384",
"0.6252575",
"0.6111345",
"0.61039346",
"0.6084001",
"0.60804886",
"0.6058369",
"0.60549945",
"0.60224134",
"0.59905356",
"0.5935536",
"0.59203494",
"0.5913262",
"0.59098",
"0.58949083",
"0.58875376",
"0.58664334",
"0.5863271",
"0.5863271",
"0.5863271",
"0.5863271",
"0.5863271",
"0.5863271",
"0.5863271"
] | 0.68247646 | 0 |
Append item to set. This method does not save object! | def add(self, item):
item = self._prepare_item(len(self), item)
if item not in self._data:
self._data.append(item)
self.__log__.append(SetAdd(value=item)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def add(self, item):\n self.update(set([item]))",
"def append(self, item):\n self.update([item])",
"def add_to_set(self, item, reload=True):\n index = len(self)\n item = self._prepare_item(index, item)\n data = self._field.item_field.to_mongo(self.__document__, item)\n\n qs = self._get_queryset()\n qs.update_one({'$addToSet': {self.__field_name__: data}})\n\n item = self._prepare_item(len(self), item)\n if item not in self._data:\n self._data.append(item)\n\n self.__log__.append(SetAddToSet(value=item))\n\n if reload:\n self.reload()",
"def add(self, item):\n if not (item in self.set):\n self.set[item] = True\n heapq.heappush(self.heap, item)",
"def append(self, item):\n self.items.append(item)",
"def add_to_bag(self, item):\n self._bag.append(item)",
"def append(self, item):\n # FIXME: this is only append if the key isn't already present\n key, value = item\n self._main[key] = value",
"def append(self, item: T) -> None:\n self.insert(item)",
"def push(self, item):\n if hasattr(item, \"__iter__\"):\n self.items.extend(item)\n else:\n self.items.append(item)",
"def add(self, item):",
"def append(self, item):\n self.work.append(item)",
"def append(self, item):\n self.trained_rqrmi.append(item)\n self.rqrmi_state_changed=True",
"def append(self, item):\n # type: (Any) -> None\n list.append(self, self.ref(item))",
"def append(self, item):\n try:\n i = self.index(item)\n return self[i]\n except ValueError:\n list.append(self, item)\n return item",
"def push(self, item):\n if item not in self._items:\n self._items.append(item)",
"def append(self, item):\n\t\theapq.heappush(self.heap, (self.f(item), item))",
"def append(self, value):\n assert isinstance(value, Item), type(value)\n list.append(self, value)\n self.emit('appened', value)\n self.emit('modified')",
"def push(self, item):\n self.items.append(item)",
"def push(self, item):\n self.items.append(item)",
"def push(self, item):\n self.items.append(item)",
"def push(self, item):\n self.items.append(item)",
"def push(self, item):\n self.items.append(item)",
"def push(self,item):\n self.items.append(item)",
"def push(self, item):\n\t\tself.items.append(item)",
"def append(self, item: T) -> None:\n pass",
"def append(self, item: T) -> None:\n pass",
"def append (self, item):\n pass",
"def add(self, item):\n\n if item not in self:\n self._index_map[item] = len(self._list)\n self._list.append(item)",
"def add_item(self, key, item):\n self[key].add(item)\n try:\n self._reverse_store[item].add(key)\n except KeyError:\n self._reverse_store[item] = set([key])",
"def push(self, item) -> None:\n self.items.append(item)"
] | [
"0.79363596",
"0.7526422",
"0.7515836",
"0.74165773",
"0.7331249",
"0.7069965",
"0.7059942",
"0.70584714",
"0.7025516",
"0.69851726",
"0.6967807",
"0.69535244",
"0.68928784",
"0.6875286",
"0.687117",
"0.6854746",
"0.68296975",
"0.6811986",
"0.6811986",
"0.6811986",
"0.6811986",
"0.6811986",
"0.68046945",
"0.6801992",
"0.68018895",
"0.68018895",
"0.6799018",
"0.67850745",
"0.67686945",
"0.6766992"
] | 0.8173588 | 0 |
Remove item from set. This method does not save object! | def remove(self, item):
try:
self._data.remove(item)
except ValueError as exc:
raise KeyError from exc
else:
self.__log__.append(SetRemove(value=item)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def remove(self, item):\n try:\n entry = self.set.pop(item)\n entry[-1] = self.REMOVED\n except KeyError:\n print(\"Can't remove a non-existing item\")",
"def discard(self, item):\n try:\n self._data.remove(item)\n except ValueError:\n pass\n else:\n self.__log__.append(SetDiscard(value=item))",
"def delete_set(self, item): # TODO test\n tree = item.parent\n item_label = item.parent_node\n tree.remove_node(item)\n tree.remove_node(item_label)\n self.exercise.sets.remove(item.set)\n print(\"delete set\")",
"def remove(self, item):\n del self._dict[item]",
"def remove(self, item) -> None:\n entry = self.entry_finder.pop(item)\n entry[-1][0] = None",
"def remove(self, item: T) -> None:\n index = self.index(item)\n self.delete_at_index(index)",
"def remove(self, item: T) -> None:\n index = self.index(item)\n self.delete_at_index(index)",
"def remove(self) -> object:\n return self._contains.pop()",
"def remove(self, item):\n # type: (Any) -> None\n return list.remove(self, self.ref(item))",
"def remove(self, item):\n\n if item in self:\n item_index = self._index_map[item]\n last_item = self._list[-1]\n\n # Swap in the item from the end of the list\n self._list[item_index] = last_item\n self._list.pop()\n\n self._index_map[last_item] = item_index",
"def remove(self, pset):\n self._sets.remove(pset)",
"def remove(self, other):\n self._check_item(other)\n self._set.remove(other)",
"def remove(self, item: Item) -> None:\n raise NotImplementedError(\"remove\")",
"def remove(self, item):\n\t\tif self.len == 0:\n\t\t\traise ValueError(\"Lista vacia\")\n\t\tif self.prim.dato == item:\n\t\t\tself.borrar_primero()\n\t\t\treturn\n\t\tanterior = self.prim\n\t\tactual = anterior.prox\n\t\twhile actual and actual.dato != item:\n\t\t\tanterior = anterior.prox\n\t\t\tactual = actual.prox\n\t\tif not actual:\n\t\t\traise ValueError(\"Elemento no encontrado\")\n\t\tanterior.prox = actual.prox\n\t\tself.len -= 1",
"def remove (self, item):\n pass",
"def remove(self, value):\r\n if value not in self:\r\n raise KeyError(value)\r\n self.discard(value)",
"def remove(self):\r\n\t\tself._delete()",
"def remove(self):\n raise NotImplementedError",
"def remove(self):\n raise NotImplementedError",
"def remove(self):",
"def item_remove(self, item):\n\t\treturn self._modify_object(item=item, new_item=\"\")",
"def remove(self, x):\n self._seen.remove(x)\n self._list.remove(x)",
"def remove(self, x):\n del self[self.index(x)]",
"def remove_item(self, item: tuple) -> None:\n self._antecedent.remove(item)\n self._is_updated = False",
"def remove(self):\n pass",
"def remove(self):\n pass",
"def remove(self):\n pass",
"def __delitem__(self, key):\n self.f_remove(key)",
"def remove(self, key):\n match = self.find(key)\n if not match:\n raise UserDBValueError(\"Element not found in list\")\n\n self._elements = [this for this in self._elements if this != match]\n return self",
"def discard(self, item):\n try:\n self._del(item)\n except KeyError:\n pass"
] | [
"0.8421894",
"0.75798225",
"0.7519389",
"0.7402691",
"0.7387822",
"0.72861737",
"0.72861737",
"0.71671855",
"0.7151665",
"0.7125882",
"0.7112938",
"0.7072577",
"0.70445186",
"0.70413494",
"0.70383376",
"0.7036314",
"0.7003476",
"0.6928036",
"0.6928036",
"0.6921462",
"0.69089407",
"0.6907743",
"0.6869458",
"0.68509316",
"0.6838494",
"0.6838494",
"0.6838494",
"0.683627",
"0.68296826",
"0.68287235"
] | 0.8193228 | 1 |
Pull item from database. See `$pull` in MongoDB's `update_one`. | def pull(self, query, reload=True):
qs = self._get_queryset()
qs.update_one({'$pull': {self.__field_name__: query}})
self.__log__.append(SetPull(query=query))
if reload:
self.reload() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def pull(self, *arg, **kwds):\n pass",
"def make_pull(db,url):\n result = db.product_mstator.find_one({\"url\":url})\n return result",
"def pull(self, pull: Optional[int] = None) -> Optional[int]:\n ...",
"async def _pull(self) -> None:\n raise NotImplementedError()",
"def get_one(collection: Collection, query: Dict[str, Any]):\n data = collection.find_one(query)\n if data is None:\n raise CannotFindItemInDatabase(query, data, collection.name)\n return data",
"def _pull(self) -> None:\n raise NotImplementedError() # pragma: no cover",
"def singularity_pull(self, image):\n Client.pull(image)",
"def pull(self, timeout=60, factory=SQSItem):\n self._connect()\n message = self.queue.read(timeout)\n if message is not None:\n body = message.get_body()\n data = json.loads(body)\n item = factory(data)\n\n # Mark for futher deletion, but do not refer to it from ouselves (for garbage collectors).\n if item is not None:\n item.__dict__['_queue_'] = self.queue\n item.__dict__['_message_'] = message\n\n return item\n else:\n return None",
"def pull(self, **kwargs):\n return _taskpipeoperation(self,'pull', **kwargs)",
"def pull(self):\n data = api.get(endpoint=self.endpoint, resource_id=self.id)\n self.__init__(**data)",
"def pull(self):\n raise NotImplementedError()",
"def get_item(item_id):\n return Item.query.filter_by(id=item_id).first()",
"def pop_message(self):\n app_process = sqlite3.connect('app_process::memory:', check_same_thread=False)\n app_process_cursor = app_process.cursor()\n app_process_cursor.execute(\"SELECT * FROM message_queue WHERE tstamp==(:first)\", {\"first\":self.mq_first}) \n item = app_process_cursor.fetchone()\n if item == None:\n return\n app_process_cursor.execute(\"DELETE FROM message_queue WHERE tstamp==(:first)\", {\"first\":self.mq_first})\n self.mq_first = item[4] #now sets first to next item pointed to\n app_process.commit()\n app_process.close()\n return item",
"def pull(self):",
"def popitem(self):\n return self.pop(0)",
"def pop(self):\n return self.list.pop()",
"def pull(self, *args, **kwargs) -> Any:\n raise NotImplementedError",
"def pop(self):\n item = None\n for x in self.user_order:\n if self.user_skip[x] > 0:\n continue\n item = self.user_queue[x].pop(0)\n self.obj_item.pop(item.obj)\n self.user_skip[x] = item.songs\n break\n self._normalise()\n return item.obj if item else None",
"def remove_bucket_list_item(self, id, collection, item):\n if type(id) is not ObjectId:\n id = ObjectId(id)\n obj = getattr(self.db, collection)\n result = obj.update(\n {'_id': id},\n {'$pull': {'bucket_list': item}}\n )\n return result",
"def pop(self):\n record = self.db.crawl_queue.find_and_modify(\n query={'status': self.WAITING},\n update={'$set': {'status': self.PROCESSING, 'timestamp': datetime.now()}}\n )\n if record:\n return record\n else:\n self.repair()\n raise KeyError()",
"def pull(self):\n data = api.get(endpoint=self.endpoint, resource_id=self.slug)\n self.__init__(**data)",
"def read_item(id: str, request: Request):\n obj = db.get(id, kind=endpoint_model)\n return obj",
"def get(self, task_id=None):\n if task_id:\n item = self.find(task_id)\n self.queue.remove(item)\n else:\n item = self.queue.get()\n return item",
"def pop(self):\r\n return self.list.pop()",
"def pull(self) -> int:",
"def pull(connection, rid=None, repo=None):\n\n if repo is None:\n repo = Repository(connection, rid)\n\n return repo.pull()",
"def pull(self, subscription, project):\n response, content = self._http.request(\n '%s/%s/subscriptions/%s:pull' % (\n PUBSUB_BASE_URL, project, subscription),\n body=json.dumps({'maxMessages': 1, 'returnImmediately': False}),\n method='POST',\n )\n if response['status'] == '404':\n raise NotFoundError(response, json.loads(content))\n return json.loads(content)",
"async def get_one(self, pk):\n\n return await self._expand(await self.db.get_one(pk=pk))",
"def get(self, block=True, timeout=None): \n if block: \n item = self.__db.blpop(self.key, timeout=timeout) \n else: \n item = self.__db.lrpop(elf.key) \n \n if item: \n item = item[1] \n return item",
"def read_item(\n db: Session = Depends(deps.get_db),\n item: models.Item = Depends(deps.get_owned_item_by_id),\n current_user: schemas.UserInDB = Depends(deps.get_current_active_user),\n) -> Any:\n return item"
] | [
"0.6538052",
"0.63186467",
"0.6040806",
"0.6029069",
"0.59004635",
"0.5854365",
"0.5729563",
"0.5702147",
"0.5686264",
"0.5669243",
"0.56689715",
"0.56077796",
"0.5606335",
"0.55896586",
"0.55583566",
"0.55535597",
"0.5551364",
"0.5546508",
"0.5537837",
"0.55240315",
"0.5517945",
"0.55175555",
"0.548034",
"0.54265666",
"0.53941196",
"0.5378648",
"0.537691",
"0.53701586",
"0.53442997",
"0.53420424"
] | 0.6360697 | 1 |
Generate string from symbols. | def generate_random_string(symbols, length):
sym_list = symbols.split()
str_list = random.sample(sym_list, length)
gen_string = ''.join(str_list)
return gen_string | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def r_string(symbols, length):\n return ''.join(R.choice(symbols)\n for _ in range(length))",
"def to_string(self):\r\n production_dict = self.get_production_dict()\r\n\r\n string_prods = ['S -> ' + ' | '.join([''.join(symbols) for symbols in production_dict.pop('S')])]\r\n for non_terminal, symbols_list in production_dict.items():\r\n string_prods.append(non_terminal + ' -> ' + ' | '.join([''.join(symbols) for symbols in symbols_list]))\r\n\r\n # concateate em\r\n return '\\n'.join(string_prods)",
"def gen_random(self, symbol):\n sentence = ''\n\n # select one production of this symbol randomly\n rand_prod = random.choice(self.prod[symbol])\n\n for sym in rand_prod:\n # for non-terminals, recurse\n if sym in self.prod:\n sentence += self.gen_random(sym)\n else:\n sentence += sym + ' '\n\n return sentence",
"def gen_random(self, symbol):\n sentence = ''\n\n # select one production of this symbol randomly\n rand_prod = random.choice(self.prod[symbol])\n\n for sym in rand_prod:\n # for non-terminals, recurse\n if sym in self.prod:\n sentence += self.gen_random(sym)\n else:\n sentence += sym + ' '\n\n return sentence",
"def symbol_name(string):\n return 'USymbol' + convert_name(string, True)",
"def buildString( lsys, iter ):\n\tnstring = getBase(lsys)\n\trule = getRule(lsys, 0)\n\tsymbol = rule[0]\n\treplacement = rule[1]\n\tfor i in range(iter):\n\t\tnstring = nstring.replace( symbol, replacement )\n\treturn nstring",
"def test_get_symbols_as_str(self) -> None:\n tape = TMTape(\n tape=\"abcdef\",\n blank_symbol=\".\",\n current_position=2,\n )\n self.assertEqual(tape.get_symbols_as_str(), \"abcdef\")",
"def random_name(symbols=6):\n name = ''\n for i in range(symbols):\n name += random.choice(random.choice(string.ascii_letters))\n return name",
"def string(self):\n text = \"\"\n for char, qty in self.chars.items():\n text += char * qty\n return \"\".join(sorted(text))",
"def MakeSymbolName(self,content):\n return self.register(SymbolName(content,reg=self))",
"def __str__(self) -> str:\n\n return self._format_symbol_table_content(\"Symbol table\", self._symbols.items())",
"def get_str ( self ):\n value = self.value\n def gen_words():\n if value == self.OV_NONE:\n yield \"none\"\n else:\n if value & self.OV_SYM_EXIST:\n if value & self.OV_SYM_DEAD:\n yield \"symlinks\"\n else:\n yield \"symlinks to existing files\"\n elif value & self.OV_SYM_DEAD:\n yield \"broken symlinks\"\n\n if value & self.OV_FILE:\n yield \"files\"\n # --- end of gen_words (...) ---\n\n return ', '.join ( gen_words() ) + \" (0x{:x})\".format ( value )",
"def _sympystr(self, printer: StrPrinter, *args: Any) -> str:\n return self.__str__()",
"def symbol(string: str) -> str:\n L = len(string)\n P1 = random.randint(1, L-1, 2)\n chars = []\n for char in string:\n chars.append(char)\n chars[P1[0]] = chars[P1[0]].upper()\n chars[P1[1]] = chars[P1[1]].upper()\n return ''.join(x for x in chars if x.isupper())+str(random.randint(9))",
"def __str__(self):\n dictt = self.getFullDict()\n return \"SymbolTable(\\n{}\\n)\".format(pprint.pformat(dictt))",
"def generate_symbol_definitions(mode, symbols, prefix, definition):\n direct = []\n tabled = []\n for ii in symbols:\n direct += [ii.generate_rename_direct(prefix)]\n tabled += [ii.generate_rename_tabled(prefix)]\n if \"vanilla\" == mode:\n tabled = direct\n return template_symbol_definitions % (definition, \"\\n\".join(direct), \"\\n\".join(tabled))",
"def strUniPoly(poly, symbol=\"X\", asc=True):\n return termorder.UnivarTermOrder(cmp).format(poly, symbol, asc)",
"def generate_symbol_struct(mode, symbols, definition):\n if \"vanilla\" == mode:\n return \"\"\n definitions = []\n hashes = []\n symbol_table_content = \"\"\n for ii in symbols:\n definitions += [\" %s;\" % (ii.generate_definition())]\n hashes += [\" %s%s,\" % (ii.generate_prototype(), ii.get_hash())]\n if \"dlfcn\" != mode:\n symbol_table_content = \" =\\n{\\n%s\\n}\" % (\"\\n\".join(hashes))\n return template_symbol_table % (definition, \"\\n\".join(definitions), symbol_table_content)",
"def create_symbol(self, base_ccy, asset):\n asset = self.user_to_exchange(self.name, asset)\n return (asset+base_ccy).upper()",
"def __str__(self):\n s = self.prefix.symbol\n s += self.unit.symbol\n if len(self.timebase.symbol) > 0:\n s += '/' + self.timebase.symbol\n if s == 'mls/min':\n s = 'sccm'\n elif s == 'ls/min':\n s = 'slm'\n if self.unit == Sfc5xxxUnit.STANDARD_LITER_15C:\n s += ' (15°C)'\n elif self.unit == Sfc5xxxUnit.STANDARD_LITER_25C:\n s += ' (25°C)'\n elif self.unit == Sfc5xxxUnit.STANDARD_LITER_70F:\n s += ' (70°F)'\n return s",
"def clean_symbols(self):\n self.add_labels()\n variable_counter = 16\n for i in range(len(self.commands)):\n command = self.commands[i]\n if command.startswith('@'): # symbols always reside in A instructions\n value = command.split('@')[1]\n if not value.isdigit(): # is a symbol\n if value not in self.symbol_table: # is a variable\n self.symbol_table[value] = str(variable_counter)\n variable_counter += 1\n numeric_value = self.symbol_table.get(value)\n command = '@' + numeric_value\n self.commands[i] = command",
"def _format_symbol_table_content(self, title: str, symbols: Any) -> str:\n\n header = f\"\\t\\t:::: {title} ::::\"\n lines = [\"\\n\", header, \"__\" * len(header)]\n if type(symbols).__name__ == \"odict_items\":\n lines.extend((f\"| {key}: {value}\") for key, value in self._symbols.items())\n elif type(symbols).__name__ == \"odict_keys\":\n lines.extend((f\"| {key}\") for key in self._symbols.keys())\n\n lines.append(\"\\n\")\n formatted_content = \"\\n\".join(lines)\n return formatted_content",
"def print_symbols():\n\n global program\n if program is None:\n print \"no program is loaded\"\n return\n for(s, a) in program.symbols():\n print \"0x{:x} : {}\".format(a, s)",
"def encode_symbol(self, string):\n symb = self.symbol_to_bits.get(string)\n if symb:\n return symb\n return self.symbol_to_bits.get(string + ' ')",
"def _str_make(self):\n return self._name if self._fact is None else f\"{self._fact} × {self._name}\"",
"def generate_syllable():\n return generate_vowel() + generate_consonant()",
"def unicode_ion_symbol(self) -> str:\n superscripts = {\n \"+\": \"\\u207A\",\n \"-\": \"\\u207B\",\n \"0\": \"\\u2070\",\n \"1\": \"\\u00B9\",\n \"2\": \"\\u00B2\",\n \"3\": \"\\u00B3\",\n \"4\": \"\\u2074\",\n \"5\": \"\\u2075\",\n \"6\": \"\\u2076\",\n \"7\": \"\\u2077\",\n \"8\": \"\\u2078\",\n \"9\": \"\\u2079\",\n }\n table = str.maketrans(superscripts)\n template = \"+\" if self.charge > 0 else \"-\"\n\n if abs(self.charge) != 1:\n template = str(abs(self.charge)) + template\n\n return self.symbol + template.translate(table)",
"def __str__(self):\n string = ''\n for degree, coef in enumerate(self.coefs, 1):\n degree = degree - 1\n string += str(coef)+'x^' + str(degree) + ' + '\n string = string[0:-3] # remove the last ' + '\n return string",
"def gen_chars(length, character):\n return ''.join([character for i in range(length)])",
"def _generate_symbols(self):\n\n def infix(id, bp):\n def led(self, left):\n self.first = left\n self.second = self.parent.expression(bp)\n return self\n\n self.symbol_factory(id, bp).led = led\n\n def prefix(id, bp):\n def nud(self):\n self.first = self.parent.expression(bp)\n return self\n\n self.symbol_factory(id, bp).nud = nud\n\n def infixr(id, bp):\n def led(self, left):\n self.first = left\n self.second = self.parent.expression(bp - 1)\n return self\n\n self.symbol_factory(id, bp).led = led\n\n def paren(id):\n def nud(self):\n expr = self.parent.expression()\n self.parent._advance(\"RIGHT_PAREN\")\n return expr\n\n self.symbol_factory(id).nud = nud\n\n paren(\"LEFT_PAREN\")\n self.symbol_factory(\"RIGHT_PAREN\")\n self.symbol_factory(\"END\")\n self.symbol_factory(\":\")\n self.symbol_factory(\"NEWLINE\")\n self.symbol_factory(\"INDENT\")\n self.symbol_factory(\"DEDENT\")\n\n # numbers denote order of operations\n infix(\"+\", 10)\n infix(\"-\", 10)\n infix(\"*\", 20)\n infix(\"/\", 20)\n infix(\"==\", 5)\n infix(\">\", 5)\n infix(\"<\", 5)\n infix(\"&\", 4)\n infix(\"|\", 3)\n infix(\",\", 1)\n infix(\"::\", 1)\n \n infixr(\"=\", 1) # assignment is a little different from others.\n\n # example +4 , -2 \n prefix(\"+\", 100)\n prefix(\"-\", 100)\n\n def literal(id):\n self.symbol_factory(id).nud = lambda self: self\n\n for l in [\"NUMBER\", \"FLOAT\", \"NAME\", \"STRING\", \"BOOL\"]:\n literal(l)\n\n def statement(id, std):\n self.symbol_factory(id).stmt_begin = True\n self.symbol_factory(id).std = std\n\n def if_statement(self):\n self.first = self.parent.expression()\n self.parent._advance([\":\"])\n self.parent._advance([\"NEWLINE\"])\n self.second = self.parent.Block()\n if self.parent.token.id == \"else\":\n self.parent._advance([\"else\"])\n self.parent._advance([\":\"])\n self.parent._advance([\"NEWLINE\"])\n self.third = self.parent.Block()\n return self\n\n def let_statement(self):\n self.first = self.parent.expression()\n self.parent._advance([\"NEWLINE\"])\n return self\n\n def print_statement(self):\n self.parent._advance([\"LEFT_PAREN\"])\n self.first = self.parent.expression()\n self.parent._advance([\"RIGHT_PAREN\"])\n self.parent._advance([\"NEWLINE\"])\n return self\n\n def while_statement(self):\n self.parent._advance([\"LEFT_PAREN\"])\n self.first = self.parent.expression()\n self.parent._advance([\"RIGHT_PAREN\"])\n self.parent._advance([\":\"])\n self.parent._advance([\"NEWLINE\"])\n self.second = self.parent.Block()\n return self\n\n def func_statement(self):\n arg_list = []\n\n self.first = self.parent.expression()\n self.parent._advance([\"LEFT_PAREN\"])\n self.second = self.parent.expression()\n self.parent._advance([\"RIGHT_PAREN\"])\n self.parent._advance([\":\"])\n self.parent._advance([\"NEWLINE\"])\n self.third = self.parent.Block()\n return self\n\n statement(\"if\", if_statement)\n statement(\"let\", let_statement)\n statement(\"print\", print_statement)\n statement(\"while\", while_statement)\n statement(\"fn\", func_statement)"
] | [
"0.65853775",
"0.63188434",
"0.62873995",
"0.62873995",
"0.6264359",
"0.6238999",
"0.6125742",
"0.6105235",
"0.60517746",
"0.60211015",
"0.59943384",
"0.5946436",
"0.5872985",
"0.57784",
"0.5774602",
"0.57340676",
"0.5729776",
"0.5718029",
"0.5713585",
"0.56909215",
"0.5682448",
"0.56823283",
"0.56744325",
"0.56432253",
"0.56231457",
"0.56220573",
"0.5604244",
"0.56035024",
"0.5595535",
"0.5590957"
] | 0.66242063 | 0 |
Generate a unique value for a field of an object | def generate_object_field(symbols, length, object, field):
string = generate_random_string(symbols, length)
try:
instance = object.objects.get(**{field:string})
if instance:
string = generate_object_field(symbols, length, object, field)
except object.DoesNotExist:
pass
return string | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _unique_key(self):\n key = f'factor_{self.counter}'\n self.counter += 1\n return key",
"def unique_id():\n global _unique_id_counter\n _unique_id_counter += 1\n return \"_element_{}\".format(_unique_id_counter)",
"def unique_id() -> str:",
"def get_unique_value(query_set, field_name, value):\n \n original_value = value\n column_count = query_set.filter(**{field_name: value}).count()\n to_append = 2\n while column_count != 0:\n value = \"%s_%s\" % (original_value, to_append)\n column_count = query_set.filter(**{field_name: value}).count()\n to_append = to_append + 1\n return value",
"def _unique_key(self):\n key = f'param_{self.counter}'\n self.counter += 1\n return key",
"def _uniq_id():\n return random.getrandbits(64)",
"def unique_key_generator(instance):\n size = random.randint(30, 45)\n key = get_random_string(size=size)\n\n Klass = instance.__class__\n qs_exists = Klass.objects.filter(key=key).exists()\n if qs_exists:\n return get_unique_slug(instance)\n return key",
"def make_fields_unique(self, fields):\n ...",
"def unique_key_generator(instance):\n size = random.randint(30, 45)\n key = random_string_generator(size=size)\n\n Klass = instance.__class__\n qs_exists = Klass.objects.filter(key=key).exists()\n if qs_exists:\n return unique_slug_generator(instance)\n return key",
"def gen_key(self):\n\n if len(self.fields) == 0:\n return None\n\n # we do not call self.validate() here as self._id will always be null,\n # so we call self.validator.validate on the schema. This will validate\n # that 'effectiveDate', 'carrier', and 'flightNumber' are not None\n # and of valid data type\n if self.validator.validate(self.fields) == False:\n return None\n\n h = hashlib.md5()\n h.update(self.fields['effectiveDate'].isoformat())\n h.update(str(self.fields['carrier']))\n h.update(str(self.fields['flightNumber']))\n\n return h.hexdigest()",
"def unique_name():\n return \"unique-{0}\".format(uuid.uuid4())",
"def _get_unique_id(self):\n now = datetime.now()\n\n u_id = now.second + 60*(now.minute + 60*(now.hour + 24*(now.day + 31*(now.month + 366*(now.year)))))\n return \"instance\" + str(u_id)",
"def generate_unique_slug(klass, field, instance=None):\n origin_slug = slugify(field)\n unique_slug = origin_slug\n numb = 1\n if instance is not None:\n while klass.objects.filter(slug=unique_slug).exclude(id=instance.id).exists():\n unique_slug = '%s-%d' % (origin_slug, numb)\n numb += 1\n else:\n while klass.objects.filter(slug=unique_slug).exists():\n unique_slug = '%s-%d' % (origin_slug, numb)\n numb += 1\n return unique_slug",
"def unique_purchase_id_generator(instance):\n purchase_new_id = get_random_string()\n\n Klass = instance.__class__\n qs_exists = Klass.objects.filter(purchase_id=purchase_new_id).exists()\n if qs_exists:\n return get_unique_slug(instance)\n return purchase_new_id",
"def get_unique_integer() -> int:\n return int(time.time())",
"def generate_field_name(field_class, field_label, max_tries=10000, use_uuid=True):\n\n def field_name_in_use(_field_class, _name):\n return _field_class.objects.filter(name=_name).count() > 0\n\n field_name = slugify(field_label)\n\n if not field_name_in_use(field_class, field_name):\n return field_name\n\n for i in range(1, max_tries + 1):\n if not field_name_in_use(field_class, field_name + str(i)):\n return field_name + str(i)\n\n if use_uuid:\n field_name = str(uuid.uuid4())\n while field_name_in_use(field_class, field_name):\n field_name = str(uuid.uuid4())\n\n return field_name\n\n return None",
"def get_or_create_unique_id(self):\n if not self.unique_id:\n self.unique_id = uuid.uuid4().hex\n return self.unique_id",
"def pre_save(self, model_instance, add):\n value = super().pre_save(model_instance, add)\n if self.auto and not value:\n # Assign a new value for this attribute if required.\n value = shortuuid.uuid(name=self.namespace)\n if self.prefix:\n value = self.prefix + ':' + value\n setattr(model_instance, self.attname, value)\n return value",
"def get_unique_id():\n global unique_id_increment\n if unique_id_increment is None:\n unique_id_increment = 0\n unique_id_increment += 1\n return '%d%d' % (int(time.time()), unique_id_increment)",
"def generate_unique_model_id(model_class, prefix, variable_part_length):\n while True:\n unique_alphanum = generate_random_alphanumeric(variable_part_length)\n unique_id = f'{prefix}{unique_alphanum}'\n if not model_class.objects.filter(string_id=unique_id).exists():\n return unique_id",
"def _next_id(self):\n self._uniq_id += 1\n return str(self._uniq_id)",
"def _next_id(self):\n self._uniq_id += 1\n return str(self._uniq_id)",
"def get_unique_id():\n global unique_id_increment\n if unique_id_increment is None:\n unique_id_increment = 0\n unique_id_increment += 1\n return \"%d%d\" % (int(time.time()), unique_id_increment)",
"def createUniqueRatingId():\n #connector = appEngine.connect()\n ratingID = 'r' + str(ceil(time.time()))\n return ratingID",
"def unique_id(self):\n return f\"octopus_energy_gas_{self._serial_number}_{self._mprn}_previous_accumulative_cost_override_tariff\"",
"def unique_order_id_generator(instance):\n order_new_id = get_random_string()\n\n Klass = instance.__class__\n qs_exists = Klass.objects.filter(order_id=order_new_id).exists()\n if qs_exists:\n return get_unique_slug(instance)\n return order_new_id",
"def unique_id(self):\n if not self._unique_id:\n h = hashlib.sha224()\n self.expr_op.update_hash(h)\n self._unique_id = b64encode(h.digest(), altchars=\"_-\").rstrip(\"=\")\n return self._unique_id",
"def get_unique_id(self):\n if not self.unique_id:\n self.unique_id = uuid.uuid4().hex\n return self.unique_id",
"def makeid(cls):\n return str(uuid.uuid4().hex)",
"def unique_id(self) -> str:\n return \"{}-{}-{}\".format(*self._id)"
] | [
"0.6485617",
"0.6468718",
"0.6443546",
"0.64268094",
"0.6342841",
"0.63409305",
"0.63334376",
"0.62932295",
"0.62595534",
"0.6258071",
"0.62518674",
"0.62270314",
"0.6215765",
"0.62076354",
"0.6207109",
"0.6184692",
"0.61518097",
"0.6139777",
"0.613857",
"0.6138417",
"0.6127942",
"0.6127942",
"0.6113496",
"0.61022687",
"0.60945237",
"0.6049061",
"0.6044051",
"0.60412115",
"0.6027586",
"0.6003176"
] | 0.65939534 | 0 |
Generate activation token as the sha1 hash of a random salt concatenated with the username | def generate_activation_token(username):
salt = hashlib.sha1(str(random.random()).encode('utf-8')).hexdigest()[:5]
encoded_string = salt + username
token = hashlib.sha1(encoded_string.encode('utf-8')).hexdigest()
try:
profile = ActivationProfile.objects.get(token=token)
if profile:
token = generate_activation_token(username)
except ActivationProfile.DoesNotExist:
pass
return token | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _gen_activation_hash():\r\n # for now just cheat and generate an api key, that'll work for now\r\n return User.gen_api_key()",
"def generate_token():\n return uuid4()",
"def _generate_token(self):\n return sha1(\"%s#%s\" % (time(),\n self.app.cfg['sessions/secret'])).hexdigest()",
"def generate_token():\n chars = ('abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789')\n rand = random.SystemRandom()\n random_string = ''.join(rand.choice(chars) for _ in range(40))\n return hmac.new(\n config.SECRET_KEY,\n random_string,\n hashlib.sha256\n ).hexdigest()",
"async def _token(self, user: discord.User = None, user_id: int = None):\n # This is to be used with the registration URL so that it doesn't contain\n # the user's ID in cleartext. This is so that person A cannot trivially\n # generate person B's URL and assign them to person A's team.\n if not user:\n user = self.bot.get_user(user_id)\n hashh = await self.config.user(user).digest()\n if hashh is None:\n salt = await self.config.user(user).secret()\n if salt is None:\n salt = random_salt()\n await self.config.user(user).secret.set(salt)\n hashh = digest(user.id, salt)\n await self.config.user(user).digest.set(hashh)\n await self.config.set_raw('undigest', hashh, value=user.id)\n return hashh",
"def create_token(self,uid):\n token_str = self.get_random(5) + str(uid) + str(int(time.time()))\n m = hashlib.md5()\n m.update(token_str)\n return m.hexdigest()",
"def generate_token(login, password):\n time = datetime.datetime.now().timestamp()\n raw_string = str(login) + str(password) + str(time)\n return hashlib.sha256(str(raw_string).encode('utf-8')).hexdigest()",
"def create_token(self):\n ts_datetime = self.logged_at or self.created_at\n ts = int(mktime(ts_datetime.timetuple()))\n key = base64.encodestring(self.email)\n base = \"{}{}\".format(key, ts)\n salt, hsh = self.password.split('$')\n return \"{}$${}\".format(key, get_hexdigest(salt, base))",
"def short_token():\n hash = hashlib.sha1(force_bytes(shortuuid.uuid()))\n hash.update(force_bytes(settings.SECRET_KEY))\n return hash.hexdigest()[::2]",
"def new_token(*args, **kwargs):\n return uuid.uuid4().hex",
"def long_token():\n hash = hashlib.sha1(force_bytes(shortuuid.uuid()))\n hash.update(force_bytes(settings.SECRET_KEY))\n return hash.hexdigest()",
"def short_token():\n hash = hashlib.sha1(shortuuid.uuid().encode('utf-8'))\n hash.update(settings.SECRET_KEY.encode('utf-8'))\n return hash.hexdigest()[::2]",
"def generate_state_token():\n chars = (ascii_letters + digits)\n rand = SystemRandom()\n random_string = ''.join(rand.choice(chars) for _ in range(len(chars)))\n return hmac.new(\n config.SECRET_KEY.encode('utf-8'),\n random_string.encode('utf-8'),\n hashlib.sha256\n ).hexdigest()",
"def make_token():\n return secrets.token_urlsafe(36)",
"def generateAuthToken():\r\n alnum = ''.join(c for c in map(chr, range(256)) if c.isalnum())\r\n return ''.join(random.choice(alnum) for _ in range(32))",
"def generate_token(ip: Text):\n return pbkdf2_sha256.encrypt(salt + ip)",
"def generate_token(ip: Text):\n return pbkdf2_sha256.encrypt(salt + ip)",
"def create_hash(self):\n return os.urandom(32).encode('hex')",
"def long_token():\n hash = hashlib.sha1(shortuuid.uuid().encode('utf-8'))\n hash.update(settings.SECRET_KEY.encode('utf-8'))\n return hash.hexdigest()",
"def gen_api_key(username):\n salt = str(os.urandom(64)).encode('utf-8')\n return hash_password(username, salt)",
"def gen_verification_token(user):\n exp_date = timezone.now() + timedelta(days=3)\n payload = {\n 'user': user.username,\n 'exp': int(exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')\n return token.decode()",
"def gen_verification_token(user):\n exp_date = timezone.now() + timedelta(days= 3)\n payload = {\n 'user': user.username,\n 'exp': int (exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm= 'HS256')\n return token",
"def gen_verification_token(user):\n exp_date = timezone.now() + timedelta(days=3)\n payload = {\n 'user': user.username,\n 'exp': int(exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n # Generacion del token\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')\n return token",
"def generate_token(self):\n token = randint(100000000000000000, 999999999999999999)\n return str(token)",
"def generate_password():\n return urlsafe_b64encode(urandom(32)).decode('utf-8')",
"def random_username():\n return str(uuid.uuid4().hex.upper())",
"def _generate_token_value():\n return secrets.token_urlsafe()",
"def generate_token(usr):\n token = jwt.encode({\"user\":usr, \"exp\":datetime.datetime.utcnow()\n + datetime.timedelta(minutes=30)}, KEY)\n user = User.update(token=token).where(User.username == usr)\n user.execute()\n return token",
"def gen_verification_token(self, user):\n exp_date = timezone.now() + timedelta(days=3)\n payload = {\n 'user': user.username,\n 'exp': int(exp_date.timestamp()),\n 'type': 'email_confirmation'\n }\n token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')\n return token",
"def generate_user(self):\n token = str(uuid.uuid4())\n return self.generate_subid(token=token, return_user=True)"
] | [
"0.7930364",
"0.732733",
"0.7284244",
"0.72545826",
"0.7070762",
"0.70537156",
"0.7041763",
"0.7013381",
"0.700369",
"0.6981259",
"0.69533074",
"0.69524884",
"0.692625",
"0.6890338",
"0.6884929",
"0.687067",
"0.687067",
"0.6851789",
"0.68499076",
"0.6831315",
"0.6826171",
"0.68140703",
"0.6775213",
"0.67578155",
"0.67297477",
"0.67049545",
"0.66910046",
"0.66760594",
"0.66471004",
"0.6632492"
] | 0.8487366 | 0 |
Find activation profile by field (token or sms) and code. Activate user object, set password, and delete activation profile. Also authenticate user. | def activate_profile(field, code, request):
try:
activation = ActivationProfile.objects.get(**{field:code})
except ActivationProfile.DoesNotExist:
messages.error(request, _('Activation code expired or not valid!'))
return False
if timezone.now() < activation.valid_through:
activation.user.is_active = True
activation.user.set_unusable_password()
activation.user.save()
if request.user.is_anonymous():
if field == 'token':
user = authenticate(username=activation.user.username, token=activation.token)
elif field == 'sms_key':
user = authenticate(username=activation.user.username, code=activation.sms_key)
else:
user = None
activation.delete()
if user:
login(request, user)
messages.success(request, _("""Profile activated successfully! You should change your password!"""))
return True
else:
return False
else:
messages.success(request, _("""You already have an account!"""))
return False | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def activate_account(self, activation_key):\n try:\n registration_profile = self.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n return None\n\n if not registration_profile.is_expired():\n user = registration_profile.user\n user.is_active = True\n user.save()\n registration_profile.delete()\n return user\n else:\n return None",
"def activate_user(self, activation_key):\n # Make sure the key we're trying conforms to the pattern of a\n # SHA1 hash; if it doesn't, no point trying to look it up in\n # the database.\n try:\n profile = self.get(admin_key=activation_key)\n except self.model.DoesNotExist:\n return False, False\n user = profile.user\n activated = False\n if not user.is_active:\n user.is_active = True\n user.save()\n activated = True\n return (activated, user)",
"def activate(request, uidb64, token):\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except (TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n\n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n user.profile.email_confirmed = True\n user.save()\n login(request, user)\n return redirect('home')\n else:\n return render(request, 'registration/activation_invalid.html')",
"def test_valid_activation_with_profile(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n profile, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current(), get_profile=True)\n\n self.assertIsInstance(profile, self.registration_profile)\n self.assertEqual(profile.id, profile.id)\n self.assertTrue(profile.activated)\n self.assertTrue(activated)\n\n new_user.refresh_from_db()\n self.assertTrue(profile.user.id, new_user.id)\n self.assertTrue(new_user.is_active)",
"def test_valid_activation_with_profile(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n profile, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current(), get_profile=True)\n\n self.assertIsInstance(profile, self.registration_profile)\n self.assertEqual(profile.id, profile.id)\n self.assertTrue(profile.activated)\n self.assertTrue(activated)\n\n new_user.refresh_from_db()\n self.assertTrue(profile.user.id, new_user.id)\n self.assertFalse(new_user.is_active)",
"def activate_user(self, activation_key):\n # Make sure the key we're trying conforms to the pattern of a\n # SHA1 hash; if it doesn't, no point even trying to look it up\n # in the DB.\n if SHA1_RE.search(activation_key):\n try:\n user_profile = self.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n return False\n if not user_profile.activation_key_expired():\n # Account exists and has a non-expired key. Activate it.\n user = user_profile.user\n user.is_active = True\n user.save()\n return user\n return False",
"def activate_user(self, activation_key):\n if SHA1_RE.search(activation_key):\n try:\n profile = RegistrationProfile.objects.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n return False\n if not profile.activation_key_expired():\n user = profile.user\n user.is_active = True\n user.save()\n profile.activation_key = \"ALREADY_ACTIVATED\"\n profile.save()\n return user\n\n return False",
"def activate_user(self, activation_key):\r\n # Make sure the key we're trying conforms to the pattern of a\r\n # SHA1 hash; if it doesn't, no point trying to look it up in\r\n # the database.\r\n if SHA1_RE.search(activation_key):\r\n try:\r\n profile = self.get(activation_key=activation_key)\r\n except self.model.DoesNotExist:\r\n return False\r\n if not profile.activation_key_expired():\r\n user = profile.user\r\n user.is_active = True\r\n user.save()\r\n profile.activation_key = \"ALREADY_ACTIVATED\"\r\n profile.save()\r\n return user\r\n return False",
"def activate_user(cls, activation_key):\n #from registration.signals import user_activated\n \n # Make sure the key we're trying conforms to the pattern of a\n # SHA1 hash; if it doesn't, no point trying to look it up in\n # the database.\n db = DB_Session()\n if SHA1_RE.search(activation_key):\n query = db.query(RegistrationProfile)\n profile = query.filter(RegistrationProfile.activation_key == activation_key).one()\n if not profile:\n return False\n if not profile.activation_key_expired():\n user = profile.user\n user.is_active = 1\n profile.activation_key = RegistrationProfile.ACTIVATED\n db.flush()\n db.commit()\n db.close()\n #user_activated.send(sender=self.model, user=user)\n return user\n return False",
"def activate(self, *args, **kwargs):\n username = self.validate_key(kwargs.get(\"activation_key\"))\n user = self.get_user(username)\n user.is_active = True\n user.save()\n return user",
"def activate(request, activation_key):\n profile = get_object_or_404(User, activation_key=activation_key)\n if profile.akey_expires < timezone.now():\n return render('user_account/activate.html', {'expired': True})\n\n profile.save(update_fields=['active', 'activation_key'])\n return render(\n 'user_account/activate.html',\n {'success': True, 'name': profile.name + \" \" + profile.surname}\n )",
"def activate(request, activation_key,template_name='registration/activate.html',extra_context=None):\n\tactivation_key = activation_key.lower() # Normalize before trying anything with it.\n\taccount = RegistrationProfile.objects.activate_user(activation_key)\n\t\n\t\n\t#new profile PROBLEME NON ENREGISTREMENT DU PROFILE\n\t#recuperer l user id de l'account user.id\n\tprofile = UserProfile();\n\tprofile.user = account\n\tprofile.save()\n\t\n\t\n\tif extra_context is None:\n\t\textra_context = {}\n\tcontext = RequestContext(request)\n\tfor key, value in extra_context.items():\n\t\tcontext[key] = callable(value) and value() or value\n\treturn render_to_response(template_name,{ 'account': account,'expiration_days': settings.ACCOUNT_ACTIVATION_DAYS }, context_instance=context)",
"def activateWebAppUser( self, username, activation_code ):\n try:\n con = self.getMetadataDatabaseConnection()\n user_data = con.cursor()\n\n con.cursor().callproc('verify_user_activation_code', [username, activation_code, user_data])\n row = user_data.fetchone()\n if row:\n con.cursor().callproc('activate_user_account', [username])\n return True\n else:\n return False\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), e)\n return False",
"def activate_user(activation_code, new_password):\n um = logic.UserManager()\n try:\n user = um.lookup_user_by_activation_code(activation_code)\n user.activate()\n user.set_password(new_password)\n except ex.UserNotFoundError:\n blogger.debug(\"no user found with activation code %s\" % activation_code)\n transaction.abort()\n return dict(activated=False)\n else:\n transaction.commit()\n return dict(activated=True)",
"def activate_token(request, token):\n # Getting environment from settings\n debug = settings.DEBUG\n\n # Based on the debug redirect the user to correct url\n if debug:\n REDIRECT_URL = 'http://localhost:3000'\n else:\n REDIRECT_URL = 'https://leadbook-challenge.herokuapp.com'\n\n try:\n profile = Profile.objects.get(activation_key=token)\n profile.is_verified = True\n profile.save()\n except Profile.DoesNotExist:\n profile = None\n\n if profile:\n return HttpResponseRedirect('{}/activation/success'.format(REDIRECT_URL))\n else:\n return HttpResponseRedirect('{}/activation/failed'.format(REDIRECT_URL))",
"def activate():\n try:\n body = request.get_json()\n\n activate_token = body[\"activate_token\"]\n password = body[\"password\"]\n\n if len(password) < 3 or len(password) > 50:\n return bad_request()\n\n if not models.token_exists(activate_token):\n\n return bad_request()\n\n student_hash = models.create_hash(password)\n models.save_hash(student_hash, activate_token)\n\n except KeyError:\n return bad_request()\n except Exception as e:\n print(e)\n return server_error()\n\n return created()",
"def activate_account_api():\n\n # get the data for this query\n data = request.get_json()\n if not data:\n response = jsonify({\n 'success': False,\n 'message': 'Missing request body'\n })\n response.status_code = 422\n return response\n\n # process arguments\n arg_email = data.get('email').strip().lower()\n\n # check if there is a user with this activation_link\n secret_link = data.get('secret_link')\n user = db.session.query(User).filter(\n User.activation_link == secret_link,\n ).one_or_none()\n if not user:\n response = jsonify({\n 'success': False,\n 'message': 'This activation link is no longer active. Contact your system administrator to receive a new one.'\n })\n response.status_code = 200\n return response\n\n # check if this user has already activated their account\n if user.activated:\n response = jsonify({\n 'success': False,\n 'message': 'This account has already been activated. Try forgot password to recover your password.'\n })\n response.status_code = 200\n return response\n\n # check if the correct email address was supplied\n if user.email != arg_email:\n response = jsonify({\n 'success': False,\n 'message': 'This is not the correct email for this activation link. Contact your system administrator to request a link for this email.'\n })\n response.status_code = 200\n return response\n\n # generate and set new password\n new_password = generate_password_hash(data.get('password'))\n user.password = new_password\n user.activated = True\n db.session.add(user)\n db.session.commit()\n\n # log that a user just activated their account\n _log('++ {} just activated their account'.format(user.email), '_signup')\n\n # return authenticated token\n token = generate_auth_token(user_id=user.user_id)\n response = jsonify({\n 'success': True,\n 'token': token\n })\n response.status_code = 200\n return response",
"def activate(request, uidb64, token):\n try:\n uid = force_text(urlsafe_base64_decode(uidb64))\n user = User.objects.get(pk=uid)\n except(TypeError, ValueError, OverflowError, User.DoesNotExist):\n user = None\n if user is not None and account_activation_token.check_token(user, token):\n user.is_active = True\n user.save()\n return render(request, 'accounts/active_done.html')\n else:\n return HttpResponse('Activation link is invalid!')",
"def activate(request, uidb64, token):\r\n\ttry:\r\n\t\tuid = force_text(urlsafe_base64_decode(uidb64))\r\n\t\tuser = User.objects.get(pk=uid)\r\n\texcept (TypeError, ValueError, OverflowError, User.DoesNotExist):\r\n\t\tuser = None\r\n\r\n\tif user is not None and account_activation_token.check_token(user, token):\r\n\t\t# User activated and redirected to the homepage\r\n\t\tuser.is_active = True\r\n\t\tuser.profile.email_confirmed = True\r\n\t\tuser.save()\r\n\t\tlogin(request, user, backend='django.contrib.auth.backends.ModelBackend')\r\n\t\tgames = Game.objects.all()\r\n\t\treturn redirect('/', {'games': games, 'MEDIA_URL': settings.MEDIA_URL})\r\n\telse:\r\n\t\treturn render(request, 'account_activation_invalid.html')",
"def test_valid_activation(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIsInstance(user, UserModel())\n self.assertEqual(user.id, new_user.id)\n self.assertTrue(user.is_active)\n self.assertTrue(activated)\n\n profile = self.registration_profile.objects.get(user=new_user)\n self.assertTrue(profile.activated)",
"def test_valid_activation(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n self.assertIsInstance(user, UserModel())\n self.assertEqual(user.id, new_user.id)\n self.assertFalse(user.is_active)\n self.assertTrue(activated)\n\n profile = self.registration_profile.objects.get(user=new_user)\n self.assertTrue(profile.activated)",
"def activate_user(username, code, new_pass):\r\n\r\n qry = Activation.query.\\\r\n filter(Activation.code == code).\\\r\n filter(User.username == username)\r\n\r\n res = qry.first()\r\n\r\n if UserMgr.acceptable_password(new_pass) and res is not None:\r\n user = res.user\r\n user.activated = True\r\n user.password = new_pass\r\n res.activate()\r\n\r\n LOG.debug(dict(user))\r\n\r\n return True\r\n else:\r\n return None",
"def test_activate_user(self):\n activated_user = (RegistrationProfile.objects\n .activate_user(self.activation_key))\n self.assertTrue(activated_user.registrationprofile.activated)\n self.assertFalse(activated_user.is_active)",
"def activate_user(self, email):\r\n activation_key = Registration.objects.get(user__email=email).activation_key\r\n # and now we try to activate\r\n check_for_get_code(self, 200, reverse('activate', kwargs={'key': activation_key}))\r\n # Now make sure that the user is now actually activated\r\n self.assertTrue(User.objects.get(email=email).is_active)",
"def account_activate(request):\r\n params = request.params\r\n\r\n username = params.get('username', None)\r\n activation = params.get('code', None)\r\n password = params.get('password', None)\r\n new_username = params.get('new_username', None)\r\n\r\n if username is None and activation is None and password is None:\r\n # then try to get the same fields out of a json body\r\n json_body = request.json_body\r\n username = json_body.get('username', None)\r\n activation = json_body.get('code', None)\r\n password = json_body.get('password', None)\r\n new_username = json_body.get('new_username', None)\r\n\r\n if not UserMgr.acceptable_password(password):\r\n request.response.status_int = 406\r\n return _api_response(request, {\r\n 'error': \"Come on, pick a real password please\",\r\n })\r\n\r\n username = username.lower()\r\n new_username = new_username.lower() if new_username else None\r\n res = ActivationMgr.activate_user(\r\n username,\r\n activation,\r\n password)\r\n\r\n if res:\r\n # success so respond nicely\r\n AuthLog.reactivate(username, success=True, code=activation)\r\n\r\n # if there's a new username and it's not the same as our current\r\n # username, update it\r\n if new_username and new_username != username:\r\n try:\r\n user = UserMgr.get(username=username)\r\n user.username = new_username\r\n except IntegrityError, exc:\r\n request.response.status_int = 500\r\n return _api_response(request, {\r\n 'error': 'There was an issue setting your new username',\r\n 'exc': str(exc)\r\n })\r\n\r\n return _api_response(request, {\r\n 'message': \"Account activated, please log in.\",\r\n 'username': username,\r\n })\r\n else:\r\n AuthLog.reactivate(username, success=False, code=activation)\r\n request.response.status_int = 500\r\n return _api_response(request, {\r\n 'error': \"There was an issue attempting to activate this account.\",\r\n })",
"def activate_user(self, activation_key, request=None):\n # Make sure the key we're trying conforms to the pattern of a\n # SHA1 hash; if it doesn't, no point trying to look it up in\n # the database.\n if SHA1_RE.search(activation_key):\n try:\n profile = self.get(activation_key=activation_key)\n except self.model.DoesNotExist:\n profile = None\n statsd.incr('user.activate-error.does-not-exist')\n reason = 'key not found'\n if profile:\n if not profile.activation_key_expired():\n user = profile.user\n user.is_active = True\n user.save()\n\n # We don't need the RegistrationProfile anymore, delete it.\n profile.delete()\n\n # If user registered as contributor, send them the\n # welcome email.\n if user.groups.filter(name=CONTRIBUTOR_GROUP):\n self._send_email(\n confirmation_profile=profile,\n url=None,\n subject=_('Welcome to SUMO!'),\n text_template='users/email/contributor.ltxt',\n html_template='users/email/contributor.html',\n send_to=user.email,\n contributor=user)\n\n return user\n else:\n statsd.incr('user.activate-error.expired')\n reason = 'key expired'\n else:\n statsd.incr('user.activate-error.invalid-key')\n reason = 'invalid key'\n\n log.warning(u'User activation failure ({r}): {k}'.format(\n r=reason, k=activation_key))\n\n return False",
"def account_activate(request, uidb64, token):\r\n try:\r\n # decode the user's id and get the user by id.\r\n user_id = smart_str(urlsafe_base64_decode(uidb64))\r\n user = get_object_or_404(User, id=user_id)\r\n if user.is_active:\r\n # Display already activated account message\r\n messages.success(request, f'Your Account already activated. You can login.', extra_tags='activation-valid')\r\n # check if the token is valid.\r\n elif account_activation_token.check_token(user, token):\r\n user.is_active = True\r\n # user.previously_logged_in = True\r\n user.save()\r\n # Display activation success message\r\n messages.success(request, f'Your Account has been activated successfully. Now you can login.', extra_tags='activation-valid') \r\n else:\r\n # Display error message.\r\n messages.error(request, f'The activation link is invalid. Please request a new one.', extra_tags='activation-invalid') \r\n except DjangoUnicodeDecodeError:\r\n # Display error message.\r\n messages.error(request, f'The activation link is invalid. Please request a new one.', extra_tags='activation-invalid') \r\n return redirect('accounts:login')",
"def test_activation(self):\n reg_profile = RegisterProfile.objects.create_profile(\n 'TestName', '[email protected]', 'asdf1234')\n\n kwargs = {'activation_key': reg_profile.activation_key}\n response = self.client.post(reverse('users.activate', kwargs=kwargs))\n eq_(200, response.status_code)\n\n # Test relations\n u = User.objects.get(email='[email protected]')\n eq_(u.get_profile().display_name, 'TestName')",
"def _activate_user(self, email):\r\n activation_key = registration(email).activation_key\r\n\r\n # and now we try to activate\r\n resp = self.client.get(reverse('activate', kwargs={'key': activation_key}))\r\n return resp",
"def test_activation_already_activated(self):\n new_user = self.registration_profile.objects.create_inactive_user(\n site=Site.objects.get_current(), **self.user_info)\n profile = self.registration_profile.objects.get(user=new_user)\n self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n\n profile = self.registration_profile.objects.get(user=new_user)\n user, activated = self.registration_profile.objects.activate_user(\n profile.activation_key, Site.objects.get_current())\n self.assertEqual(user, new_user)\n self.assertFalse(activated)"
] | [
"0.68785006",
"0.6514229",
"0.64548576",
"0.6426541",
"0.64197016",
"0.63910127",
"0.6378009",
"0.63755804",
"0.63652694",
"0.62683344",
"0.6250443",
"0.6241231",
"0.6199857",
"0.6191263",
"0.6141303",
"0.6119849",
"0.6073964",
"0.606942",
"0.6056606",
"0.60392773",
"0.5997497",
"0.5988625",
"0.59390616",
"0.5938386",
"0.59271014",
"0.5918059",
"0.5898285",
"0.58723724",
"0.58595425",
"0.58558905"
] | 0.78744406 | 0 |
Convert base64 string with headers to image | def convert_str_to_image(image_string):
image = image_string.partition('base64,')[2]
img_data = base64.b64decode(image)
return img_data | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def formatImage(imgData):\n imgstr = re.search(b'base64,(.*)', imgData).group(1)\n with open('output.png','wb') as output:\n output.write(base64.decodebytes(imgstr))",
"def base64_decode_image(inStr):\n imgDat, imgType, imgShape = json.loads(inStr)\n imgDat = bytes(imgDat, encoding=\"utf-8\")\n\n imgDat = base64_decode_array(imgDat, imgType)\n imgDat = imgDat.reshape(imgShape)\n return imgDat",
"def _get_image(x):\n return b64encode(x).decode('ascii')",
"def convertdataTOimage(data):\n data = data.partition(\",\")[2]\n padding = len(data)%4\n data += \"=\"*padding\n image = Image.open(BytesIO(b64decode(data)))\n return image",
"def base_64_to_img(base_64_string):\r\n # convert image into np array\r\n return cv2.imdecode(\r\n np.frombuffer(base64.b64decode(base_64_string.split(\";base64,\").pop()), np.uint8),\r\n cv2.IMREAD_COLOR)",
"def process_image(self, base64_string: str) -> str:\n self.convert_base64_to_image(base64_string)\n self.corp_image()\n self.change_image_pixels()\n return self.image_to_string()",
"def base64(self):\n image = self.png.getvalue()\n return base64.encodestring(image).decode('utf-8')",
"def convertImage(img):\n return '\\\\includegraphicsdata{%s}' % \":\".join([\n 'data',\n img.contentType,\n \"base64,%s\" % img.data.encode(\"base64\").replace(\"\\n\", \"\"),\n ])",
"def encode_image(image):\n return base64.b64encode(image).decode('ascii')",
"def to_image_data(data):\n \n # removing image\n if not data:\n return u''\n\n # image path (not changed)\n if data[0:5] != u'data:':\n return None\n \n # TODO: better MIME handling\n mime = data[5:data.index(u';')].lower()\n img = data[data.index(u',') + 1:].decode('base64')\n \n return mime, img",
"def base64_to_image(base64_image):\n return Image.open(io.BytesIO(base64.b64decode(base64_image)))",
"def b64_image(self) -> bytes:\n buffer = BytesIO()\n self.image.save(buffer, \"PNG\") \n im_b64 = base64.b64encode(buffer.getvalue())\n im_b64 = b\"data:image/png;base64,\" + im_b64\n return im_b64",
"def deserialise_image(data):\n if \"data:image\" in data:\n data = data[data.find(\",\") + 1:]\n\n return Image.open(io.BytesIO(base64.urlsafe_b64decode(data)))",
"def img_to_base64(img):\n with io.BytesIO() as output:\n img.save(output, format=\"PNG\")\n img_string = base64.b64encode(output.getvalue())\n return img_string.decode(\"utf-8\")",
"def image_to_base64str(image):\n file_bytes = image.file.read()\n base64_img_str = 'data:image;base64, '\n base64_img_str += str(base64.b64encode(file_bytes), 'utf-8')\n return base64_img_str",
"def convert_base64_to_image(self, image_in_base64):\n image_in_base64 = str(image_in_base64).replace('data:image/jpeg;base64,', '')\n image_data = base64.b64decode(image_in_base64)\n\n # Save image as image file\n with open(self.captcha_image_filename, 'wb') as file:\n file.write(image_data)",
"def base64ify(image_data: bytes):\n # Convert the avatar to base64.\n mimetype = imghdr.what(None, image_data)\n if not mimetype:\n raise ValueError(\"Invalid image type\")\n\n b64_data = base64.b64encode(image_data).decode()\n return \"data:{};base64,{}\".format(mimetype, b64_data)",
"def base64_string(self) -> global___Expression:",
"def decode_base64(data):\n\n image = None\n try:\n image = base64.decodestring(data)\n except:\n print \"Could not decode base64 image from json\"\n\n return image",
"def prepare_output(image: np.ndarray) -> str:\n response_image = Image.fromarray(np.uint8(image * 255))\n buffer = BytesIO()\n response_image.save(buffer, \"PNG\")\n encoded = base64.b64encode(buffer.getvalue())\n return \"data:image/png;base64,\" + str(encoded)[2:-1]",
"def image_to_base64(image, format='JPEG'):\n in_mem_file = io.BytesIO()\n image.save(in_mem_file, format=format)\n # reset file pointer to start\n in_mem_file.seek(0)\n img_bytes = in_mem_file.read()\n base64_bstr = base64.b64encode(img_bytes)\n return base64_bstr.decode('ascii')",
"def convert_photo(link):\n\n image = open(link, \"rb\") #Open binary file in read-only mode\n image_read = image.read()\n image_base64 = base64.b64encode(image_read)\n\n return image_base64",
"def base64_to_PIL(string):\n try:\n base64_data = base64.b64decode(string)\n img = Image.open(BytesIO(base64_data)).convert('RGB')\n return img\n except:\n return None",
"def getbase64(nparr,):\n if type(nparr) == type({}):\n nparr = nparr['img']\n im = Image.fromarray(nparr)\n buf = BytesIO()\n im.save(buf,format=\"JPEG\")\n return base64.b64encode(buf.getvalue()).decode('ascii')",
"def base64_encode_image(inArray):\n imgDat = [base64_encode_array(inArray).decode(\"utf-8\")]\n imgType = str(inArray.dtype)\n imgShape = inArray.shape\n return json.dumps([ imgDat, imgType, imgShape ])",
"def get_image_base64(path):\n with open(path, 'r') as img:\n return base64.b64encode(img.read())",
"def encodedImage(imageFile):\n imageFile = \"\".join([METRICS_PATH, imageFile])\n encoded = base64.b64encode(open(imageFile, 'rb').read())\n return 'data:image/jpg;base64,{}'.format(encoded.decode())",
"def to_internal_value(self, data):\n if isinstance(data, str) and data.startswith('data:image'):\n # Found image is encoded, and must be decoded\n format, imgstr = data.split(';base64,')\n ext = format.split('/')[-1] # Extract file extension\n id = uuid.uuid4()\n data = ContentFile(base64.b64decode(imgstr), name = id.urn[9:] + '.' + ext)\n return super(Base64ImageField, self).to_internal_value(data)",
"def save_img_base64(_preds):\n img = Image.fromarray(_preds)\n buff = BytesIO()\n img.save(buff, format=\"JPEG\")\n return base64.b64encode(buff.getvalue())",
"def encode(pixels):\n # save the image to a bytes buffer\n buffered = BytesIO()\n image = Image.fromarray(pixels.astype('uint8'))\n image = image.convert('RGB')\n image.save(buffered, format=\"PNG\")\n\n # decode the bytes as a string\n img_str = base64.b64encode(buffered.getvalue()).decode('utf-8')\n\n return img_str"
] | [
"0.746931",
"0.73137474",
"0.7189252",
"0.7102462",
"0.7000746",
"0.69148093",
"0.6903235",
"0.68649226",
"0.6849316",
"0.67165977",
"0.67024094",
"0.668832",
"0.66554683",
"0.66335756",
"0.66325825",
"0.66167563",
"0.65795285",
"0.6556288",
"0.6536883",
"0.65069014",
"0.6489022",
"0.6463567",
"0.64581",
"0.6457404",
"0.6407959",
"0.6400545",
"0.63587266",
"0.6321964",
"0.62910575",
"0.6263006"
] | 0.74544173 | 1 |
List all available boxes in the catalog. | def list(self, arguments):
print("{}\t{}".format(
'BOX'.rjust(35),
'VERSION'.rjust(12),
))
path = os.path.abspath(os.path.join(HOME, 'boxes'))
for root, dirnames, filenames in os.walk(path):
for filename in fnmatch.filter(filenames, '*.box'):
directory = os.path.dirname(os.path.join(root, filename))[len(path) + 1:]
account, box, version = (directory.split(os.path.sep, 2) + ['', ''])[:3]
print("{}\t{}".format(
"{}/{}".format(account, box).rjust(35),
version.rjust(12),
)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"async def list(self, ctx):\n server = ctx.message.server\n if server.id not in self.db:\n self.db[server.id] = {}\n dataIO.save_json(\"data/lootbox/servers.json\", self.db)\n if len(self.db[server.id]) < 1:\n await self.bot.say(\"No boxes have been created for this server yet, please create some using [p]box create\"\n \" first, thanks\")\n return\n boxes = self.db[server.id].keys()\n await self.bot.say(\"Here are this server's boxes:\\n{}\".format(\"\\n\".join(boxes)))",
"def _available_boxes(self, graph):\n return sorted([node.name for node in graph.available_nodes()\n if not isinstance(node.meta, Ibox)])",
"def _get_boxes(self):\n return self._boxes",
"def cli(boxes, show_all):\n\n box_status = []\n for project, project_boxes in boxes.iteritems():\n for box in project_boxes:\n # add some nice colors to box status\n status = box.status()\n if not show_all and status == 'not created':\n continue\n color_status = {\n 'running': click.style('running', fg='green'),\n 'saved': click.style('saved', fg='blue'),\n 'poweroff': click.style('powered off', fg='yellow'),\n 'not created': click.style('not created', fg='red'),\n }.get(status, status)\n\n box_status.append({\n 'project': project.name(),\n 'name': box.name(),\n 'image': box.image(),\n 'status': color_status\n })\n\n box_status = sorted(box_status, key=_status_sort)\n status_table.echo(box_status)",
"def get_boxes(self) -> List[Box]:\n return [Box.from_npbox(npbox) for npbox in self.boxlist.get()]",
"def base_boxes():\n return sorted(list(set([name for name, provider in _box_list()])))",
"def test_get_all_boxes(self, postfix_directory):\n print(\"Test_All_Boxes\")\n protein_file = os.path.join(postfix_directory, \"PfATP4.pdb\")\n ligand_file = os.path.join(postfix_directory, \"SJ733.pdb\")\n coords = rdkit_util.load_molecule(protein_file)[0]\n\n boxes = dc.dock.binding_pocket.get_all_boxes(coords)\n assert isinstance(boxes, list)\n # Pocket is of form ((x_min, x_max), (y_min, y_max), (z_min, z_max))\n for pocket in boxes:\n assert len(pocket) == 3\n assert len(pocket[0]) == 2\n assert len(pocket[1]) == 2\n assert len(pocket[2]) == 2\n (x_min, x_max), (y_min, y_max), (z_min, z_max) = pocket\n assert x_min < x_max\n assert y_min < y_max\n assert z_min < z_max",
"def boxes(self) -> dict:\n return self.data[\"boxes\"]",
"def list(self, arguments):\n print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(\n 'NAME'.rjust(20),\n 'ADDRESS'.rjust(15),\n 'BOX'.rjust(35),\n 'VERSION'.rjust(12),\n 'PATH',\n ))\n for instance_name, instance in utils.instances().items():\n path = instance.get('path')\n if path and os.path.exists(path):\n self.activate(instance_name)\n mech_path = os.path.join(path, '.mech')\n if os.path.exists(mech_path):\n vmx = self.get_vmx(silent=True)\n if vmx:\n vmrun = VMrun(vmx, user=self.user, password=self.password)\n lookup = self.get(\"enable_ip_lookup\", False)\n ip = vmrun.getGuestIPAddress(wait=False, quiet=True, lookup=lookup)\n else:\n ip = colored.red(\"invalid\")\n if ip is None:\n ip = colored.yellow(\"poweroff\")\n elif not ip:\n ip = colored.green(\"running\")\n else:\n ip = colored.green(ip)\n else:\n ip = \"\"\n box_name = self.box_name or \"\"\n box_version = self.box_version or \"\"\n print(\"{}\\t{}\\t{}\\t{}\\t{}\".format(\n colored.green(instance_name.rjust(20)),\n ip.rjust(15),\n box_name.rjust(35),\n box_version.rjust(12),\n path,\n ))",
"def catalogs(env):\n envs = environments()\n check_env(env, envs)\n\n if app.config['ENABLE_CATALOG']:\n nodenames = []\n catalog_list = []\n query = AndOperator()\n\n if env != '*':\n query.add(EqualsOperator(\"catalog_environment\", env))\n\n query.add(NullOperator(\"catalog_timestamp\", False))\n\n order_by_str = '[{\"field\": \"certname\", \"order\": \"asc\"}]'\n nodes = get_or_abort(puppetdb.nodes,\n query=query,\n with_status=False,\n order_by=order_by_str)\n nodes, temp = tee(nodes)\n\n for node in temp:\n nodenames.append(node.name)\n\n for node in nodes:\n table_row = {\n 'name': node.name,\n 'catalog_timestamp': node.catalog_timestamp\n }\n\n if len(nodenames) > 1:\n form = CatalogForm()\n\n form.compare.data = node.name\n form.against.choices = [(x, x) for x in nodenames\n if x != node.name]\n table_row['form'] = form\n else:\n table_row['form'] = None\n\n catalog_list.append(table_row)\n\n return render_template(\n 'catalogs.html',\n nodes=catalog_list,\n envs=envs,\n current_env=env)\n else:\n log.warn('Access to catalog interface disabled by administrator')\n abort(403)",
"def getCatalogs():",
"def list_catalogs(self):\n return self._json_object_field_to_list(\n self._get_catalogs_json(), self.__MISSION_STRING)",
"def list(self, all=False):\n return self.client.containers.list(all=all)",
"def get_catalogs(self):\n # Implemented from kitosid template for -\n # osid.resource.BinLookupSession.get_bins_template\n catalogs = self._get_provider_session('catalog_lookup_session').get_catalogs()\n cat_list = []\n for cat in catalogs:\n cat_list.append(Catalog(self._provider_manager, cat, self._runtime, self._proxy))\n return CatalogList(cat_list)",
"def build_boxes(self):\n for index in self.box_space.points:\n if self.rank_of_box[index] == self.my_rank:\n self.my_boxes.append(Box(self, index))",
"def get_bboxes(self, **kwargs):\n pass",
"def list_volumes(self):\n print '# Listing existing volumes'\n self.compute.list_volumes()",
"def ls():\n # TODO: listing all availabe containers form sequence\n return",
"def get_catalog_items(id):\n\n username = login_session.get('username', None)\n catalogs = session.query(Catalog).all()\n selected_catalog = session.query(Catalog).filter_by(id=id).one()\n items = selected_catalog.items\n catalogs_display = [\n {\n 'id': catalog.id,\n 'name': catalog.name\n } for catalog in catalogs]\n items_display = [{'id': item.id, 'title': item.title} for item in items]\n items_summary = '{0} Items ({1} items)'.format(\n selected_catalog.name,\n len(items_display))\n return render_template(\n 'home.html',\n catalogs_display=catalogs_display,\n items_display=items_display,\n items_summary=items_summary,\n username=username)",
"def box_menu(request):\n\n products = Product.objects.all()\n sizes = Size.objects.all()\n forsixs = Forsix.objects.all()\n categories = None\n\n if request.GET:\n category_name = request.GET['box']\n products = products.filter(category__name=category_name)\n categories = Category.objects.filter(name=category_name)\n\n context = {\n 'products': products,\n 'forsixs': forsixs,\n 'sizes': sizes,\n 'categories_selected': categories,\n }\n\n return render(request, 'products/shop.html', context)",
"def checkCatalogs():\n url = CHECKBASE % 'catalogs'\n catalogs = []\n try:\n fh = getURLHandle(url)\n #fh = urllib2.urlopen(url)\n data = fh.read()\n dom = minidom.parseString(data)\n fh.close()\n catalog_elements = dom.getElementsByTagName('Catalog')\n for catel in catalog_elements:\n if catel.firstChild is None:\n continue\n catalog = catel.firstChild.data.strip()\n if len(catalog):\n catalogs.append(str(catalog))\n except:\n raise Exception,\"Could not open %s to search for list of catalogs\" % url\n return catalogs",
"def get_boxes():\n boxes = []\n\n box_sizes = [256]\n left_x_cords = [x for x in range(0,1280,12)]\n top_y_cords = [y for y in range(360,720,12)]\n\n for box_size in box_sizes:\n for x_cord in left_x_cords:\n for y_cord in top_y_cords:\n if box_size+x_cord < 1280 and box_size+y_cord < 720:\n boxes.append([x_cord, y_cord, x_cord+box_size, y_cord+box_size])\n\n return boxes",
"def print_catalog(self):\n for book in self.books.keys():\n print(book)",
"def print_catalog(self):\n for book in self.books.keys():\n print(book)",
"def get_display_boxes(all_boxes):\n display_boxes = []\n display_classes = []\n for class_id, class_boxes in enumerate(all_boxes):\n for box in class_boxes:\n display_boxes.append(box)\n display_classes.append(class_id)\n return np.asarray(display_boxes), np.asarray(display_classes)",
"def get_catalog_options(self):\n catalog_api = CourseCatalogApiClient(self.user)\n catalogs = catalog_api.get_all_catalogs()\n # order catalogs by name.\n catalogs = sorted(catalogs, key=lambda catalog: catalog.get('name', '').lower())\n\n return BLANK_CHOICE_DASH + [\n (catalog['id'], catalog['name'],)\n for catalog in catalogs\n ]",
"def test_list_subnets(self):\n print(self.the_client.list_subnets())",
"def get_boxes(self):\r\n\r\n boxes = [(\" \", self.worldbox.tl, self.worldbox.br)]\r\n# boxes = []\r\n boxes += [(\".\", b.tl, b.br) for b in self.wallboxes]\r\n boxes += [(\"x\", b.tl, b.br) for b in self.targetboxes]\r\n agentscale = 100\r\n boxes += [(\"a\", (self.i_state[0] - self.dx * agentscale, self.i_state[1] - self.dx * agentscale),\r\n (self.i_state[0] + self.dx * agentscale, self.i_state[1] + self.dx * agentscale))]\r\n return boxes",
"def display_all(self) -> None:\n self.display.draw_list(self.read_all_statuses())",
"def get(self):\n return GenericGet().get_catalogs()"
] | [
"0.6789714",
"0.6467714",
"0.64102626",
"0.6343046",
"0.62305814",
"0.62055796",
"0.6174118",
"0.616372",
"0.6149185",
"0.584758",
"0.58460945",
"0.5771759",
"0.5769471",
"0.57662725",
"0.5710648",
"0.57082415",
"0.5686935",
"0.5678659",
"0.56130964",
"0.554044",
"0.55158705",
"0.55073845",
"0.55041116",
"0.55041116",
"0.5465971",
"0.54608077",
"0.5433157",
"0.5432442",
"0.5425828",
"0.5398975"
] | 0.6993446 | 0 |
Repackage the box that is in use in the current mech environment. | def repackage(self, arguments):
puts_err(colored.red("Not implemented!")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _provision_package(self):",
"def updaterun(self):\n self.comp('packmanager').updaterun_allpacks()",
"def updatecheck(self):\n self.comp('packmanager').updatecheck_allpacks()",
"def unintallpack(package_name: str) -> None:\n\tresp = subprocess.call(['pip', 'uninstall', '-y', package_name])",
"def repackage_state(self, state):\n state['hxs'] = state['hxs'].detach()\n state['cxs'] = state['cxs'].detach()\n return state",
"def confirmRebuild( self ):\n if( self.mode == \"install\" and self.rebuild ):\n input = ask_ok( self.name + \" at [ \" + self.installPath + \" ] is going to be rebuild, are you sure? [y/n] \" )\n if( not input ):\n self.mode = \"use\"\n self.rebuild = False",
"def test_reinstall_packages():\n\tassert packaging.install_packages(pkgs) == None",
"def releasePackage(self, pid):\n if pid in self.packages:\n del self.packages[pid]",
"def nuke_home_pypackage():\n\n try:\n os.remove(os.path.join(os.path.expanduser(\"~\"), \".pypackage\"))\n except:\n pass",
"def test_upgrade_without_fromrepo(self):\n pkg_cmd = MagicMock(return_value={\"retcode\": 0})\n\n with patch.dict(pkgng.__salt__, {\"cmd.run_all\": pkg_cmd}):\n with patch(\"salt.modules.pkgng.list_pkgs\", ListPackages()):\n result = pkgng.upgrade()\n expected = {\n \"gettext-runtime\": {\"new\": \"0.20.1\", \"old\": \"\"},\n \"p5-Mojolicious\": {\"new\": \"8.40\", \"old\": \"\"},\n }\n self.assertDictEqual(result, expected)\n pkg_cmd.assert_called_with(\n [\"pkg\", \"upgrade\", \"-y\"],\n output_loglevel=\"trace\",\n python_shell=False,\n )",
"def remove():\n run('pew rm {0}'.format(package_name()))",
"def patch_package(self, **kwargs):\n results = self.api.action.package_patch(**kwargs)\n self.get_ckan_metadata(True)\n return results",
"def update_openblock():\n\n tf = tempfile.mktemp(suffix='-openblock')\n local('git clone git://github.com/openplans/openblock.git {0}'.format(tf))\n dest = os.path.join(PROJECT_ROOT, 'requirements', 'sdists')\n for name in ('obadmin', 'ebdata', 'ebpub'):\n package = os.path.join(tf, name)\n os.chdir(package)\n local('pip install -e {source} -d {dest}'.format(source=package,\n dest=dest))\n shutil.rmtree(tf)",
"def _stage_pkg(self):\n context = self._config.context\n context.package.file = os.path.basename(context.package.arg)\n root_path = self._distro.root_mountspec.mountpoint\n stage_path = os.path.join(root_path, context.package.dir.lstrip('/'))\n context.package.full_path = os.path.join(stage_path, context.package.file)\n try:\n if any(protocol in context.package.arg for protocol in ['http://', 'https://']):\n self._download_pkg(context)\n else:\n self._move_pkg(context)\n except Exception:\n errstr = 'Exception encountered while staging package'\n log.critical(errstr)\n log.debug(errstr, exc_info=True)\n return False\n # reset to chrooted file path\n context.package.arg = os.path.join(context.package.dir, context.package.file)\n return True",
"def update_egg(self):\n valid_egg_types = ['LOCAL', 'REUSE']\n egg_type = config.get_param('LYDIAN_EGG_TYPE')\n egg_type = egg_type.upper()\n vals = ','.join(valid_egg_types)\n err_msg = \"Invalid Egg Type. Valid values are : {%s}\" % vals\n assert egg_type in valid_egg_types, err_msg\n\n if egg_type == 'LOCAL':\n common_util.remove_egg() # Generate fresh egg.\n\n # Generate egg from local installtion if egg not already present.\n install.install_egg()",
"def clean_start(self, package_name):\n self.reset_active_settings()\n return self.activate_package(package_name)",
"def delete(self):\n self.package = None",
"def remove_package(package, remote):\n flavor = remote.os.package_type\n if flavor == 'deb':\n pkgcmd = ['DEBIAN_FRONTEND=noninteractive',\n 'sudo',\n '-E',\n 'apt-get',\n '-y',\n 'purge',\n '{package}'.format(package=package)]\n elif flavor == 'rpm':\n # FIXME: zypper\n pkgcmd = ['sudo',\n 'yum',\n '-y',\n 'erase',\n '{package}'.format(package=package)]\n else:\n log.error('remove_package: bad flavor ' + flavor + '\\n')\n return False\n return remote.run(args=pkgcmd)",
"def _setPackage(self, package):\n if package is self._package: return\n oldPackage = self._package\n if self._package:\n siblings = self._package.resources[self.checksum]\n siblings.remove(self)\n if len(siblings) == 0:\n del self._package.resources[self.checksum]\n oldPath = self.path\n else:\n assert hasattr(self, '_originalFile')\n oldPath = self._originalFile\n self._package = package\n if self._package:\n self._addOurselvesToPackage(oldPath)\n if oldPackage and self.checksum not in oldPackage.resources:\n if oldPath.exists():\n oldPath.remove()\n else:\n log.error(\"Tried to delete a resource that's already not there anymore: \"\n \"filename=\\\"%s\\\" userName=\\\"%s\\\"\" % (oldPath, self.userName))\n if self._idevice and self._idevice.parentNode.package is not self._package:\n self._idevice.userResources.remove(self)\n self._idevice = None",
"def _unprovision_node(self, conn):\n conn.run(f\"rm -rf {EXPORTER_HOME}\")",
"def firmware_pack_modify(handle, org_name, name, rack_bundle_version=None,\n blade_bundle_version=None, descr=None, mode=None,\n org_parent=\"org-root\"):\n\n org_dn = org_parent + \"/org-\" + org_name\n fw_dn= org_dn + \"/fw-host-pack-\" + name\n mo = handle.query_dn(fw_dn)\n if mo is not None:\n if rack_bundle_version is not None:\n mo.rack_bundle_version = rack_bundle_version\n if blade_bundle_version is not None:\n mo.blade_bundle_version = blade_bundle_version\n if mode is not None:\n mo.mode=mode\n if descr is not None:\n mo.descr = descr\n\n handle.set_mo(mo)\n handle.commit()\n else:\n log.info(\"Firmware host pack <%s> not found.\" % name)",
"def test_lock_handle_eggs(PipenvInstance):\n with PipenvInstance() as p:\n with open(p.pipfile_path, 'w') as f:\n f.write(\"\"\"\n[packages]\nRandomWords = \"*\"\n \"\"\")\n c = p.pipenv('lock --verbose')\n assert c.return_code == 0\n assert 'randomwords' in p.lockfile['default']\n assert p.lockfile['default']['randomwords']['version'] == '==0.2.1'",
"def reset_package(self):\n # FIXME: this state does not make sense\n self.package_set = False\n self.package_spdx_id_set = False\n self.package_vers_set = False\n self.package_file_name_set = False\n self.package_supplier_set = False\n self.package_originator_set = False\n self.package_down_location_set = False\n self.package_files_analyzed_set = False\n self.package_home_set = False\n self.package_verif_set = False\n self.package_chk_sum_set = False\n self.package_source_info_set = False\n self.package_conc_lics_set = False\n self.package_license_declared_set = False\n self.package_license_comment_set = False\n self.package_cr_text_set = False\n self.package_summary_set = False\n self.package_desc_set = False\n self.package_comment_set = False\n self.pkg_ext_comment_set = False",
"def reset(self):\n self._pkgs.clear()\n self._catalogs.clear()\n self._categories.clear()\n self._command_to_category.clear()\n self._version = None",
"def upgrade(self):\n # The workaround we need in order to fix [1]. In few words,\n # when new Docker is installed the containers MUST NOT start\n # again because in this case puppet inside them will install\n # latest packages and breaks dependencies in some soft.\n #\n # [1]: https://bugs.launchpad.net/fuel/+bug/1455419\n self.supervisor.stop_all_services()\n\n self.install_repos()\n self.update_repo()\n self.install_packages()\n self.run_puppet()",
"def m_DownPkgAndTar(self,pkgURL,machineIps,port,username,password):\n packageName = pkgURL.split(\"/\")[-1]\n execmd = \"cd /root\\nwget \" + pkgURL + \"\\ntar -xzvf \" + packageName\n for k, v in machineIps.items():\n b.sshclient_execmd(k, port,username,password,execmd)",
"def auto_upgrade(force, package):\n if not package or package == PICKLEY: # pragma: no cover, exercised via test_bootstrap() functional test\n manifest = bootstrap()\n if not package:\n if not manifest:\n inform(\"Pickley is already bootstrapped\")\n\n sys.exit(0) # When called without 'package' specified: intent was to bootstrap only\n\n # We were called by auto-upgrade wrapper (in the background)\n auto_upgrade_v1(CFG)\n if manifest:\n sys.exit(0) # Bootstrap already got us up-to-date\n\n pspec = PackageSpec(CFG, package)\n ping = pspec.ping_path\n if not force and runez.file.is_younger(ping, CFG.version_check_delay(pspec) * 60):\n LOG.debug(\"Skipping auto-upgrade, checked recently\")\n sys.exit(0)\n\n runez.touch(ping)\n if runez.file.is_younger(pspec.lock_path, CFG.install_timeout(pspec) * 60):\n LOG.debug(\"Lock file present, another installation is in progress\")\n sys.exit(0)\n\n perform_install(pspec, is_upgrade=True, force=False, quiet=True)",
"def YumUninstall(vm):\n _Uninstall(vm)",
"def upgrade_packages():\n\n require('environment', provided_by=env.environments)\n system.update_apt_sources()\n system.upgrade_apt_packages()",
"def fix_leanpkg_bug():\n leanpkg = Path('leanpkg.toml')\n conf = leanpkg.read_text()\n m = LEAN_VERSION_RE.match(conf)\n if m:\n ver = m.group(1)\n leanpkg.write_text(conf.replace(ver, 'leanprover-community/lean:'+ver))"
] | [
"0.6154953",
"0.58410996",
"0.5439497",
"0.5417384",
"0.5338681",
"0.5287142",
"0.52402675",
"0.5207523",
"0.51990336",
"0.5193477",
"0.51707906",
"0.50902545",
"0.5075615",
"0.50582254",
"0.50577927",
"0.502458",
"0.4999124",
"0.49943903",
"0.4990816",
"0.49826893",
"0.4978645",
"0.4966196",
"0.4950221",
"0.49495357",
"0.49339184",
"0.4924493",
"0.4922886",
"0.49198872",
"0.49024782",
"0.49000984"
] | 0.588097 | 1 |
Initializes a new mech environment by creating a Mechfile. | def init(self, arguments):
url = arguments['<location>']
if url:
name = arguments['<name>']
else:
url = arguments['<name>']
name = None
version = arguments['--box-version']
instance_name = arguments['--name']
force = arguments['--force']
requests_kwargs = utils.get_requests_kwargs(arguments)
if os.path.exists('Mechfile') and not force:
puts_err(colored.red(textwrap.fill(
"`Mechfile` already exists in this directory. Remove it "
"before running `mech init`."
)))
return
puts_err(colored.green("Initializing mech"))
if utils.init_mechfile(instance_name, url, name=name, version=version, requests_kwargs=requests_kwargs):
puts_err(colored.green(textwrap.fill(
"A `Mechfile` has been initialized and placed in this directory. "
"You are now ready to `mech up` your first virtual environment!"
)))
else:
puts_err(colored.red("Couldn't initialize mech")) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __init__(self): \n\t\n\t # get the environment\n\t\tself.env = env()",
"def SetUp(self):\n self.buffer_file = buffer_file_common.BufferFile(\n self.args, self.logger.name, self.GetDataDir())\n\n self.attachments_tmp_dir = os.path.join(self.GetDataDir(),\n _TEMPORARY_ATTACHMENT_DIR)\n # Remove the attachments tmp dir, if Instalog terminated last time.\n if os.path.exists(self.attachments_tmp_dir):\n shutil.rmtree(self.attachments_tmp_dir)\n file_utils.TryMakeDirs(self.attachments_tmp_dir)",
"def initFromEnv(self):\n #self.command = 'scram' # SB I think this line is not needed\n self[\"SCRAM_ARCH\"] = None\n\n if 'SCRAM_ARCH' in os.environ:\n self[\"SCRAM_ARCH\"] = os.environ[\"SCRAM_ARCH\"]\n else:\n stdout, _, _ = execute_command(command='scram arch')\n self[\"SCRAM_ARCH\"] = stdout\n\n try:\n self[\"CMSSW_BASE\"] = os.environ[\"CMSSW_BASE\"]\n self[\"CMSSW_VERSION\"] = os.environ[\"CMSSW_VERSION\"]\n# Commenting these two out. I don't think they are really needed\n# self.cmsswReleaseBase = os.environ[\"CMSSW_RELEASE_BASE\"]\n# self.localRT = os.environ[\"LOCALRT\"]\n except KeyError as ke:\n self[\"CMSSW_BASE\"] = None\n self[\"CMSSW_VERSION\"] = None\n# self.cmsswReleaseBase = None\n# self.localRT = None\n msg = \"Please make sure you have setup the CMS enviroment (cmsenv). Cannot find %s in your env\" % str(ke)\n msg += \"\\nPlease refer to https://twiki.cern.ch/twiki/bin/view/CMSPublic/WorkBookCRAB3Tutorial#CMS_environment for how to setup the CMS enviroment.\"\n raise EnvironmentException(msg)",
"def initialise(self, args, environ):",
"def __init__(self, file):\n self.file = file\n\n self.env = None\n self.brain_name = None\n self.action_size = None\n self.state_size = None\n\n self.n_agents = None\n self.state = EnvEnum.idle",
"def setUp(self):\n BuilderTestsMixin.setUp(self)\n self.builder = ManBuilder()\n self.manDir = FilePath(self.mktemp())\n self.manDir.createDirectory()",
"def setUp(self):\n\n self.hw = HMMERWrapper\n\n modpath = os.path.abspath(os.path.dirname(__file__))\n self.seqfile = os.path.join(modpath, 'data', 'P00929.fasta')\n self.badfile = os.path.join(modpath, 'data', 'bad.fasta')",
"def __init__(self):\n self.model = {'mol':[], 'nmol':0}\n self.template = {} \n self.config = {}\n self.config['tfile'] = 'gau-template-bsse.gjf'\n self.config['xyzfile'] = 'model.xyz'\n self.config['jobfile'] = 'gau.gjf'\n self.config['job_prefix'] = self.config['jobfile'].split(\".\")[0]\n self.config['incr'] = 1\n \n self.rd_cmd_stream()\n return",
"def __init__(self):\n\n self.root_path = os.path.dirname(os.path.abspath(__file__))[:-5]\n self.config_path = os.path.join(self.root_path, \"files\\\\CONFIG.txt\")\n self.metrics_path = os.path.join(self.root_path, \"files\\\\metrics.txt\")\n\n self.setup_metrics_file()\n\n if self.check_configuration() is False:\n self.setup_configuration_file()",
"def __init__(self, env, system=None):\n self._env = env\n self._system = system if system is not None else {}",
"def setUp(self):\n self.ConFem_ = ConFem.ConFem()\n self.ConSimFem_ = ConSimFem.ConSimFem()\n self.NameLog = \"../_DataShellsSlabs/tmp\"",
"def __init__(self, file_path):\r\n self.file_path = Path(file_path)\r\n self.fname = self.file_path.name\r\n self.d_stgs = settings.DisplaySettings()\r\n self.c_stgs = settings.CalculationSettings()\r\n logger.info(f'{self} create')",
"def __init__(self, path_to_the_file):",
"def __init__(self, path_to_the_file):",
"def initialize():\n environment = Environment()\n environment.setup()",
"def __init__(self):\r\n\t\tself.introducer()\r\n\t\tif self.code_mode == \"1\":\r\n\t\t\tif self.input_mode == \"1\":\r\n\t\t\t\tself.encrypt_message()\r\n\t\t\telse:\r\n\t\t\t\tself.encrypt_text_file()\r\n\t\t\t\t#print(\"work in progress\")\r\n\t\telif self.code_mode == \"2\":\r\n\t\t\tif self.input_mode == \"1\":\r\n\t\t\t\tself.decrypt_message()\r\n\t\t\telse:\r\n\t\t\t\tself.decrypt_text_file()\r\n\t\telse:\r\n\t\t\tif self.input_mode == \"1\":\r\n\t\t\t\tself.hack_message()\r\n\t\t\telse:\r\n\t\t\t\tself.hack_text_file()",
"def init(self):\n self.data_dir = self.data_dir or default_data_path / self._name_\n self.cache_dir = self.data_dir / \"cache\"\n assert self.level in [\n \"word\",\n \"char\",\n ], f\"level {self.level} not supported\"",
"def __init__(self):\n self.filepath = os.path.dirname(__file__)\n self.filepath = os.path.join(self.filepath, \"Datenbank\")\n self.filepath_render_database = os.path.join(self.filepath, self.render_database)\n self.filepath_object_database = os.path.join(self.filepath, self.object_database)\n self.filepath_output_database = os.path.join(self.filepath, self.output_database)\n self.create_database()",
"def init():\n env = Environment(5, 5, 20, [10, 20, 10, 5])\n return env",
"def __init__(self, hist_file):\n self.setup_cmd2(hist_file)\n\n self.prompt = self.make_prompt()\n\n self.exploit = None\n self.exploit_name = ''\n self.input = None\n self.output = None\n self.options = Options()\n self.defaulted_options = []\n\n self.script_mode = False\n self.shortcuts = cmd2.DEFAULT_SHORTCUTS",
"def initializeMagicFile(self):\n \n if not self.magicType or self.magicType==\"magic\":\n try:\n import magicgit\n self.magicgit=magicgit\n self.magicType=\"magicgit\"\n except ImportError:\n self.logger.debug(\"Importing of library magicgit was unsuccesful.\")\n \n if not self.magicType:\n try:\n import magic\n self.magic=magic\n self.magicType=\"magic\"\n except ImportError:\n self.logger.warning(\"Importing of library magic was unsuccesful. Install package python-magic.\")",
"def init(ctx, project_root, mkdir, level, formatter, log_path):\n conf_content = get_tpl('logme', level=level, formatter=formatter, filename=log_path)\n\n config = get_config(conf_content)\n\n abs_path = Path(project_root).resolve()\n conf_location = abs_path.joinpath('logme.ini')\n\n if not abs_path.exists():\n if not mkdir:\n raise NotADirectoryError(f\"{abs_path.parent.resolve() / project_root} does not exist. If you'd \"\n f\"like to make the directory, please use '-mk' flag.\")\n else:\n abs_path.mkdir(parents=True, exist_ok=True)\n\n with conf_location.open('w') as conf:\n config.write(conf)",
"def __init__(self):\n\n self.read_input_file()\n self.read_simulation_files()",
"def __init__(self, force_remount=False):\n\n drive.mount(\"/content/gdrive\", force_remount=force_remount)\n\n if os.path.isfile(self.envpath):\n self.envload()\n else:\n self.create_vars_dot_env()",
"def __init__(self, test):\n global manifest_file\n global msl_data_file\n global msl_data_path\n global rsa_key_bin\n rsa_key_bin = 'rsa_manifest_medium.bin'\n msl_data_file = 'msl_data_manifest_medium.json'\n manifest_file = 'manifest_medium.json'\n if os.path.isfile(msl_data_path + rsa_key_bin):\n os.remove(msl_data_path + rsa_key_bin)\n if os.path.isfile(msl_data_path + msl_data_file):\n os.remove(msl_data_path + msl_data_file)\n if os.path.isfile(msl_data_path + manifest_file):\n os.remove(msl_data_path + manifest_file)\n else:\n try:\n os.mkdir(msl_data_path)\n except OSError:\n pass\n\n if self.file_exists(msl_data_path, msl_data_file):\n self._MSL__load_msl_data()\n self.handshake_performed = True\n else:\n if self.file_exists(msl_data_path, rsa_key_bin):\n self._MSL__load_rsa_keys()\n self._MSL__perform_key_handshake()\n else:\n print('Generating Device Keys...')\n self.rsa_key = RSA.generate(2048)\n self._MSL__save_rsa_keys()\n self._MSL__perform_key_handshake()",
"def init(self):\n if self._key is not None:\n raise RuntimeError('Already initialized. Call close() first.')\n\n self._key = self._stobjectnew()\n self._cwd = os.getcwd()\n self.reset()",
"def init(self):\n # Initialize runtime and MDK:\n self.runtime = fakeRuntime()\n self.runtime.getEnvVarsService().set(\"DATAWIRE_TOKEN\", \"somevalue\")\n self.runtime.dependencies.registerService(\"failurepolicy_factory\",\n RecordingFailurePolicyFactory())\n self.mdk = MDKImpl(self.runtime)\n self.mdk.start()\n self.disco = self.mdk._disco\n # Create a session:\n self.session = self.mdk.session()",
"def _setup(self):\n self.log_object.write_log(\n \"MESH0001\", None, {\"mailbox\": self.mailbox, \"environment\": self.environment}\n )\n\n common_params = MeshCommon.get_ssm_params(f\"/{self.environment}/mesh\")\n mailbox_params = MeshCommon.get_ssm_params(\n f\"/{self.environment}/mesh/mailboxes/{self.mailbox}\"\n )\n self.params = {**common_params, **mailbox_params}\n # self._write_certs_to_files()\n\n # maybe_verify = bool(\n # self.mailbox_params.get(\"MESH_VERIFY_SSL\", \"True\") == \"True\"\n # )\n\n # if not maybe_verify:\n # requests.urllib3.disable_warnings(InsecureRequestWarning)\n\n # # rewrite MeshClient\n # self.mesh_client = ExtendedMeshClient(\n # common_params[\"MESH_URL\"],\n # self.mailbox,\n # mailbox_params[\"MAILBOX_PASSWORD\"],\n # shared_key=common_params[\"MESH_SHARED_KEY\"].encode(\"utf8\"),\n # cert=(self.client_cert_file.name, self.client_key_file.name),\n # verify=self.ca_cert_file.name if maybe_verify else None,\n # max_chunk_size=MeshCommon.DEFAULT_CHUNK_SIZE,\n # )",
"def init():",
"def __init__(self,inp='INP.mcnp'):\n # Material dictionary for the moderator, light guide, and detector\n self.material = {'Moderator':None,'Detector':None,'LightGuide':None}\n self.material['Detector'] = {'name':'Detector','mt': 3, 'rho': 1.1,'matString':None} # detector\n self.material['LightGuide'] = {'name': 'PMMA','mt':10, 'rho':0.93} # PMMA\n self.material['Moderator'] = {'name':'HDPE','mt':456, 'rho': 0.93} # HPDE\n \n # Cell and Surface Inital Numbering\n self.CellStartNum = 600\n self.SurfaceStartNum = 600\n self.ZeroSurfaceNum = 500\n self.UniverseNum = 200\n self.surfGeo = None\n self.inp = inp\n self.name = 'OUT_'+self.inp.strip('.mcnp')+'.'\n self.setMaterial(0.1,'PS')"
] | [
"0.59631246",
"0.588322",
"0.5817756",
"0.57999754",
"0.5796112",
"0.5791291",
"0.5737065",
"0.57075596",
"0.5706491",
"0.5700405",
"0.5675861",
"0.56662416",
"0.5640394",
"0.5640394",
"0.5637696",
"0.56174433",
"0.56106085",
"0.55529803",
"0.55174655",
"0.55148506",
"0.5505797",
"0.5500892",
"0.54907864",
"0.54866827",
"0.54805946",
"0.54752135",
"0.5472253",
"0.5471927",
"0.5469395",
"0.54585886"
] | 0.7118378 | 0 |
Output OpenSSH valid configuration to connect to the machine. | def ssh_config(self, arguments):
instance_name = arguments['<instance>']
instance_name = self.activate(instance_name)
print(utils.config_ssh_string(self.config_ssh)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ssh(args, config):\n print('{}'.format(ssh.__doc__))",
"def bootstrap_config(self):\n self.logger.info(\"applying bootstrap configuration\")\n self.wait_write(\"\\r\", None)\n # Wait for the prompt\n time.sleep(1)\n self.wait_write(\"system-view\", \"<HPE>\")\n self.wait_write(\"ssh server enable\", \"[HPE]\")\n self.wait_write(\"user-interface class vty\", \"[HPE]\")\n self.wait_write(\"authentication-mode scheme\", \"[HPE-line-class-vty]\")\n self.wait_write(\"protocol inbound ssh\", \"[HPE-line-class-vty]\")\n self.wait_write(\"quit\", \"[HPE-line-class-vty]\")\n self.wait_write(\"local-user %s\" % (self.username), \"[HPE]\")\n self.wait_write(\"password simple %s\" % (self.password), \"[HPE-luser-manage-%s]\" % (self.username))\n self.wait_write(\"service-type ssh\", \"[HPE-luser-manage-%s]\" % (self.username))\n self.wait_write(\"authorization-attribute user-role network-admin\", \"[HPE-luser-manage-%s]\" % (self.username))\n self.wait_write(\"quit\", \"[HPE-luser-manage-%s]\" % (self.username))\n self.wait_write(\"interface GigabitEthernet%s/0\" % (self.num_nics + 1), \"[HPE]\")\n self.wait_write(\"ip address 10.0.0.15 255.255.255.0\", \"[HPE-GigabitEthernet%s/0]\" % (self.num_nics + 1))\n self.wait_write(\"quit\", \"[HPE-GigabitEthernet%s/0]\" % (self.num_nics + 1))\n self.wait_write(\"quit\", \"[HPE]\")\n self.wait_write(\"quit\", \"<HPE>\")\n self.logger.info(\"completed bootstrap configuration\")",
"def print_cfg(self, out=stdout):\n print(self.cmaboss_sim.str_cfg(), file=out)",
"def open_ssh():\n print('Opening SSH...')",
"def showconfig():\n print(yaml.dump(CONFIG))",
"def establish_connection(self):\r\n\r\n #creates SSH connection and adds SSH key to .known_hosts\r\n self.ssh_conn = paramiko.SSHClient()\r\n self.ssh_conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())\r\n\r\n try:\r\n self.ssh_conn.connect(**self.conn_parm)\r\n print \"Connected to %s\" % self.conn_parm['hostname']\r\n #testing: self.ssh_conn.close()\r\n except socket.error:\r\n print \"Connection Failed on device %s\" % self.conn_parm['hostname']\r\n\r\n #find prompt\r\n open_session = self.ssh_conn.invoke_shell()\r\n output = open_session.recv(1000)\r\n\r\n #testing: print output\r\n\r\n #go into Enable-Mode if not already in it\r\n if '#' not in output:\r\n open_session.send('enable\\n')\r\n time.sleep(1)\r\n open_session.send(self.password)\r\n open_session.send('\\n')\r\n else:\r\n print \"In Enable-Mode\"\r\n\r\n #turn off paging\r\n open_session.send('terminal length 0\\n')\r\n time.sleep(3)\r\n \r\n return open_session",
"def test_print_config(self) -> None:\n out = io.StringIO()\n with contextlib.redirect_stdout(out):\n self.config.print()\n self.assertEqual(\n out.getvalue().rstrip(),\n \"{}: {}\\n{}\".format(\"q2\", \"abcdefghij\", \"^\".rjust(7)),\n )",
"def display_config_info():\n print(\"Merlin Configuration\")\n print(\"-\" * 25)\n print(\"\")\n\n conf = default_config_info()\n sconf = {}\n excpts = {}\n try:\n conf[\"broker server\"] = broker.get_connection_string(include_password=False)\n sconf[\"broker server\"] = broker.get_connection_string()\n conf[\"broker ssl\"] = broker.get_ssl_config()\n except Exception as e:\n conf[\"broker server\"] = \"Broker server error.\"\n excpts[\"broker server\"] = e\n\n try:\n conf[\"results server\"] = results_backend.get_connection_string(include_password=False)\n sconf[\"results server\"] = results_backend.get_connection_string()\n conf[\"results ssl\"] = results_backend.get_ssl_config()\n except Exception as e:\n conf[\"results server\"] = \"No results server configured or error.\"\n excpts[\"results server\"] = e\n\n print(tabulate(conf.items(), tablefmt=\"presto\"))\n\n if excpts:\n print(\"\\nExceptions:\")\n for k, v in excpts.items():\n print(f\"{k}: {v}\")\n\n check_server_access(sconf)",
"def printConfig():\n # Why not log instead? Are we asking user to confirm settings?\n pass # until implemented",
"def connecting(self):\r\n \r\n ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy()) \r\n try:\r\n ssh.connect(self.hostname,self.port,self.identifier,self.password)\r\n feedback = '***Connection Established***'\r\n return feedback\r\n \r\n except Exception as e:\r\n feedback= '***Connection failed : '+str(e)+'***'\r\n return feedback\r\n sys.exit(1)",
"def do_display_config(self, *arg):\n try:\n if self.pocs and self.pocs.config:\n pprint(self.pocs.config)\n else:\n print_warning(\"No config file for POCS.\")\n\n except AttributeError:\n print_warning(\"Please run `setup_pocs` before trying to run `display_config`\")",
"def printConf(self):\n print \"\"\n for pname, pvalue in self.neededParams.items():\n print pname, pvalue\n for pname, pvalue in self.optionalParams.items():\n print pname, pvalue",
"def line_connect_info(self, dummy=None):\n connection_file = self.kernel.config[\"IPKernelApp\"][\"connection_file\"]\n try:\n config = json.loads(open(connection_file).read())\n except:\n config = {\"stdin_port\": \"UNKNOWN\",\n \"shell_port\": \"UNKNOWN\",\n \"iopub_port\": \"UNKNOWN\",\n \"hb_port\": \"UNKNOWN\",\n \"ip\": \"UNKNOWN\",\n \"key\": \"UNKNOWN\",\n \"signature_scheme\": \"UNKNOWN\",\n \"transport\": \"UNKNOWN\"\n }\n retval = \"\"\"{\n \"stdin_port\": %(stdin_port)s,\n \"shell_port\": %(shell_port)s,\n \"iopub_port\": %(iopub_port)s,\n \"hb_port\": %(hb_port)s,\n \"ip\": \"%(ip)s\",\n \"key\": \"%(key)s\",\n \"signature_scheme\": \"%(signature_scheme)s\",\n \"transport\": \"%(transport)s\"\n}\n\nPaste the above JSON into a file, and connect with:\n $> ipython <app> --existing <file>\nor, if you are local, you can connect with just:\n $> ipython <app> --existing %(key)s\n\nor even just:\n $> ipython <app> --existing\nif this is the most recent Jupyter session you have started.\n\"\"\" % config\n self.kernel.Print(retval)",
"def connectSsh(self):\n connect_handle = pexpect.spawn(\"ssh -q -o StrictHostKeyChecking=no root@%s\" % self.ip)\n connect_handle.setwinsize(800,800)\n connect_handle.logfile_read = sys.stdout\n #connect_handle.logfile_send = sys.stdout\n i = 0\n ssh_newkey = r'(?i)Are you sure you want to continue connecting'\n remote_key_changed = r\"REMOTE HOST IDENTIFICATION HAS CHANGED\"\n\n perm_denied = r\"(?i)Permission denied\"\n while True:\n i = connect_handle.expect([ssh_newkey, 'assword:',self.promptshell,\n pexpect.EOF, pexpect.TIMEOUT,\n remote_key_changed, perm_denied])\n if i==0:\n connect_handle.sendline('yes')\n continue\n elif i==1:\n logger.info(\"Password supplied\")\n connect_handle.sendline(self.password)\n continue\n\t elif i==2:\n self._mode = CLI_MODES.shell\n self._prompt = self.promptshell\n break\n elif i==3:\n logger.info(\"Connection closed: %s\" % self)\n logger.info(connect_handle.before) # print out the result\n raise ValueError(\"Connection Closed: %s\" % self)\n elif i==4:\n logger.warning(\"Timeout while waiting for connection\")\n logger.info(connect_handle.before) # print out the result\n raise ValueError(\"Unable to establish connection %s\" % self)\n elif i==5:\n logger.warn(\"Removing offending key from .known_hosts..\")\n known_hosts_file = os.path.expanduser(\"~/.ssh/known_hosts\")\n\n if \"darwin\" in sys.platform.lower():\n # MAC OS\n utils.run_cmd(\"sed -i 1 's/%s.*//' %s\" % (self.ip, known_hosts_file))\n elif \"linux\" in sys.platform.lower():\n # Linux\n utils.run_cmd(\"sed -i 's/%s.*//' %s\" % (self.ip, known_hosts_file))\n\n connect_handle = pexpect.spawn(\"ssh root@%s\" % self.ip)\n connect_handle.setwinsize(800,800)\n connect_handle.logfile_read = sys.stdout\n\n continue\n elif i==6:\n logger.warning(\"Permission denied: %s\" % self)\n logger.info(connect_handle.before) # print out the result\n raise ValueError(\"Permission denied: %s.\" % self)\n return connect_handle",
"def test_ssh(self):\n assert self.rc_conf.has_key('sshd_enable')\n assert self.rc_conf['sshd_enable'] == '\"YES\"'\n sshd_conf = open('/etc/ssh/sshd_config').read()\n assert re.search('[^#]PermitRootLogin yes', sshd_conf)",
"def show_config(config, args):\n pprint.pprint(config)",
"def write_ssh_config(bastion_ip, os_user, keyfile):\n with open('ssh_config-metrics', 'w') as config_file:\n config_file.write('host *\\n')\n config_file.write(' User %s\\n' % os_user)\n config_file.write(' IdentityFile %s\\n' % keyfile)\n config_file.write(' StrictHostKeyChecking no\\n')\n config_file.write(' UserKnownHostsFile /dev/null\\n')\n if bastion_ip:\n config_file.write(' ProxyCommand ssh -i %s -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null %s@%s exec nc %%h %%p\\n'\n % (keyfile, os_user, bastion_ip))\n if not bastion_ip:\n return False",
"def __repr__(self) -> str:\n view = {\n \"server\": self.server,\n \"access-token\": 'yes' if self.token is not None else 'no',\n \"insecure\": self.insecure,\n \"output\": self.output,\n \"verbose\": self.verbose,\n }\n\n return \"<Configuration({})\".format(view)",
"def print_config_option(args, run):\n print_config(run)\n print(\"-\" * 79)",
"def config_html(output_file=''):\n if output_file:\n f = open(output_file, 'w')\n else:\n f = sys.stdout\n create_config_html(f)",
"async def send_config_setSSH(self, cmds=None, timeout=None):\n\n # Display info message\n log.info(\"send_config_setSSH\")\n\n # Default value of timeout variable\n if timeout is None:\n timeout = self.timeout\n\n # Clear returned output\n returned_output = \"\"\n\n # Check if cmds is a string\n if isinstance(cmds, str):\n\n # A string\n\n # Convert the string into a list\n cmds = [cmds]\n\n # A list?\n elif not isinstance(cmds, list):\n\n # Not a list (and not a string)\n\n # Display error message\n log.error(\n \"send_config_setSSH: parameter cmds used in send_config_set is neither a string nor a list\"\n )\n\n # Leave the method\n return returned_output\n\n ##############################\n # Entering configuration mode\n ##############################\n\n # Display info message\n log.info(\"send_config_set: entering configuration mode\")\n\n # Clear output\n output = \"\"\n\n # Get command for entering in config made\n cmd = self.cmd_enter_config_mode\n\n # Add carriage return at the end of the command (mandatory to send the command)\n cmd = cmd + self._carriage_return_for_send_command\n\n # Display info message\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n\n # Sending command\n self.stdinx.write(cmd)\n\n # Display message\n log.info(\"send_config_setSSH: configuration mode entered\")\n\n while True:\n\n # Read the data received\n output += await asyncio.wait_for(\n self.stdoutx.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Display info message\n log.info(f\"send_config_setSSH: output: '{output}'\")\n\n # Check if prompt is found\n if self.check_if_prompt_is_found(output):\n\n # Yes\n\n # Leave the loop\n break\n\n # Debug info message\n log.debug(\n f\"send_config_setSSH: raw output: '{output}'\\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\n )\n\n # Add the output to the returned output\n returned_output += output\n\n # Remove the command sent from the result of the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = self.remove_starting_carriage_return_in_output(output)\n # Remove the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Display info message\n log.debug(\n f\"send_config_setSSH: cleaned output: '{output}'\\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\n )\n\n # Check if there is an error in the output string (like \"% Unrecognized command\")\n # and generate an exception if needed\n self.check_error_output(output)\n\n ##############################\n # Sending commands\n ##############################\n\n # Display info message\n log.info(\"send_config_setSSH: sending commands\")\n\n # Clear output\n output = \"\"\n\n # Each command\n for cmd in cmds:\n\n # Add carriage return at the end of the command (mandatory to send the command)\n cmd = cmd + self._carriage_return_for_send_command\n\n # Display info message\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n\n # Sending command\n self.stdinx.write(cmd)\n\n # Display info message\n log.info(\"send_config_setSSH: command sent\")\n\n while True:\n\n # Read the data received\n output += await asyncio.wait_for(\n self.stdoutx.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Display info message\n log.info(f\"send_config_setSSH: output: '{output}'\")\n\n # Check if prompt is found\n if self.check_if_prompt_is_found(output):\n\n # Yes\n\n # Leave the loop\n break\n\n # Debug info message\n log.debug(\n f\"send_config_setSSH: raw output: 
'{output}'\\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\n )\n\n # Add the output to the returned output\n returned_output += output\n\n # Remove the command sent from the result of the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = self.remove_starting_carriage_return_in_output(output)\n # Remove the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Display info message\n log.debug(\n f\"send_config_setSSH: cleaned output: '{output}'\\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\n )\n\n # Check if there is an error in the output string (like \"% Unrecognized command\")\n # and generate an exception if needed\n self.check_error_output(output)\n\n ##############################\n # Leaving configuration mode\n ##############################\n\n # Display info message\n log.info(\"send_config_setSSH: leaving configuration mode\")\n\n # Clear output\n output = \"\"\n\n # Get command to leave config made\n cmd = self.cmd_exit_config_mode\n\n # Add carriage return at the end of the command (mandatory to send the command)\n cmd = cmd + self._carriage_return_for_send_command\n\n # Display info message\n log.info(f\"send_config_setSSH: cmd = '{cmd}'\")\n\n # Sending command\n self.stdinx.write(cmd)\n\n # Display info message\n log.info(\"send_config_setSSH: command to leave configuration mode sent\")\n\n while True:\n\n # Read the data received\n output += await asyncio.wait_for(\n self.stdoutx.read(MAX_BUFFER_DATA), timeout=timeout\n )\n\n # Display info message\n log.info(f\"send_config_setSSH: output: '{output}'\")\n\n # Check if prompt is found\n if self.check_if_prompt_is_found(output):\n\n # Yes\n\n # Leave the loop\n break\n\n # Debug info message\n log.debug(\n f\"send_config_setSSH: raw output: '{output}'\\nsend_config_setSSH: raw output (hex): '{output.encode().hex()}'\"\n )\n\n # Add the output to the returned output\n returned_output += output\n\n # Remove the command sent from the result of the command\n output = self.remove_command_in_output(output, str(cmd))\n # Remove the carriage return of the output\n output = self.remove_starting_carriage_return_in_output(output)\n # Remove the ending prompt of the output\n output = self.remove_ending_prompt_in_output(output)\n\n # Display info message\n log.debug(\n f\"send_config_setSSH: cleaned output: '{output}'\\nsend_config_setSSH: cleaned output (hex): '{output.encode().hex()}'\"\n )\n\n # Check if there is an error in the output string (like \"% Unrecognized command\")\n # and generate an exception if needed\n self.check_error_output(output)\n\n # Return the result of the commands\n return returned_output",
"def print_config(self):\n for key in self._config.keys():\n print('[{0}] = {1}'.format(key, self._config[key]))",
"def open(self):\n logging.debug('Connecting to device %s' % self.paramiko_cfg.get('hostname'))\n self.ssh = paramiko.SSHClient()\n self.ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())\n self.ssh.connect(**self.paramiko_cfg)",
"def connect(self, driver):\n # 0 1 2\n events = [driver.password_re, self.device.prompt_re, driver.unable_to_connect_re,\n # 3 4 5 6 7\n NEWSSHKEY, KNOWN_HOSTS, HOST_KEY_FAILED, MODULUS_TOO_SMALL, PROTOCOL_DIFFER,\n # 8 9\n driver.timeout_re, pexpect.TIMEOUT]\n\n transitions = [\n (driver.password_re, [0, 1, 4, 5], -1, partial(a_save_last_pattern, self), 0),\n (self.device.prompt_re, [0], -1, partial(a_save_last_pattern, self), 0),\n # cover all messages indicating that connection was not set up\n (driver.unable_to_connect_re, [0], -1, a_unable_to_connect, 0),\n (NEWSSHKEY, [0], 1, partial(a_send_line, \"yes\"), 10),\n (KNOWN_HOSTS, [0, 1], 0, None, 0),\n (HOST_KEY_FAILED, [0], -1, ConnectionError(\"Host key failed\", self.hostname), 0),\n (MODULUS_TOO_SMALL, [0], 0, self.fallback_to_sshv1, 0),\n (PROTOCOL_DIFFER, [0], 4, self.fallback_to_sshv1, 0),\n (PROTOCOL_DIFFER, [4], -1, ConnectionError(\"Protocol version differs\", self.hostname), 0),\n (pexpect.TIMEOUT, [0], 5, partial(a_send, \"\\r\\n\"), 10),\n (pexpect.TIMEOUT, [5], -1, ConnectionTimeoutError(\"Connection timeout\", self.hostname), 0),\n (driver.timeout_re, [0], -1, ConnectionTimeoutError(\"Connection timeout\", self.hostname), 0),\n ]\n\n logger.debug(\"EXPECTED_PROMPT={}\".format(pattern_to_str(self.device.prompt_re)))\n fsm = FSM(\"SSH-CONNECT\", self.device, events, transitions, timeout=_C['connect_timeout'],\n searchwindowsize=160)\n return fsm.run()",
"def configure_handshake(conf):\n print()\n if conf.get('host', None) is None or conf['host'] not in ['Server', 'Client']:\n conf['host'] = ask_options('Is this the Server or Client?',\n ['Server', 'Client']).title()\n if conf.get('hostname', None) is None:\n if conf['host'] == 'Server':\n conf['hostname'] = simple_response('What is your hostname?')\n else:\n conf['hostname'] = simple_response(\n 'What is the hostname that you are connecting to?')\n if conf.get('port', None) is None:\n if conf['host'] == 'Server':\n conf['port'] = numeric_response('What port do you want to use?')\n else:\n conf['port'] = numeric_response(\n 'What port on the host are you connecting to?')\n if conf.get('timeout', None) is None:\n conf['timeout'] = numeric_response('How long, in seconds, can a connection hang before timing out?',\n default=30)\n if conf.get('encryption', None) is None:\n conf['encryption'] = yes_no(\n 'Would you like to use TLS encryption?', default=False)\n if conf['encryption'] and conf.get('cert', None) is None:\n conf['cert'] = ask_path('Enter the path for the certificate file')\n if conf['encryption'] and conf['host'] == 'Server' \\\n and conf.get('key', None) is None:\n conf['key'] = ask_path('Enter the path for the key file')\n return conf",
"def main():\n extremehandle = {\n \"device_type\": \"extreme\",\n \"ip\": \"10.54.116.175\",\n \"username\": \"admin\",\n \"password\": \"\",\n }\n net_connect = ConnectHandler(**extremehandle)\n output = net_connect.send_command(\"show config vlan\")\n print(output)",
"def print_config_status():\n\n check_server_running()\n check_postgres()\n\n if check_storage_permission():\n print('[{green}+{white}] /.faraday/storage -> Permission accepted' \\\n .format(green=Fore.GREEN, white=Fore.WHITE))\n else:\n print('[{red}-{white}] /.faraday/storage -> Permission denied'\\\n .format(red=Fore.RED, white=Fore.WHITE))\n\n if check_open_ports():\n print(\"[{green}+{white}] Port {PORT} in {ad} is open\"\\\n .format(PORT=faraday.server.config.faraday_server.port, green=Fore.GREEN,white=Fore.WHITE,ad=faraday.server.config.faraday_server.bind_address))\n else:\n print(\"[{red}-{white}] Port {PORT} in {ad} is not open\"\\\n .format(PORT=faraday.server.config.faraday_server.port,red=Fore.RED,white=Fore.WHITE,ad =faraday.server.config.faraday_server.bind_address))",
"def ssh_config(name=''):\n with settings(hide('running')):\n output = local('vagrant ssh-config %s' % name, capture=True)\n\n config = {}\n for line in output.splitlines()[1:]:\n key, value = line.strip().split(' ', 2)\n config[key] = value\n return config",
"def vagrant_ssh_config():\n proc = sp.Popen(\"vagrant ssh-config\", stdout=sp.PIPE, stderr=sp.PIPE, shell=True)\n out, _ = proc.communicate()\n\n hosts = {}\n new_line = True # new block\n for line in out.decode().split(\"\\n\"):\n\n if new_line is True:\n hostname = line.replace(\"Host \", \"\")\n new_line = False\n elif len(line) == 0:\n new_line = True\n else:\n data = line[2:].split(\" \")\n\n host = hosts.get(hostname, {})\n host.update({data[0]: \" \".join(data[1:])})\n hosts.update({hostname: host})\n\n return hosts",
"def _config_chn_ins(ssh_clt, topo_info):\n # MARK: Assume the iterface name pattern: eth0, eth1, eth2...\n for ifce_name in ['eth1', 'eth2']:\n print('## Setup interface: %s' % ifce_name)\n ssh_clt.exec_command('sudo ip link set %s up' % ifce_name)\n time.sleep(1)\n print('## Assign IP via DHCP')\n ssh_clt.exec_command('sudo dhclient %s' % ifce_name)\n time.sleep(1)\n print('## Remove duplicate route table items...')\n ssh_clt.exec_command('sudo ip route delete %s dev %s'\n % (conf.NET_ARGS['pvt_subnet_cidr'], ifce_name)\n )\n time.sleep(1)\n\n print('## Add static routing to source and destination...')\n ssh_clt.exec_command('sudo ip route add %s dev eth1' % topo_info['src_ip'])\n time.sleep(1)\n ssh_clt.exec_command('sudo ip route add %s dev eth2' % topo_info['dst_ip'])\n time.sleep(1)\n\n print('## Enable Linux Kernel IP forwarding...')\n ssh_clt.exec_command('echo 1 | sudo tee /proc/sys/net/ipv4/ip_forward')\n time.sleep(1)\n print('# Config Finished\\n')"
] | [
"0.6099966",
"0.60891026",
"0.60809517",
"0.5902394",
"0.58439136",
"0.58291984",
"0.5805959",
"0.5784745",
"0.57688177",
"0.5761952",
"0.56910163",
"0.56236494",
"0.562262",
"0.5595992",
"0.55930495",
"0.5480706",
"0.5446128",
"0.54187673",
"0.5414092",
"0.5410241",
"0.5367734",
"0.53539985",
"0.5347633",
"0.5319227",
"0.5310571",
"0.5308444",
"0.5298896",
"0.5296675",
"0.52939516",
"0.5288401"
] | 0.6446934 | 0 |
If TLS 1.3 support is missing and it's configured, it will raise a ConfigError. | def test_tls_client_minimum_1_point_3_missing(self):
# thanks i hate it
if hasattr(SSL, "OP_NO_TLSv1_3"):
OP_NO_TLSv1_3 = SSL.OP_NO_TLSv1_3
delattr(SSL, "OP_NO_TLSv1_3")
self.addCleanup(setattr, SSL, "SSL.OP_NO_TLSv1_3", OP_NO_TLSv1_3)
assert not hasattr(SSL, "OP_NO_TLSv1_3")
config = {"federation_client_minimum_tls_version": 1.3}
t = TestConfig()
with self.assertRaises(ConfigError) as e:
t.read_config(config, config_dir_path="", data_dir_path="")
self.assertEqual(
e.exception.args[0],
(
"federation_client_minimum_tls_version cannot be 1.3, "
"your OpenSSL does not support it"
),
) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _config_tls(self):\n pass",
"def test_tls_client_minimum_1_point_3_exists(self):\n # thanks i hate it, still\n if not hasattr(SSL, \"OP_NO_TLSv1_3\"):\n SSL.OP_NO_TLSv1_3 = 0x00\n self.addCleanup(lambda: delattr(SSL, \"OP_NO_TLSv1_3\"))\n assert hasattr(SSL, \"OP_NO_TLSv1_3\")\n\n config = {\"federation_client_minimum_tls_version\": 1.3}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1.3\")",
"def _validate_ssl_context_for_tls_in_tls(ssl_context):\n\n if not hasattr(ssl_context, \"wrap_bio\"):\n if six.PY2:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't \"\n \"supported on Python 2\"\n )\n else:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't \"\n \"available on non-native SSLContext\"\n )",
"def test_certs_error(self, tmpdir):\n with pytest.raises(docker.errors.TLSParameterError):\n client_kwargs_from_config(\n 'http://l cert_path=%s' % tmpdir.strpath\n )",
"def test_tls_client_minimum_set_passed_through_1_0(self):\n config = {\"federation_client_minimum_tls_version\": 1}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n\n cf = FederationPolicyForHTTPS(t)\n options = _get_ssl_context_options(cf._verify_ssl_context)\n\n # The context has not had any of the NO_TLS set.\n self.assertEqual(options & SSL.OP_NO_TLSv1, 0)\n self.assertEqual(options & SSL.OP_NO_TLSv1_1, 0)\n self.assertEqual(options & SSL.OP_NO_TLSv1_2, 0)",
"def test_tls_client_minimum_set_passed_through_1_2(self):\n config = {\"federation_client_minimum_tls_version\": 1.2}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n\n cf = FederationPolicyForHTTPS(t)\n options = _get_ssl_context_options(cf._verify_ssl_context)\n\n # The context has had NO_TLSv1_1 and NO_TLSv1_0 set, but not NO_TLSv1_2\n self.assertNotEqual(options & SSL.OP_NO_TLSv1, 0)\n self.assertNotEqual(options & SSL.OP_NO_TLSv1_1, 0)\n self.assertEqual(options & SSL.OP_NO_TLSv1_2, 0)",
"def test_tls_client_minimum_default(self):\n config = {}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n\n self.assertEqual(t.federation_client_minimum_tls_version, \"1\")",
"def has_tls_support(self):\n return \"STARTTLS\" in self.__capabilities",
"def get_tls_factory(self):\n if not access(self.cert_path, R_OK):\n raise RuntimeError('Error: cert file at %s is not '\n 'readable' % self.cert_path)\n if not access(self.key_path, R_OK):\n raise RuntimeError('Error: key file at %s is not '\n 'readable' % self.key_path)\n if not HAVE_PYOPENSSL:\n raise RuntimeError('Error: running with TLS (cert and key) requires'\n ' pyOpenSSL, but it does not appear to be '\n 'installed. Please \"pip install pyOpenSSL\".')\n # check certs are readable\n cf = certificateOptionsFromFiles(self.key_path, self.cert_path)\n return cf",
"def get_tls(self):\n configured_value = self.charm_config[\"enable-tls\"]\n if configured_value:\n return configured_value\n return False",
"def test_get_tls(matrix):\n matrix.charm_config[\"enable-tls\"] = True\n result = matrix.get_tls()\n assert result is True\n matrix.charm_config[\"enable-tls\"] = False\n result = matrix.get_tls()\n assert result is False",
"def __init__(self, tls_1_2=None, tls_1_1=None, tls_1_0=None, ssl_3_0=None):\n self.tls_1_2 = tls_1_2\n self.tls_1_1 = tls_1_1\n self.tls_1_0 = tls_1_0\n self.ssl_3_0 = ssl_3_0",
"def test_alpn_call_failure(self):\n context = Context(SSLv23_METHOD)\n with pytest.raises(ValueError):\n context.set_alpn_protos([])",
"def test_http_over_https_error(\n http_request_timeout,\n tls_http_server, adapter_type,\n ca, ip_addr,\n tls_certificate,\n tls_certificate_chain_pem_path,\n tls_certificate_private_key_pem_path,\n):\n # disable some flaky tests\n # https://github.com/cherrypy/cheroot/issues/225\n issue_225 = (\n IS_MACOS\n and adapter_type == 'builtin'\n )\n if issue_225:\n pytest.xfail('Test fails in Travis-CI')\n\n tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)\n tls_adapter = tls_adapter_cls(\n tls_certificate_chain_pem_path, tls_certificate_private_key_pem_path,\n )\n if adapter_type == 'pyopenssl':\n tls_adapter.context = tls_adapter.get_context()\n\n tls_certificate.configure_cert(tls_adapter.context)\n\n interface, _host, port = _get_conn_data(ip_addr)\n tlshttpserver = tls_http_server((interface, port), tls_adapter)\n\n interface, _host, port = _get_conn_data(\n tlshttpserver.bind_addr,\n )\n\n fqdn = interface\n if ip_addr is ANY_INTERFACE_IPV6:\n fqdn = '[{fqdn}]'.format(**locals())\n\n expect_fallback_response_over_plain_http = (\n (\n adapter_type == 'pyopenssl'\n )\n )\n if expect_fallback_response_over_plain_http:\n resp = requests.get(\n 'http://{host!s}:{port!s}/'.format(host=fqdn, port=port),\n timeout=http_request_timeout,\n )\n assert resp.status_code == 400\n assert resp.text == (\n 'The client sent a plain HTTP request, '\n 'but this server only speaks HTTPS on this port.'\n )\n return\n\n with pytest.raises(requests.exceptions.ConnectionError) as ssl_err:\n requests.get( # FIXME: make stdlib ssl behave like PyOpenSSL\n 'http://{host!s}:{port!s}/'.format(host=fqdn, port=port),\n timeout=http_request_timeout,\n )\n\n if IS_LINUX:\n expected_error_code, expected_error_text = (\n 104, 'Connection reset by peer',\n )\n if IS_MACOS:\n expected_error_code, expected_error_text = (\n 54, 'Connection reset by peer',\n )\n if IS_WINDOWS:\n expected_error_code, expected_error_text = (\n 10054,\n 'An existing connection was forcibly closed by the remote host',\n )\n\n underlying_error = ssl_err.value.args[0].args[-1]\n err_text = str(underlying_error)\n assert underlying_error.errno == expected_error_code, (\n 'The underlying error is {underlying_error!r}'.\n format(**locals())\n )\n assert expected_error_text in err_text",
"def test_set_tlsext_use_srtp_not_bytes(self):\n context = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n context.set_tlsext_use_srtp(str(\"SRTP_AES128_CM_SHA1_80\"))",
"def test_set_client_ca_list_errors(self):\n ctx = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n ctx.set_client_ca_list(\"spam\")\n with pytest.raises(TypeError):\n ctx.set_client_ca_list([\"spam\"])",
"def test_tls_min_version(self):\n self.x509 = x509main(host=self.cluster.master)\n self.x509.generate_multiple_x509_certs(servers=self.cluster.servers)\n for server in self.cluster.servers:\n _ = self.x509.upload_root_certs(server)\n self.x509.upload_node_certs(servers=self.cluster.servers)\n self.x509.delete_unused_out_of_the_box_CAs(self.cluster.master)\n self.x509.upload_client_cert_settings(server=self.cluster.servers[0])\n\n self.enable_tls_encryption_cli_on_nodes(nodes=[self.cluster.master])\n\n rest = RestConnection(self.cluster.master)\n status, content = rest.set_min_tls_version(version='tlsv1.3')\n if not status:\n self.fail(\"Setting tls min version to 1.3 failed with content {0}\".format(content))\n\n self.validate_tls_min_version(node=self.cluster.master, version=\"1.2\", expect=\"fail\")\n self.validate_tls_min_version(node=self.cluster.master, version=\"1.3\", expect=\"pass\")\n\n self.disable_n2n_encryption_cli_on_nodes(nodes=self.cluster.servers)\n CbServer.use_https = False\n rest = RestConnection(self.cluster.master)\n status, content = rest.set_min_tls_version(version='tlsv1.2')\n if not status:\n self.fail(\"Setting tls min version to 1.2 failed with content {0}\".format(content))\n self.enable_tls_encryption_cli_on_nodes(nodes=[self.cluster.master])\n self.validate_tls_min_version(node=self.cluster.master, version=\"1.2\", expect=\"pass\")\n\n self.x509 = x509main(host=self.cluster.master)\n self.x509.teardown_certs(servers=self.cluster.servers)",
"def test_add_client_ca_wrong_args(self):\n ctx = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n ctx.add_client_ca(\"spam\")",
"def test_ssl_default(self):\n assert security.security_settings.ssl_verify()",
"def can_https(tls_ver):\n output = True\n\n # check python version\n if sys.version_info < (3, 6): #modify from 3, 7 to 3, 6\n _LOGGER.error(\"PyISY cannot use HTTPS: Invalid Python version. See docs.\")\n output = False\n\n # check that Python was compiled against correct OpenSSL lib\n if \"PROTOCOL_TLSv1_1\" not in dir(ssl):\n _LOGGER.error(\n \"PyISY cannot use HTTPS: Compiled against old OpenSSL \"\n + \"library. See docs.\"\n )\n output = False\n\n # check the requested TLS version\n if tls_ver not in [1.1, 1.2]:\n _LOGGER.error(\n \"PyISY cannot use HTTPS: Only TLS 1.1 and 1.2 are supported \"\n + \"by the ISY controller.\"\n )\n output = False\n\n return output",
"def test_set_options_wrong_args(self):\n context = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n context.set_options(None)",
"def __init__(self):\n super(TLS12AuthenticationSuite, self).__init__()\n self._protocol = ssl.PROTOCOL_TLSv1_2\n self._ciphers = ':'.join((\n 'AES128-SHA256',\n 'AES256-SHA256',\n 'DH-DSS-AES256-SHA256',\n 'DH-DSS-AES128-SHA256',\n 'DH-RSA-AES128-SHA256',\n 'DHE-DSS-AES128-SHA256',\n 'DHE-RSA-AES128-SHA256',\n 'DH-DSS-AES256-SHA256',\n 'DH-RSA-AES256-SHA256',\n 'DHE-DSS-AES256-SHA256',\n 'DHE-RSA-AES256-SHA256',\n 'ECDH-ECDSA-AES128-SHA256',\n 'ECDH-ECDSA-AES256-SHA256',\n 'ECDHE-ECDSA-AES128-SHA256',\n 'ECDHE-ECDSA-AES256-SHA384',\n 'ECDH-RSA-AES128-SHA256',\n 'ECDH-RSA-AES256-SHA384',\n 'ECDHE-RSA-AES128-SHA256',\n 'ECDHE-RSA-AES256-SHA384',\n 'ECDHE-ECDSA-AES128-GCM-SHA256',\n 'ECDHE-ECDSA-AES256-GCM-SHA384',\n 'ECDHE-ECDSA-AES128-SHA256',\n 'ECDHE-ECDSA-AES256-SHA384',\n ))",
"def test_use_certificate_file_missing(self, tmpfile):\n ctx = Context(SSLv23_METHOD)\n with pytest.raises(Error):\n ctx.use_certificate_file(tmpfile)",
"def test_set_tlsext_use_srtp_valid(self):\n context = Context(SSLv23_METHOD)\n assert context.set_tlsext_use_srtp(b\"SRTP_AES128_CM_SHA1_80\") is None",
"def force_communicate_tls(rest: ClusterManager) -> bool:\n settings, err = rest.get_security_settings()\n _exit_if_errors(err)\n\n # The cluster isn't using 'strict' cluster encryption, we shouldn't need to force enable TLS\n if 'clusterEncryptionLevel' not in settings or settings['clusterEncryptionLevel'] != 'strict':\n return False\n\n # The user might not have used a 'https://' scheme prefix, so communicating to other nodes via the secure ports may\n # lead to interesting/surprising errors; let them know beforehand.\n _warning(\"sub-command requires multi-node communication via TLS enabled ports, '--cacert' or \"\n \"'--no-ssl-verify' may need to be supplied\")\n\n return True",
"def create_tls_context(TLSSTRENGTH):\n\n #CREATE a CONTEXT that we can then update\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS)\n\n if TLSSTRENGTH == \"tls1_3\":\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_3)\n\n if TLSSTRENGTH == \"tls1_2\":\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_2)\n\n elif TLSSTRENGTH == \"tls1_1\":\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_1)\n\n elif TLSSTRENGTH == \"tls1\":\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1)\n\n else:\n print(\"Valid TLS Protocol Not Found: Needs to be in OpenSSL format: tls_1, tls_1_1 tls_2\")\n return\n\n context.verify_mode = ssl.CERT_REQUIRED\n context.check_hostname = True\n context.load_default_certs()\n print(\"TLS Protocol Specified: {}\".format(TLSSTRENGTH))\n return context",
"def test_set_options(self):\n context = Context(SSLv23_METHOD)\n options = context.set_options(OP_NO_SSLv2)\n assert options & OP_NO_SSLv2 == OP_NO_SSLv2",
"def test_set_tlsext_use_srtp_invalid_profile(self):\n context = Context(SSLv23_METHOD)\n with pytest.raises(Error):\n context.set_tlsext_use_srtp(b\"SRTP_BOGUS\")",
"def insecure_tls(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"insecure_tls\")",
"def insecure_tls(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"insecure_tls\")"
] | [
"0.73794323",
"0.7187562",
"0.6202741",
"0.608287",
"0.60574615",
"0.5968533",
"0.5968185",
"0.59237605",
"0.5923406",
"0.59213465",
"0.5912386",
"0.58577335",
"0.5834019",
"0.5784727",
"0.5690416",
"0.5681512",
"0.5679992",
"0.562341",
"0.55832535",
"0.55645514",
"0.5561639",
"0.5552638",
"0.5505458",
"0.5504243",
"0.5480745",
"0.54799986",
"0.54651254",
"0.5460312",
"0.545573",
"0.545573"
] | 0.79050493 | 0 |
If TLS 1.3 support exists and it's configured, it will be settable. | def test_tls_client_minimum_1_point_3_exists(self):
# thanks i hate it, still
if not hasattr(SSL, "OP_NO_TLSv1_3"):
SSL.OP_NO_TLSv1_3 = 0x00
self.addCleanup(lambda: delattr(SSL, "OP_NO_TLSv1_3"))
assert hasattr(SSL, "OP_NO_TLSv1_3")
config = {"federation_client_minimum_tls_version": 1.3}
t = TestConfig()
t.read_config(config, config_dir_path="", data_dir_path="")
self.assertEqual(t.federation_client_minimum_tls_version, "1.3") | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_tls_client_minimum_1_point_3_missing(self):\n # thanks i hate it\n if hasattr(SSL, \"OP_NO_TLSv1_3\"):\n OP_NO_TLSv1_3 = SSL.OP_NO_TLSv1_3\n delattr(SSL, \"OP_NO_TLSv1_3\")\n self.addCleanup(setattr, SSL, \"SSL.OP_NO_TLSv1_3\", OP_NO_TLSv1_3)\n assert not hasattr(SSL, \"OP_NO_TLSv1_3\")\n\n config = {\"federation_client_minimum_tls_version\": 1.3}\n t = TestConfig()\n with self.assertRaises(ConfigError) as e:\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(\n e.exception.args[0],\n (\n \"federation_client_minimum_tls_version cannot be 1.3, \"\n \"your OpenSSL does not support it\"\n ),\n )",
"def _config_tls(self):\n pass",
"def test_tls_client_minimum_set_passed_through_1_0(self):\n config = {\"federation_client_minimum_tls_version\": 1}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n\n cf = FederationPolicyForHTTPS(t)\n options = _get_ssl_context_options(cf._verify_ssl_context)\n\n # The context has not had any of the NO_TLS set.\n self.assertEqual(options & SSL.OP_NO_TLSv1, 0)\n self.assertEqual(options & SSL.OP_NO_TLSv1_1, 0)\n self.assertEqual(options & SSL.OP_NO_TLSv1_2, 0)",
"def __init__(self, tls_1_2=None, tls_1_1=None, tls_1_0=None, ssl_3_0=None):\n self.tls_1_2 = tls_1_2\n self.tls_1_1 = tls_1_1\n self.tls_1_0 = tls_1_0\n self.ssl_3_0 = ssl_3_0",
"def test_tls_client_minimum_set_passed_through_1_2(self):\n config = {\"federation_client_minimum_tls_version\": 1.2}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n\n cf = FederationPolicyForHTTPS(t)\n options = _get_ssl_context_options(cf._verify_ssl_context)\n\n # The context has had NO_TLSv1_1 and NO_TLSv1_0 set, but not NO_TLSv1_2\n self.assertNotEqual(options & SSL.OP_NO_TLSv1, 0)\n self.assertNotEqual(options & SSL.OP_NO_TLSv1_1, 0)\n self.assertEqual(options & SSL.OP_NO_TLSv1_2, 0)",
"def has_tls_support(self):\n return \"STARTTLS\" in self.__capabilities",
"def test_set_options(self):\n context = Context(SSLv23_METHOD)\n options = context.set_options(OP_NO_SSLv2)\n assert options & OP_NO_SSLv2 == OP_NO_SSLv2",
"def __init__(self):\n super(TLS12AuthenticationSuite, self).__init__()\n self._protocol = ssl.PROTOCOL_TLSv1_2\n self._ciphers = ':'.join((\n 'AES128-SHA256',\n 'AES256-SHA256',\n 'DH-DSS-AES256-SHA256',\n 'DH-DSS-AES128-SHA256',\n 'DH-RSA-AES128-SHA256',\n 'DHE-DSS-AES128-SHA256',\n 'DHE-RSA-AES128-SHA256',\n 'DH-DSS-AES256-SHA256',\n 'DH-RSA-AES256-SHA256',\n 'DHE-DSS-AES256-SHA256',\n 'DHE-RSA-AES256-SHA256',\n 'ECDH-ECDSA-AES128-SHA256',\n 'ECDH-ECDSA-AES256-SHA256',\n 'ECDHE-ECDSA-AES128-SHA256',\n 'ECDHE-ECDSA-AES256-SHA384',\n 'ECDH-RSA-AES128-SHA256',\n 'ECDH-RSA-AES256-SHA384',\n 'ECDHE-RSA-AES128-SHA256',\n 'ECDHE-RSA-AES256-SHA384',\n 'ECDHE-ECDSA-AES128-GCM-SHA256',\n 'ECDHE-ECDSA-AES256-GCM-SHA384',\n 'ECDHE-ECDSA-AES128-SHA256',\n 'ECDHE-ECDSA-AES256-SHA384',\n ))",
"def __init__(self):\n super(BasicAuthenticationSuite, self).__init__()\n self._protocol = ssl.PROTOCOL_TLSv1\n self._ciphers = ':'.join((\n 'AES128-SHA',\n 'DES-CBC3-SHA',\n 'AES256-SHA',\n 'DHE-DSS-DES-CBC3-SHA',\n 'DHE-RSA-DES-CBC3-SHA',\n 'DH-DSS-AES128-SHA',\n 'DH-RSA-AES128-SHA',\n 'DHE-DSS-AES128-SHA',\n 'DHE-RSA-AES128-SHA',\n 'DH-RSA-AES256-SHA',\n 'DHE-DSS-AES256-SHA',\n 'DHE-RSA-AES256-SHA',\n ))",
"def _set_tls_capabilities(self, caps):\n if self.settings.get(\"ssl-mode\") == SSLMode.DISABLED:\n return\n\n if self.stream.is_socket():\n if self.settings.get(\"ssl-mode\"):\n _LOGGER.warning(\"SSL not required when using Unix socket.\")\n return\n\n if \"tls\" not in caps:\n self.close_connection()\n raise OperationalError(\"SSL not enabled at server\")\n\n is_ol7 = False\n if platform.system() == \"Linux\":\n distname, version, _ = linux_distribution()\n try:\n is_ol7 = \"Oracle Linux\" in distname and version.split(\".\")[0] == \"7\"\n except IndexError:\n is_ol7 = False\n\n if sys.version_info < (2, 7, 9) and not is_ol7:\n self.close_connection()\n raise RuntimeError(\n \"The support for SSL is not available for this Python version\"\n )\n\n self.protocol.set_capabilities(tls=True)\n self.stream.set_ssl(\n self.settings.get(\"tls-versions\", None),\n self.settings.get(\"ssl-mode\", SSLMode.REQUIRED),\n self.settings.get(\"ssl-ca\"),\n self.settings.get(\"ssl-crl\"),\n self.settings.get(\"ssl-cert\"),\n self.settings.get(\"ssl-key\"),\n self.settings.get(\"tls-ciphersuites\"),\n )\n if \"attributes\" in self.settings:\n conn_attrs = self.settings[\"attributes\"]\n self.protocol.set_capabilities(session_connect_attrs=conn_attrs)",
"def test_tls_min_version(self):\n self.x509 = x509main(host=self.cluster.master)\n self.x509.generate_multiple_x509_certs(servers=self.cluster.servers)\n for server in self.cluster.servers:\n _ = self.x509.upload_root_certs(server)\n self.x509.upload_node_certs(servers=self.cluster.servers)\n self.x509.delete_unused_out_of_the_box_CAs(self.cluster.master)\n self.x509.upload_client_cert_settings(server=self.cluster.servers[0])\n\n self.enable_tls_encryption_cli_on_nodes(nodes=[self.cluster.master])\n\n rest = RestConnection(self.cluster.master)\n status, content = rest.set_min_tls_version(version='tlsv1.3')\n if not status:\n self.fail(\"Setting tls min version to 1.3 failed with content {0}\".format(content))\n\n self.validate_tls_min_version(node=self.cluster.master, version=\"1.2\", expect=\"fail\")\n self.validate_tls_min_version(node=self.cluster.master, version=\"1.3\", expect=\"pass\")\n\n self.disable_n2n_encryption_cli_on_nodes(nodes=self.cluster.servers)\n CbServer.use_https = False\n rest = RestConnection(self.cluster.master)\n status, content = rest.set_min_tls_version(version='tlsv1.2')\n if not status:\n self.fail(\"Setting tls min version to 1.2 failed with content {0}\".format(content))\n self.enable_tls_encryption_cli_on_nodes(nodes=[self.cluster.master])\n self.validate_tls_min_version(node=self.cluster.master, version=\"1.2\", expect=\"pass\")\n\n self.x509 = x509main(host=self.cluster.master)\n self.x509.teardown_certs(servers=self.cluster.servers)",
"def test_get_tls(matrix):\n matrix.charm_config[\"enable-tls\"] = True\n result = matrix.get_tls()\n assert result is True\n matrix.charm_config[\"enable-tls\"] = False\n result = matrix.get_tls()\n assert result is False",
"def test_set_tlsext_use_srtp_valid(self):\n context = Context(SSLv23_METHOD)\n assert context.set_tlsext_use_srtp(b\"SRTP_AES128_CM_SHA1_80\") is None",
"def create_tls_context(TLSSTRENGTH):\n\n #CREATE a CONTEXT that we can then update\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS)\n\n if TLSSTRENGTH == \"tls1_3\":\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_3)\n\n if TLSSTRENGTH == \"tls1_2\":\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_2)\n\n elif TLSSTRENGTH == \"tls1_1\":\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_1)\n\n elif TLSSTRENGTH == \"tls1\":\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1)\n\n else:\n print(\"Valid TLS Protocol Not Found: Needs to be in OpenSSL format: tls_1, tls_1_1 tls_2\")\n return\n\n context.verify_mode = ssl.CERT_REQUIRED\n context.check_hostname = True\n context.load_default_certs()\n print(\"TLS Protocol Specified: {}\".format(TLSSTRENGTH))\n return context",
"def can_https(tls_ver):\n output = True\n\n # check python version\n if sys.version_info < (3, 6): #modify from 3, 7 to 3, 6\n _LOGGER.error(\"PyISY cannot use HTTPS: Invalid Python version. See docs.\")\n output = False\n\n # check that Python was compiled against correct OpenSSL lib\n if \"PROTOCOL_TLSv1_1\" not in dir(ssl):\n _LOGGER.error(\n \"PyISY cannot use HTTPS: Compiled against old OpenSSL \"\n + \"library. See docs.\"\n )\n output = False\n\n # check the requested TLS version\n if tls_ver not in [1.1, 1.2]:\n _LOGGER.error(\n \"PyISY cannot use HTTPS: Only TLS 1.1 and 1.2 are supported \"\n + \"by the ISY controller.\"\n )\n output = False\n\n return output",
"def test_tls_client_minimum_default(self):\n config = {}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n\n self.assertEqual(t.federation_client_minimum_tls_version, \"1\")",
"def _validate_ssl_context_for_tls_in_tls(ssl_context):\n\n if not hasattr(ssl_context, \"wrap_bio\"):\n if six.PY2:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't \"\n \"supported on Python 2\"\n )\n else:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't \"\n \"available on non-native SSLContext\"\n )",
"def get_tls(self):\n configured_value = self.charm_config[\"enable-tls\"]\n if configured_value:\n return configured_value\n return False",
"def test_ssl_default(self):\n assert security.security_settings.ssl_verify()",
"def tls(self) -> pulumi.Input[bool]:\n return pulumi.get(self, \"tls\")",
"def tls_enabled(self):\n has_cert = getattr(self.args, 'ssl_certfile', None) is not None\n has_key = getattr(self.args, 'ssl_keyfile', None) is not None\n tls = getattr(self.args, 'tls', False)\n return tls or self.is_sandbox or (has_cert and has_key)",
"def test_get_cipher_version_before_connect(self):\n ctx = Context(SSLv23_METHOD)\n conn = Connection(ctx, None)\n assert conn.get_cipher_version() is None",
"def initialize_ssl(self):\n self.ssl_context = ssl.SSLContext()\n # if self.config.get('ca_file', None):\n # self.ssl_context.load_verify_locations(ca_file=self.config['ca_file'])\n\n # TODO : Remove this\n\n verify_ssl = self.config[\"AUTH\"][\"verify_ssl\"]\n if isinstance(verify_ssl, str):\n verify_ssl = strtobool(verify_ssl)\n\n if not verify_ssl:\n self.ssl_context.verify_mode = ssl.CERT_NONE",
"def force_communicate_tls(rest: ClusterManager) -> bool:\n settings, err = rest.get_security_settings()\n _exit_if_errors(err)\n\n # The cluster isn't using 'strict' cluster encryption, we shouldn't need to force enable TLS\n if 'clusterEncryptionLevel' not in settings or settings['clusterEncryptionLevel'] != 'strict':\n return False\n\n # The user might not have used a 'https://' scheme prefix, so communicating to other nodes via the secure ports may\n # lead to interesting/surprising errors; let them know beforehand.\n _warning(\"sub-command requires multi-node communication via TLS enabled ports, '--cacert' or \"\n \"'--no-ssl-verify' may need to be supplied\")\n\n return True",
"def test_set_tlsext_use_srtp_not_bytes(self):\n context = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n context.set_tlsext_use_srtp(str(\"SRTP_AES128_CM_SHA1_80\"))",
"def test_enable_tls_and_plaintext(cassandra_service, dcos_ca_bundle):\n update_service_transport_encryption(\n cassandra_service, enabled=True, allow_plaintext=True)\n verify_client_can_write_read_and_delete(dcos_ca_bundle)",
"def test_ssl_env( # noqa: C901 # FIXME\n thread_exceptions,\n recwarn,\n mocker,\n http_request_timeout,\n tls_http_server, adapter_type,\n ca, tls_verify_mode, tls_certificate,\n tls_certificate_chain_pem_path,\n tls_certificate_private_key_pem_path,\n tls_ca_certificate_pem_path,\n use_client_cert,\n):\n interface, _host, port = _get_conn_data(ANY_INTERFACE_IPV4)\n\n with mocker.mock_module.patch(\n 'idna.core.ulabel',\n return_value=ntob('127.0.0.1'),\n ):\n client_cert = ca.issue_cert(ntou('127.0.0.1'))\n\n with client_cert.private_key_and_cert_chain_pem.tempfile() as cl_pem:\n tls_adapter_cls = get_ssl_adapter_class(name=adapter_type)\n tls_adapter = tls_adapter_cls(\n tls_certificate_chain_pem_path,\n tls_certificate_private_key_pem_path,\n )\n if adapter_type == 'pyopenssl':\n tls_adapter.context = tls_adapter.get_context()\n tls_adapter.context.set_verify(\n _stdlib_to_openssl_verify[tls_verify_mode],\n lambda conn, cert, errno, depth, preverify_ok: preverify_ok,\n )\n else:\n tls_adapter.context.verify_mode = tls_verify_mode\n\n ca.configure_trust(tls_adapter.context)\n tls_certificate.configure_cert(tls_adapter.context)\n\n tlswsgiserver = tls_http_server((interface, port), tls_adapter)\n\n interface, _host, port = _get_conn_data(tlswsgiserver.bind_addr)\n\n resp = requests.get(\n 'https://' + interface + ':' + str(port) + '/env',\n timeout=http_request_timeout,\n verify=tls_ca_certificate_pem_path,\n cert=cl_pem if use_client_cert else None,\n )\n\n env = json.loads(resp.content.decode('utf-8'))\n\n # hard coded env\n assert env['wsgi.url_scheme'] == 'https'\n assert env['HTTPS'] == 'on'\n\n # ensure these are present\n for key in {'SSL_VERSION_INTERFACE', 'SSL_VERSION_LIBRARY'}:\n assert key in env\n\n # pyOpenSSL generates the env before the handshake completes\n if adapter_type == 'pyopenssl':\n return\n\n for key in {'SSL_PROTOCOL', 'SSL_CIPHER'}:\n assert key in env\n\n # client certificate env\n if tls_verify_mode == ssl.CERT_NONE or not use_client_cert:\n assert env['SSL_CLIENT_VERIFY'] == 'NONE'\n else:\n assert env['SSL_CLIENT_VERIFY'] == 'SUCCESS'\n\n with open(cl_pem, 'rt') as f:\n assert env['SSL_CLIENT_CERT'] in f.read()\n\n for key in {\n 'SSL_CLIENT_M_VERSION', 'SSL_CLIENT_M_SERIAL',\n 'SSL_CLIENT_I_DN', 'SSL_CLIENT_S_DN',\n }:\n assert key in env\n\n # builtin ssl environment generation may use a loopback socket\n # ensure no ResourceWarning was raised during the test\n if IS_PYPY:\n # NOTE: PyPy doesn't have ResourceWarning\n # Ref: https://doc.pypy.org/en/latest/cpython_differences.html\n return\n for warn in recwarn:\n if not issubclass(warn.category, ResourceWarning):\n continue\n\n # the tests can sporadically generate resource warnings\n # due to timing issues\n # all of these sporadic warnings appear to be about socket.socket\n # and have been observed to come from requests connection pool\n msg = str(warn.message)\n if 'socket.socket' in msg:\n pytest.xfail(\n '\\n'.join((\n 'Sometimes this test fails due to '\n 'a socket.socket ResourceWarning:',\n msg,\n )),\n )\n pytest.fail(msg)\n\n # to perform the ssl handshake over that loopback socket,\n # the builtin ssl environment generation uses a thread\n for _, _, trace in thread_exceptions:\n print(trace, file=sys.stderr)\n assert not thread_exceptions, ': '.join((\n thread_exceptions[0][0].__name__,\n thread_exceptions[0][1],\n ))",
"def test_set_client_ca_list_errors(self):\n ctx = Context(SSLv23_METHOD)\n with pytest.raises(TypeError):\n ctx.set_client_ca_list(\"spam\")\n with pytest.raises(TypeError):\n ctx.set_client_ca_list([\"spam\"])",
"def ssl(self):\n return self.protocol != \"SASL_PLAINTEXT\"",
"def test_set_verify_mode(self):\n context = Context(SSLv23_METHOD)\n assert context.get_verify_mode() == 0\n context.set_verify(VERIFY_PEER | VERIFY_CLIENT_ONCE)\n assert context.get_verify_mode() == (VERIFY_PEER | VERIFY_CLIENT_ONCE)"
] | [
"0.73433053",
"0.7166888",
"0.693749",
"0.6911747",
"0.6790053",
"0.6767721",
"0.6685774",
"0.660896",
"0.6553318",
"0.654518",
"0.6388313",
"0.63858914",
"0.63777804",
"0.6243128",
"0.6116154",
"0.6102289",
"0.6054786",
"0.6047724",
"0.59661496",
"0.59522796",
"0.5937201",
"0.5935675",
"0.5908759",
"0.59068227",
"0.58849466",
"0.58498275",
"0.5822232",
"0.58183795",
"0.5791508",
"0.5782003"
] | 0.71868205 | 1 |
The configured minimum TLS version is correctly applied by the ContextFactory. | def test_tls_client_minimum_set_passed_through_1_2(self):
config = {"federation_client_minimum_tls_version": 1.2}
t = TestConfig()
t.read_config(config, config_dir_path="", data_dir_path="")
cf = FederationPolicyForHTTPS(t)
options = _get_ssl_context_options(cf._verify_ssl_context)
# The context has had NO_TLSv1_1 and NO_TLSv1_0 set, but not NO_TLSv1_2
self.assertNotEqual(options & SSL.OP_NO_TLSv1, 0)
self.assertNotEqual(options & SSL.OP_NO_TLSv1_1, 0)
self.assertEqual(options & SSL.OP_NO_TLSv1_2, 0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_tls_client_minimum_set_passed_through_1_0(self):\n config = {\"federation_client_minimum_tls_version\": 1}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n\n cf = FederationPolicyForHTTPS(t)\n options = _get_ssl_context_options(cf._verify_ssl_context)\n\n # The context has not had any of the NO_TLS set.\n self.assertEqual(options & SSL.OP_NO_TLSv1, 0)\n self.assertEqual(options & SSL.OP_NO_TLSv1_1, 0)\n self.assertEqual(options & SSL.OP_NO_TLSv1_2, 0)",
"def _config_tls(self):\n pass",
"def create_tls_context(TLSSTRENGTH):\n\n #CREATE a CONTEXT that we can then update\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS)\n\n if TLSSTRENGTH == \"tls1_3\":\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_3)\n\n if TLSSTRENGTH == \"tls1_2\":\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_2)\n\n elif TLSSTRENGTH == \"tls1_1\":\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_1)\n\n elif TLSSTRENGTH == \"tls1\":\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1)\n\n else:\n print(\"Valid TLS Protocol Not Found: Needs to be in OpenSSL format: tls_1, tls_1_1 tls_2\")\n return\n\n context.verify_mode = ssl.CERT_REQUIRED\n context.check_hostname = True\n context.load_default_certs()\n print(\"TLS Protocol Specified: {}\".format(TLSSTRENGTH))\n return context",
"def test_tls_client_minimum_1_point_3_missing(self):\n # thanks i hate it\n if hasattr(SSL, \"OP_NO_TLSv1_3\"):\n OP_NO_TLSv1_3 = SSL.OP_NO_TLSv1_3\n delattr(SSL, \"OP_NO_TLSv1_3\")\n self.addCleanup(setattr, SSL, \"SSL.OP_NO_TLSv1_3\", OP_NO_TLSv1_3)\n assert not hasattr(SSL, \"OP_NO_TLSv1_3\")\n\n config = {\"federation_client_minimum_tls_version\": 1.3}\n t = TestConfig()\n with self.assertRaises(ConfigError) as e:\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(\n e.exception.args[0],\n (\n \"federation_client_minimum_tls_version cannot be 1.3, \"\n \"your OpenSSL does not support it\"\n ),\n )",
"def test_tls_client_minimum_1_point_3_exists(self):\n # thanks i hate it, still\n if not hasattr(SSL, \"OP_NO_TLSv1_3\"):\n SSL.OP_NO_TLSv1_3 = 0x00\n self.addCleanup(lambda: delattr(SSL, \"OP_NO_TLSv1_3\"))\n assert hasattr(SSL, \"OP_NO_TLSv1_3\")\n\n config = {\"federation_client_minimum_tls_version\": 1.3}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1.3\")",
"def test_tls_client_minimum_default(self):\n config = {}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n\n self.assertEqual(t.federation_client_minimum_tls_version, \"1\")",
"def _validate_ssl_context_for_tls_in_tls(ssl_context):\n\n if not hasattr(ssl_context, \"wrap_bio\"):\n if six.PY2:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't \"\n \"supported on Python 2\"\n )\n else:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't \"\n \"available on non-native SSLContext\"\n )",
"def test_get_cipher_version_before_connect(self):\n ctx = Context(SSLv23_METHOD)\n conn = Connection(ctx, None)\n assert conn.get_cipher_version() is None",
"def test_tls_min_version(self):\n self.x509 = x509main(host=self.cluster.master)\n self.x509.generate_multiple_x509_certs(servers=self.cluster.servers)\n for server in self.cluster.servers:\n _ = self.x509.upload_root_certs(server)\n self.x509.upload_node_certs(servers=self.cluster.servers)\n self.x509.delete_unused_out_of_the_box_CAs(self.cluster.master)\n self.x509.upload_client_cert_settings(server=self.cluster.servers[0])\n\n self.enable_tls_encryption_cli_on_nodes(nodes=[self.cluster.master])\n\n rest = RestConnection(self.cluster.master)\n status, content = rest.set_min_tls_version(version='tlsv1.3')\n if not status:\n self.fail(\"Setting tls min version to 1.3 failed with content {0}\".format(content))\n\n self.validate_tls_min_version(node=self.cluster.master, version=\"1.2\", expect=\"fail\")\n self.validate_tls_min_version(node=self.cluster.master, version=\"1.3\", expect=\"pass\")\n\n self.disable_n2n_encryption_cli_on_nodes(nodes=self.cluster.servers)\n CbServer.use_https = False\n rest = RestConnection(self.cluster.master)\n status, content = rest.set_min_tls_version(version='tlsv1.2')\n if not status:\n self.fail(\"Setting tls min version to 1.2 failed with content {0}\".format(content))\n self.enable_tls_encryption_cli_on_nodes(nodes=[self.cluster.master])\n self.validate_tls_min_version(node=self.cluster.master, version=\"1.2\", expect=\"pass\")\n\n self.x509 = x509main(host=self.cluster.master)\n self.x509.teardown_certs(servers=self.cluster.servers)",
"def min_tls_version(self):\n return self._data.get('min-tls-version', None)",
"def context():\n return Context(SSLv23_METHOD)",
"def __init__(self):\n super(TLS12AuthenticationSuite, self).__init__()\n self._protocol = ssl.PROTOCOL_TLSv1_2\n self._ciphers = ':'.join((\n 'AES128-SHA256',\n 'AES256-SHA256',\n 'DH-DSS-AES256-SHA256',\n 'DH-DSS-AES128-SHA256',\n 'DH-RSA-AES128-SHA256',\n 'DHE-DSS-AES128-SHA256',\n 'DHE-RSA-AES128-SHA256',\n 'DH-DSS-AES256-SHA256',\n 'DH-RSA-AES256-SHA256',\n 'DHE-DSS-AES256-SHA256',\n 'DHE-RSA-AES256-SHA256',\n 'ECDH-ECDSA-AES128-SHA256',\n 'ECDH-ECDSA-AES256-SHA256',\n 'ECDHE-ECDSA-AES128-SHA256',\n 'ECDHE-ECDSA-AES256-SHA384',\n 'ECDH-RSA-AES128-SHA256',\n 'ECDH-RSA-AES256-SHA384',\n 'ECDHE-RSA-AES128-SHA256',\n 'ECDHE-RSA-AES256-SHA384',\n 'ECDHE-ECDSA-AES128-GCM-SHA256',\n 'ECDHE-ECDSA-AES256-GCM-SHA384',\n 'ECDHE-ECDSA-AES128-SHA256',\n 'ECDHE-ECDSA-AES256-SHA384',\n ))",
"def getVersionName(self):\r\n if self.version == (3,0):\r\n return \"SSL 3.0\"\r\n elif self.version == (3,1):\r\n return \"TLS 1.0\"\r\n elif self.version == (3,2):\r\n return \"TLS 1.1\"\r\n else:\r\n return None",
"def test_get_context(self):\n context = Context(SSLv23_METHOD)\n connection = Connection(context, None)\n assert connection.get_context() is context",
"def get_tls_factory(self):\n if not access(self.cert_path, R_OK):\n raise RuntimeError('Error: cert file at %s is not '\n 'readable' % self.cert_path)\n if not access(self.key_path, R_OK):\n raise RuntimeError('Error: key file at %s is not '\n 'readable' % self.key_path)\n if not HAVE_PYOPENSSL:\n raise RuntimeError('Error: running with TLS (cert and key) requires'\n ' pyOpenSSL, but it does not appear to be '\n 'installed. Please \"pip install pyOpenSSL\".')\n # check certs are readable\n cf = certificateOptionsFromFiles(self.key_path, self.cert_path)\n return cf",
"def _default_ssl_context() -> ssl.SSLContext:\n ssl_context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS_CLIENT)\n ssl_context.minimum_version = ssl.TLSVersion.TLSv1_2\n ssl_context.verify_mode = ssl.CERT_REQUIRED\n ssl_context.check_hostname = True\n ssl_context.load_default_certs()\n return ssl_context",
"def _context(use_tls=False):\n if use_tls is False:\n return None\n config = Config()\n ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n ctx.load_cert_chain(config.tls_cert, config.tls_key)\n ctx.options |= ssl.OP_NO_SSLv2\n ctx.options |= ssl.OP_NO_SSLv3\n ctx.options |= ssl.OP_NO_COMPRESSION\n ctx.options |= ssl.OP_CIPHER_SERVER_PREFERENCE\n if not config.args.less_secure:\n ctx.options |= ssl.OP_SINGLE_DH_USE\n ctx.options |= ssl.OP_SINGLE_ECDH_USE\n ctx.set_ciphers(\":\".join(ciphers))\n if config.tls_dhparams:\n ctx.load_dh_params(config.tls_dhparams)\n return ctx",
"def test_set_tlsext_use_srtp_valid(self):\n context = Context(SSLv23_METHOD)\n assert context.set_tlsext_use_srtp(b\"SRTP_AES128_CM_SHA1_80\") is None",
"def __get_http2_ssl_context(self):\n # Get the basic context from the standard library.\n if self.client_side == False:\n #self.ctx = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)\n self.ctx = ssl._create_unverified_context()\n else:\n #self.ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=self.server_cert)\n #self.ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)\n self.ctx = ssl._create_unverified_context()\n\n # RFC 7540 Section 9.2: Implementations of HTTP/2 MUST use TLS version 1.2\n # or higher. Disable TLS 1.1 and lower.\n self.ctx.options |= (\n ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1\n )\n\n # RFC 7540 Section 9.2.1: A deployment of HTTP/2 over TLS 1.2 MUST disable\n # compression.\n self.ctx.options |= ssl.OP_NO_COMPRESSION\n\n # RFC 7540 Section 9.2.2: \"deployments of HTTP/2 that use TLS 1.2 MUST\n # support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\". In practice, the\n # blacklist defined in this section allows only the AES GCM and ChaCha20\n # cipher suites with ephemeral key negotiation.\n\n\n if self.client_side == False:\n self.ctx.load_cert_chain(certfile=self.server_cert, keyfile=self.server_key)\n self.ctx.load_verify_locations(cafile=self.client_certs) \n else:\n self.ctx.load_cert_chain(certfile=self.client_certs, keyfile=self.client_key)\n self.ctx.load_verify_locations(cafile=self.client_certs) \n pass\n\n\n\n # We want to negotiate using NPN and ALPN. ALPN is mandatory, but NPN may\n # be absent, so allow that. This setup allows for negotiation of HTTP/1.1.\n self.ctx.set_alpn_protocols([\"h2\", \"http/1.1\"])\n\n try:\n self.ctx.set_npn_protocols([\"h2\", \"http/1.1\"])\n except NotImplementedError as e:\n print(\"TLS Error: NotImplementedError=%s\" % (e))\n pass\n\n #self.ctx = ctx\n\n return True",
"def format_tls_string(version):\n version = version.replace(\"TLS\", \"TLS \")\n protocol, number = version.split()\n return f\"{protocol} {int(number) / 10}\"",
"def __init__(self):\n super(BasicAuthenticationSuite, self).__init__()\n self._protocol = ssl.PROTOCOL_TLSv1\n self._ciphers = ':'.join((\n 'AES128-SHA',\n 'DES-CBC3-SHA',\n 'AES256-SHA',\n 'DHE-DSS-DES-CBC3-SHA',\n 'DHE-RSA-DES-CBC3-SHA',\n 'DH-DSS-AES128-SHA',\n 'DH-RSA-AES128-SHA',\n 'DHE-DSS-AES128-SHA',\n 'DHE-RSA-AES128-SHA',\n 'DH-RSA-AES256-SHA',\n 'DHE-DSS-AES256-SHA',\n 'DHE-RSA-AES256-SHA',\n ))",
"def get_tls(self):\n configured_value = self.charm_config[\"enable-tls\"]\n if configured_value:\n return configured_value\n return False",
"def test_tls_client_minimum_set(self):\n config = {\"federation_client_minimum_tls_version\": 1}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1\")\n\n config = {\"federation_client_minimum_tls_version\": 1.1}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1.1\")\n\n config = {\"federation_client_minimum_tls_version\": 1.2}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1.2\")\n\n # Also test a string version\n config = {\"federation_client_minimum_tls_version\": \"1\"}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1\")\n\n config = {\"federation_client_minimum_tls_version\": \"1.2\"}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1.2\")",
"def test_set_options(self):\n context = Context(SSLv23_METHOD)\n options = context.set_options(OP_NO_SSLv2)\n assert options & OP_NO_SSLv2 == OP_NO_SSLv2",
"def validate_tls_min_version(self, node=None, version=\"1.2\", expect=\"fail\"):\n if node is None:\n node = self.cluster.master\n cmd = self.curl_path + \" -v --tlsv\" + version + \" --tls-max \" + version + \\\n \" -u \" + node.rest_username + \":\" + node.rest_password + \\\n \" https://\" + node.ip + \":18091/pools/ -k\"\n shell = RemoteMachineShellConnection(node)\n o, e = shell.execute_command(cmd)\n if expect == \"fail\":\n if len(o) != 0:\n shell.disconnect()\n self.fail(\"Command worked when it should have failed\")\n else:\n if len(o) == 0 or \"pools\" not in o[0]:\n shell.disconnect()\n self.fail(\"Command failed when it should have worked\")\n shell.disconnect()",
"def test_getContext(self):\n contextFactory = crypto.SSLVerifyingContextFactory(self.url)\n self.assertIsInstance(contextFactory.getContext(),\n OpenSSL.SSL.Context)",
"def tls_min_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tls_min_version\")",
"def tls_min_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tls_min_version\")",
"def test_get_tls(matrix):\n matrix.charm_config[\"enable-tls\"] = True\n result = matrix.get_tls()\n assert result is True\n matrix.charm_config[\"enable-tls\"] = False\n result = matrix.get_tls()\n assert result is False",
"def tls_max_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tls_max_version\")"
] | [
"0.7221435",
"0.7129749",
"0.70612466",
"0.6946584",
"0.69393426",
"0.6594127",
"0.65869886",
"0.65678525",
"0.6503111",
"0.6450828",
"0.6446566",
"0.64110285",
"0.63336414",
"0.6328864",
"0.63021713",
"0.62453425",
"0.62098765",
"0.6187503",
"0.6182842",
"0.61263424",
"0.6114017",
"0.6065197",
"0.60540086",
"0.6022161",
"0.6015842",
"0.6011889",
"0.5950615",
"0.5950615",
"0.5926178",
"0.58906674"
] | 0.7200872 | 1 |
The configured minimum TLS version is correctly applied by the ContextFactory. | def test_tls_client_minimum_set_passed_through_1_0(self):
config = {"federation_client_minimum_tls_version": 1}
t = TestConfig()
t.read_config(config, config_dir_path="", data_dir_path="")
cf = FederationPolicyForHTTPS(t)
options = _get_ssl_context_options(cf._verify_ssl_context)
# The context has not had any of the NO_TLS set.
self.assertEqual(options & SSL.OP_NO_TLSv1, 0)
self.assertEqual(options & SSL.OP_NO_TLSv1_1, 0)
self.assertEqual(options & SSL.OP_NO_TLSv1_2, 0) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_tls_client_minimum_set_passed_through_1_2(self):\n config = {\"federation_client_minimum_tls_version\": 1.2}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n\n cf = FederationPolicyForHTTPS(t)\n options = _get_ssl_context_options(cf._verify_ssl_context)\n\n # The context has had NO_TLSv1_1 and NO_TLSv1_0 set, but not NO_TLSv1_2\n self.assertNotEqual(options & SSL.OP_NO_TLSv1, 0)\n self.assertNotEqual(options & SSL.OP_NO_TLSv1_1, 0)\n self.assertEqual(options & SSL.OP_NO_TLSv1_2, 0)",
"def _config_tls(self):\n pass",
"def create_tls_context(TLSSTRENGTH):\n\n #CREATE a CONTEXT that we can then update\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS)\n\n if TLSSTRENGTH == \"tls1_3\":\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_3)\n\n if TLSSTRENGTH == \"tls1_2\":\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_2)\n\n elif TLSSTRENGTH == \"tls1_1\":\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1_1)\n\n elif TLSSTRENGTH == \"tls1\":\n context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLSv1)\n\n else:\n print(\"Valid TLS Protocol Not Found: Needs to be in OpenSSL format: tls_1, tls_1_1 tls_2\")\n return\n\n context.verify_mode = ssl.CERT_REQUIRED\n context.check_hostname = True\n context.load_default_certs()\n print(\"TLS Protocol Specified: {}\".format(TLSSTRENGTH))\n return context",
"def test_tls_client_minimum_1_point_3_missing(self):\n # thanks i hate it\n if hasattr(SSL, \"OP_NO_TLSv1_3\"):\n OP_NO_TLSv1_3 = SSL.OP_NO_TLSv1_3\n delattr(SSL, \"OP_NO_TLSv1_3\")\n self.addCleanup(setattr, SSL, \"SSL.OP_NO_TLSv1_3\", OP_NO_TLSv1_3)\n assert not hasattr(SSL, \"OP_NO_TLSv1_3\")\n\n config = {\"federation_client_minimum_tls_version\": 1.3}\n t = TestConfig()\n with self.assertRaises(ConfigError) as e:\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(\n e.exception.args[0],\n (\n \"federation_client_minimum_tls_version cannot be 1.3, \"\n \"your OpenSSL does not support it\"\n ),\n )",
"def test_tls_client_minimum_1_point_3_exists(self):\n # thanks i hate it, still\n if not hasattr(SSL, \"OP_NO_TLSv1_3\"):\n SSL.OP_NO_TLSv1_3 = 0x00\n self.addCleanup(lambda: delattr(SSL, \"OP_NO_TLSv1_3\"))\n assert hasattr(SSL, \"OP_NO_TLSv1_3\")\n\n config = {\"federation_client_minimum_tls_version\": 1.3}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1.3\")",
"def test_tls_client_minimum_default(self):\n config = {}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n\n self.assertEqual(t.federation_client_minimum_tls_version, \"1\")",
"def _validate_ssl_context_for_tls_in_tls(ssl_context):\n\n if not hasattr(ssl_context, \"wrap_bio\"):\n if six.PY2:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't \"\n \"supported on Python 2\"\n )\n else:\n raise ProxySchemeUnsupported(\n \"TLS in TLS requires SSLContext.wrap_bio() which isn't \"\n \"available on non-native SSLContext\"\n )",
"def test_get_cipher_version_before_connect(self):\n ctx = Context(SSLv23_METHOD)\n conn = Connection(ctx, None)\n assert conn.get_cipher_version() is None",
"def test_tls_min_version(self):\n self.x509 = x509main(host=self.cluster.master)\n self.x509.generate_multiple_x509_certs(servers=self.cluster.servers)\n for server in self.cluster.servers:\n _ = self.x509.upload_root_certs(server)\n self.x509.upload_node_certs(servers=self.cluster.servers)\n self.x509.delete_unused_out_of_the_box_CAs(self.cluster.master)\n self.x509.upload_client_cert_settings(server=self.cluster.servers[0])\n\n self.enable_tls_encryption_cli_on_nodes(nodes=[self.cluster.master])\n\n rest = RestConnection(self.cluster.master)\n status, content = rest.set_min_tls_version(version='tlsv1.3')\n if not status:\n self.fail(\"Setting tls min version to 1.3 failed with content {0}\".format(content))\n\n self.validate_tls_min_version(node=self.cluster.master, version=\"1.2\", expect=\"fail\")\n self.validate_tls_min_version(node=self.cluster.master, version=\"1.3\", expect=\"pass\")\n\n self.disable_n2n_encryption_cli_on_nodes(nodes=self.cluster.servers)\n CbServer.use_https = False\n rest = RestConnection(self.cluster.master)\n status, content = rest.set_min_tls_version(version='tlsv1.2')\n if not status:\n self.fail(\"Setting tls min version to 1.2 failed with content {0}\".format(content))\n self.enable_tls_encryption_cli_on_nodes(nodes=[self.cluster.master])\n self.validate_tls_min_version(node=self.cluster.master, version=\"1.2\", expect=\"pass\")\n\n self.x509 = x509main(host=self.cluster.master)\n self.x509.teardown_certs(servers=self.cluster.servers)",
"def min_tls_version(self):\n return self._data.get('min-tls-version', None)",
"def context():\n return Context(SSLv23_METHOD)",
"def __init__(self):\n super(TLS12AuthenticationSuite, self).__init__()\n self._protocol = ssl.PROTOCOL_TLSv1_2\n self._ciphers = ':'.join((\n 'AES128-SHA256',\n 'AES256-SHA256',\n 'DH-DSS-AES256-SHA256',\n 'DH-DSS-AES128-SHA256',\n 'DH-RSA-AES128-SHA256',\n 'DHE-DSS-AES128-SHA256',\n 'DHE-RSA-AES128-SHA256',\n 'DH-DSS-AES256-SHA256',\n 'DH-RSA-AES256-SHA256',\n 'DHE-DSS-AES256-SHA256',\n 'DHE-RSA-AES256-SHA256',\n 'ECDH-ECDSA-AES128-SHA256',\n 'ECDH-ECDSA-AES256-SHA256',\n 'ECDHE-ECDSA-AES128-SHA256',\n 'ECDHE-ECDSA-AES256-SHA384',\n 'ECDH-RSA-AES128-SHA256',\n 'ECDH-RSA-AES256-SHA384',\n 'ECDHE-RSA-AES128-SHA256',\n 'ECDHE-RSA-AES256-SHA384',\n 'ECDHE-ECDSA-AES128-GCM-SHA256',\n 'ECDHE-ECDSA-AES256-GCM-SHA384',\n 'ECDHE-ECDSA-AES128-SHA256',\n 'ECDHE-ECDSA-AES256-SHA384',\n ))",
"def getVersionName(self):\r\n if self.version == (3,0):\r\n return \"SSL 3.0\"\r\n elif self.version == (3,1):\r\n return \"TLS 1.0\"\r\n elif self.version == (3,2):\r\n return \"TLS 1.1\"\r\n else:\r\n return None",
"def test_get_context(self):\n context = Context(SSLv23_METHOD)\n connection = Connection(context, None)\n assert connection.get_context() is context",
"def get_tls_factory(self):\n if not access(self.cert_path, R_OK):\n raise RuntimeError('Error: cert file at %s is not '\n 'readable' % self.cert_path)\n if not access(self.key_path, R_OK):\n raise RuntimeError('Error: key file at %s is not '\n 'readable' % self.key_path)\n if not HAVE_PYOPENSSL:\n raise RuntimeError('Error: running with TLS (cert and key) requires'\n ' pyOpenSSL, but it does not appear to be '\n 'installed. Please \"pip install pyOpenSSL\".')\n # check certs are readable\n cf = certificateOptionsFromFiles(self.key_path, self.cert_path)\n return cf",
"def _default_ssl_context() -> ssl.SSLContext:\n ssl_context = ssl.SSLContext(protocol=ssl.PROTOCOL_TLS_CLIENT)\n ssl_context.minimum_version = ssl.TLSVersion.TLSv1_2\n ssl_context.verify_mode = ssl.CERT_REQUIRED\n ssl_context.check_hostname = True\n ssl_context.load_default_certs()\n return ssl_context",
"def _context(use_tls=False):\n if use_tls is False:\n return None\n config = Config()\n ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n ctx.load_cert_chain(config.tls_cert, config.tls_key)\n ctx.options |= ssl.OP_NO_SSLv2\n ctx.options |= ssl.OP_NO_SSLv3\n ctx.options |= ssl.OP_NO_COMPRESSION\n ctx.options |= ssl.OP_CIPHER_SERVER_PREFERENCE\n if not config.args.less_secure:\n ctx.options |= ssl.OP_SINGLE_DH_USE\n ctx.options |= ssl.OP_SINGLE_ECDH_USE\n ctx.set_ciphers(\":\".join(ciphers))\n if config.tls_dhparams:\n ctx.load_dh_params(config.tls_dhparams)\n return ctx",
"def test_set_tlsext_use_srtp_valid(self):\n context = Context(SSLv23_METHOD)\n assert context.set_tlsext_use_srtp(b\"SRTP_AES128_CM_SHA1_80\") is None",
"def __get_http2_ssl_context(self):\n # Get the basic context from the standard library.\n if self.client_side == False:\n #self.ctx = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)\n self.ctx = ssl._create_unverified_context()\n else:\n #self.ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=self.server_cert)\n #self.ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)\n self.ctx = ssl._create_unverified_context()\n\n # RFC 7540 Section 9.2: Implementations of HTTP/2 MUST use TLS version 1.2\n # or higher. Disable TLS 1.1 and lower.\n self.ctx.options |= (\n ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1\n )\n\n # RFC 7540 Section 9.2.1: A deployment of HTTP/2 over TLS 1.2 MUST disable\n # compression.\n self.ctx.options |= ssl.OP_NO_COMPRESSION\n\n # RFC 7540 Section 9.2.2: \"deployments of HTTP/2 that use TLS 1.2 MUST\n # support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\". In practice, the\n # blacklist defined in this section allows only the AES GCM and ChaCha20\n # cipher suites with ephemeral key negotiation.\n\n\n if self.client_side == False:\n self.ctx.load_cert_chain(certfile=self.server_cert, keyfile=self.server_key)\n self.ctx.load_verify_locations(cafile=self.client_certs) \n else:\n self.ctx.load_cert_chain(certfile=self.client_certs, keyfile=self.client_key)\n self.ctx.load_verify_locations(cafile=self.client_certs) \n pass\n\n\n\n # We want to negotiate using NPN and ALPN. ALPN is mandatory, but NPN may\n # be absent, so allow that. This setup allows for negotiation of HTTP/1.1.\n self.ctx.set_alpn_protocols([\"h2\", \"http/1.1\"])\n\n try:\n self.ctx.set_npn_protocols([\"h2\", \"http/1.1\"])\n except NotImplementedError as e:\n print(\"TLS Error: NotImplementedError=%s\" % (e))\n pass\n\n #self.ctx = ctx\n\n return True",
"def format_tls_string(version):\n version = version.replace(\"TLS\", \"TLS \")\n protocol, number = version.split()\n return f\"{protocol} {int(number) / 10}\"",
"def __init__(self):\n super(BasicAuthenticationSuite, self).__init__()\n self._protocol = ssl.PROTOCOL_TLSv1\n self._ciphers = ':'.join((\n 'AES128-SHA',\n 'DES-CBC3-SHA',\n 'AES256-SHA',\n 'DHE-DSS-DES-CBC3-SHA',\n 'DHE-RSA-DES-CBC3-SHA',\n 'DH-DSS-AES128-SHA',\n 'DH-RSA-AES128-SHA',\n 'DHE-DSS-AES128-SHA',\n 'DHE-RSA-AES128-SHA',\n 'DH-RSA-AES256-SHA',\n 'DHE-DSS-AES256-SHA',\n 'DHE-RSA-AES256-SHA',\n ))",
"def get_tls(self):\n configured_value = self.charm_config[\"enable-tls\"]\n if configured_value:\n return configured_value\n return False",
"def test_tls_client_minimum_set(self):\n config = {\"federation_client_minimum_tls_version\": 1}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1\")\n\n config = {\"federation_client_minimum_tls_version\": 1.1}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1.1\")\n\n config = {\"federation_client_minimum_tls_version\": 1.2}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1.2\")\n\n # Also test a string version\n config = {\"federation_client_minimum_tls_version\": \"1\"}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1\")\n\n config = {\"federation_client_minimum_tls_version\": \"1.2\"}\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n self.assertEqual(t.federation_client_minimum_tls_version, \"1.2\")",
"def test_set_options(self):\n context = Context(SSLv23_METHOD)\n options = context.set_options(OP_NO_SSLv2)\n assert options & OP_NO_SSLv2 == OP_NO_SSLv2",
"def validate_tls_min_version(self, node=None, version=\"1.2\", expect=\"fail\"):\n if node is None:\n node = self.cluster.master\n cmd = self.curl_path + \" -v --tlsv\" + version + \" --tls-max \" + version + \\\n \" -u \" + node.rest_username + \":\" + node.rest_password + \\\n \" https://\" + node.ip + \":18091/pools/ -k\"\n shell = RemoteMachineShellConnection(node)\n o, e = shell.execute_command(cmd)\n if expect == \"fail\":\n if len(o) != 0:\n shell.disconnect()\n self.fail(\"Command worked when it should have failed\")\n else:\n if len(o) == 0 or \"pools\" not in o[0]:\n shell.disconnect()\n self.fail(\"Command failed when it should have worked\")\n shell.disconnect()",
"def test_getContext(self):\n contextFactory = crypto.SSLVerifyingContextFactory(self.url)\n self.assertIsInstance(contextFactory.getContext(),\n OpenSSL.SSL.Context)",
"def tls_min_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tls_min_version\")",
"def tls_min_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tls_min_version\")",
"def test_get_tls(matrix):\n matrix.charm_config[\"enable-tls\"] = True\n result = matrix.get_tls()\n assert result is True\n matrix.charm_config[\"enable-tls\"] = False\n result = matrix.get_tls()\n assert result is False",
"def tls_max_version(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"tls_max_version\")"
] | [
"0.7200872",
"0.7129749",
"0.70612466",
"0.6946584",
"0.69393426",
"0.6594127",
"0.65869886",
"0.65678525",
"0.6503111",
"0.6450828",
"0.6446566",
"0.64110285",
"0.63336414",
"0.6328864",
"0.63021713",
"0.62453425",
"0.62098765",
"0.6187503",
"0.6182842",
"0.61263424",
"0.6114017",
"0.6065197",
"0.60540086",
"0.6022161",
"0.6015842",
"0.6011889",
"0.5950615",
"0.5950615",
"0.5926178",
"0.58906674"
] | 0.7221435 | 0 |
Checks that ACME is enabled if the acme_domain arg is set to some string. | def test_acme_enabled_in_generated_config_domain_provided(self):
conf = TestConfig()
conf.read_config(
yaml.safe_load(
TestConfig().generate_config(
"/config_dir_path",
"my_super_secure_server",
"/data_dir_path",
tls_certificate_path="/tls_cert_path",
tls_private_key_path="tls_private_key",
acme_domain="my_supe_secure_server", # This is the acme_domain
)
),
"/config_dir_path",
)
self.assertTrue(conf.acme_enabled) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def _is_domain_allowed(email):\n domains = local_config.AuthConfig().get('whitelisted_domains', default=[])\n for domain in domains:\n if utils.normalize_email(email).endswith('@%s' % domain.lower()):\n return True\n\n return False",
"def test_acme_disabled_in_generated_config_no_acme_domain_provied(self):\n conf = TestConfig()\n conf.read_config(\n yaml.safe_load(\n TestConfig().generate_config(\n \"/config_dir_path\",\n \"my_super_secure_server\",\n \"/data_dir_path\",\n tls_certificate_path=\"/tls_cert_path\",\n tls_private_key_path=\"tls_private_key\",\n acme_domain=None, # This is the acme_domain\n )\n ),\n \"/config_dir_path\",\n )\n\n self.assertFalse(conf.acme_enabled)",
"def _supports_domain(cls, domain):\n return domain in (ZZ, QQ)",
"def _check_audience(self, request, audience):\n if not self.audiences:\n return audience == request.host_url\n for audience_pattern in self._audience_patterns:\n if audience_pattern.match(audience):\n return True\n return False",
"def is_active_domain(self, domain=\"www.google.com\", name_server='1.1.1.1'):\n my_resolver = dns.resolver.Resolver()\n my_resolver.nameservers = [name_server]\n my_resolver.timeout = 3\n my_resolver.lifetime = 3\n try:\n A = my_resolver.query(domain, 'A')\n for i in A.response.answer:\n for j in i.items:\n return self.is_actual_ip(str(j))\n except Exception as e:\n return None",
"def common_contact_authorization(dialersetting, str_contact):\n whitelist = dialersetting.whitelist\n blacklist = dialersetting.blacklist\n\n if whitelist == '*':\n whitelist = ''\n if blacklist == '*':\n blacklist = ''\n\n if whitelist and len(whitelist) > 0:\n try:\n result = re.search(whitelist, str_contact)\n if result:\n return True\n except ValueError:\n logger.error('Error to identify the whitelist')\n\n if blacklist and len(blacklist) > 0:\n try:\n result = re.search(blacklist, str_contact)\n if result:\n return False\n except ValueError:\n logger.error('Error to identify the blacklist')\n\n return True",
"def test_tenant_marketing_domain_should_be_accessible(self):\n response = self.client.get(\n self.home_url, HTTP_HOST=self.marketing_page.domain)\n self.assertEqual(response.status_code, 200)",
"def test_check_org_short_on_whitelist_true(self):\n\n org_name = 'AS36351'\n\n result = check_org_on_whitelist(org_name, self.pattern_org)\n\n self.assertTrue(result)",
"def test_check_org_on_whitelist_true(self):\n\n org_name = 'AS24940 Hetzner Online GmbH'\n\n result = check_org_on_whitelist(org_name, self.pattern_org)\n\n self.assertTrue(result)",
"def acme_challenge(self, domain):\n return self.network.send_and_receive_expected(\n messages.ChallengeRequest(identifier=domain),\n messages.Challenge)",
"def check_optin(address):\n account_info = ALGODCLIENT.account_info(address)\n try:\n account_info['assets'][str(ASSET_ID)]['amount']\n except KeyError:\n return False\n return True",
"async def validate_account(self) -> bool:\n raise NotImplementedError",
"def is_academia(organization: str):\n\n # Current list of keywords associated with academia\n keywords = [\"university\", \"college\", \"academy\"]\n\n # Check if any of the keywords can be found in the given organization\n # Case set to lower to minimize issues with formatting\n # https://stackoverflow.com/questions/8122079/python-how-to-check-a-string-for-substrings-from-a-list\n tf = any(map(organization.lower().__contains__, keywords))\n\n # Return true or false\n return tf",
"def indomain(url, domain):\n if url and domain:\n return url.startswith(domain)\n return False",
"def allowed_organization_access_use(user, org):\n try:\n up = user.get_profile()\n except AttributeError:\n return False\n\n if user.has_perm(\"vnswww.organization_use_any\"):\n return True\n elif user.has_perm(\"vnswww.organization_use_org\"):\n return org == up.org",
"def is_domain(value):\n result = any(check.isalpha() for check in value)\n return result",
"def validate_ecl(ecl: str) -> bool:\n return ecl in [\"amb\", \"blu\", \"brn\", \"gry\", \"grn\", \"hzl\", \"oth\"]",
"def valid_mx(self, domain):\n try:\n self.nslookup_installed()\n except:\n return True # Valid email as we cant check with nslookup\n\n p = subprocess.Popen(['nslookup', '-query=mx', domain], stdout=subprocess.PIPE,\n stderr=subprocess.PIPE)\n out, err = p.communicate()\n\n try:\n return bool(re.search('mail exchanger', out))\n except:\n # raise Exception(\"Exception in DNS lookup!\" + err)\n return False",
"def is_gcloud_auth_set():\n try:\n # This returns an email address of currently active account or empty string\n # if no account is active.\n output = subprocess.check_output([\n find_gcloud(), 'auth', 'list',\n '--filter=status:ACTIVE', '--format=value(account)',\n ])\n return bool(output.strip())\n except subprocess.CalledProcessError as exc:\n logging.error('Failed to check active gcloud account: %s', exc)\n return False",
"def test_tenant_domain_should_be_accessible(self):\n response = self.client.get(self.home_url, HTTP_HOST=self.site.domain)\n self.assertEqual(response.status_code, 200)",
"def contains_domain(address, domain):\n domain = r'[\\w\\.-]+@'+domain+'$'\n if re.match(domain,address):\n return True\n return False",
"def test_tenant_external_domain_should_be_accessible(self):\n response = self.client.get(self.home_url, HTTP_HOST=self.domain.domain)\n self.assertEqual(response.status_code, 200)",
"def check_anon_engine(self):\n if (self.config['anonymization'] and self.config['anonymization']['enabled']):\n # Anonymization enabled\n # Ensure that RSNA DICOM Anonymizer is found\n if not os.path.isfile('./DicomAnonymizerTool/DAT.jar'):\n questions = [\n inquirer.List('anon_files',\n message=\"RSNA DICOM Anonymizer JAR file not found. Do you still want to proceed?\",\n choices=['Continue without anonymization', 'Exit'],\n ),\n ]\n answers = inquirer.prompt(questions)\n if answers['anon_files'] == 'Exit':\n sys.exit()\n else:\n print('Anonymization DISABLED')\n return False\n\n # Ensure that anonymization scripts are found\n anon_script = self.config['anonymization']['script']\n anon_lut = self.config['anonymization']['lookup_table']\n if not os.path.isfile(anon_script):\n questions = [\n inquirer.List('anon_files',\n message=\"Anonymization script not found. Do you still want to proceed?\",\n choices=['Continue without anonymization', 'Exit'],\n ),\n ]\n answers = inquirer.prompt(questions)\n if answers['anon_files'] == 'Exit':\n sys.exit()\n else:\n print('Anonymization DISABLED')\n return False\n \n if not os.path.isfile(anon_lut):\n questions = [\n inquirer.List('anon_files',\n message=\"Anonymization look up table not found. Do you still want to proceed?\",\n choices=['Continue without look up table', 'Exit'],\n ),\n ]\n answers = inquirer.prompt(questions)\n if answers['anon_files'] == 'Exit':\n sys.exit()\n\n print('Anonymization ENABLED') \n return True\n else:\n print('Anonymization DISABLED')\n return False",
"def get_acme_account_data(self, acme_id, domains=None):\n valid_response = self.get_valid_response_entry(acme_id).copy()\n valid_response.setdefault(\"domains\", domains or [])\n return valid_response",
"def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator",
"def cog_check(self, ctx):\r\n return ctx.author.guild_permissions.administrator",
"def should_collapse_account(account_name):\n key = 'fava-collapse-account'\n if key in g.ledger.account_metadata(account_name):\n return g.ledger.account_metadata(account_name)[key] == 'True'\n else:\n return False",
"def has_academy_access(self, acad_id): \n\n master_access = (PermissionGroups.query\n .filter_by(group_name=\"Master\")\n .first())\n if self.has_auth_access(master_access):\n return True\n academy = Academy.query.filter_by(id=acad_id).first()\n \n if academy.id == self.academy_id:\n return True\n else:\n return False",
"def is_valid_cname(common_name: str) -> bool:\n return True if Band.band_range(common_name) else False",
"def auth_allowed(self, response, details):\n emails = self.setting('WHITELISTED_EMAILS', [])\n domains = self.setting('WHITELISTED_DOMAINS', [])\n teams = self.setting('WHITELISTED_TEAM_NAMES', [])\n team = details.get('team_name')\n email = details.get('email')\n allowed = True\n if email and (emails or domains):\n domain = email.split('@', 1)[1]\n allowed = email in emails or domain in domains\n if allowed and team and teams:\n allowed = team in teams\n return allowed"
] | [
"0.5735511",
"0.5725383",
"0.553573",
"0.5405438",
"0.54000163",
"0.52613115",
"0.5258438",
"0.5212149",
"0.5205378",
"0.51893073",
"0.5159638",
"0.5120427",
"0.5111011",
"0.50925523",
"0.5068178",
"0.5066398",
"0.5062281",
"0.50507987",
"0.49955666",
"0.49874535",
"0.49624866",
"0.4958339",
"0.4956199",
"0.49545935",
"0.4952141",
"0.4952141",
"0.49460858",
"0.4943691",
"0.4925718",
"0.4901614"
] | 0.65662074 | 0 |
The federation certificate whitelist will not allow IDNA domain names. | def test_whitelist_idna_failure(self):
config = {
"federation_certificate_verification_whitelist": [
"example.com",
"*.ドメイン.テスト",
]
}
t = TestConfig()
e = self.assertRaises(
ConfigError, t.read_config, config, config_dir_path="", data_dir_path=""
)
self.assertIn("IDNA domain names", str(e)) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_whitelist_idna_result(self):\n config = {\n \"federation_certificate_verification_whitelist\": [\n \"example.com\",\n \"*.xn--eckwd4c7c.xn--zckzah\",\n ]\n }\n t = TestConfig()\n t.read_config(config, config_dir_path=\"\", data_dir_path=\"\")\n\n cf = FederationPolicyForHTTPS(t)\n\n # Not in the whitelist\n opts = cf.get_options(b\"notexample.com\")\n self.assertTrue(opts._verifier._verify_certs)\n\n # Caught by the wildcard\n opts = cf.get_options(idna.encode(\"テスト.ドメイン.テスト\"))\n self.assertFalse(opts._verifier._verify_certs)",
"def get_domain_whitelist(self):\n whitelist = self.charm_config[\"federation-domain-whitelist\"]\n return list(filter(None, whitelist.split(\",\")))",
"def list_domain_names():\n pass",
"def validate_whitelist(self):\n\n for addr in self.whitelist:\n try:\n socket.gethostbyname_ex(addr)\n except:\n raise APIException(\n \"'{}' does not look like an ip or domain name\"\n .format(addr), 400)",
"def whitelist_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"whitelist_domains\")",
"def whitelist_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"whitelist_domains\")",
"def test_cn_ids_are_used_as_fallback(self):\n with pytest.warns(SubjectAltNameWarning):\n rv = extract_ids(X509_CN_ONLY)\n assert [\n DNSPattern(b\"www.microsoft.com\")\n ] == rv",
"def whitelist_domains(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"whitelist_domains\")",
"def _is_domain_allowed(email):\n domains = local_config.AuthConfig().get('whitelisted_domains', default=[])\n for domain in domains:\n if utils.normalize_email(email).endswith('@%s' % domain.lower()):\n return True\n\n return False",
"def list_domain_names(self) -> Dict:\n pass",
"def get_disable_public_fqdn(self) -> bool:\n return self._get_disable_public_fqdn(enable_validation=True)",
"def detect_domains (nffg):\n return {infra.domain for infra in nffg.infras}",
"def test_dns(self):\n rv = extract_ids(X509_DNS_ONLY)\n assert [\n DNSPattern(b\"www.twistedmatrix.com\"),\n DNSPattern(b\"twistedmatrix.com\")\n ] == rv",
"def allow_bare_domains(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_bare_domains\")",
"def allow_bare_domains(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_bare_domains\")",
"def allow_bare_domains(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_bare_domains\")",
"def enforce_hostnames(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enforce_hostnames\")",
"def enforce_hostnames(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"enforce_hostnames\")",
"def filter_domain(name):\n def wrapped(request):\n \"\"\" Function used to filter request\n \"\"\"\n if request.environ.get('HTTP_HOST'):\n url = request.environ['HTTP_HOST']\n else:\n url = request.environ['SERVER_NAME']\n if url.lower() == name.lower():\n return True\n\n return False\n return wrapped",
"def allow_unresolved_secret_tokens(self):\n return self._allow_unresolved_secret_tokens",
"def is_fqdn(address):\n\n return bool(re.match(re_fqdn, address))",
"def allowed_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"allowed_domains\")",
"def allowed_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"allowed_domains\")",
"def set_domain(self) -> None:\n self._fanfic.domain = \"Ficwad.com\"",
"def test_add_certificate_name_restrictions(self):\n response = self.client.post(\n '/api/v1/certificates',\n data=json.dumps(certificate_name_restrictions),\n content_type='application/json',\n headers=self.get_registrar_token())\n result = json.loads(response.data.decode())\n self.assertEqual(result['message'],\n 'Invalid certificate name')\n assert response.status_code == 400",
"def _ensure_fqdn(self, name):\n if name[-1:] != \".\":\n return \"%s.\" % name\n else:\n return name",
"def test_external_dns_policy(self) -> None:\n kubernetes_policies = self.iam.list_policies(PathPrefix='/kubernetes/').get('Policies')\n external_dns_policies = [policy for policy in kubernetes_policies if policy.get('PolicyName') == 'external-dns']\n self.assertEqual(1, len(external_dns_policies))",
"def _remove_common_hosts(fqdn):\n fqdn_parts = fqdn.split(\".\", 1)\n common_hosts = [\"*\", \"www\", \"mail\", \"cpanel\", \"webmail\",\n \"webdisk\", \"autodiscover\"]\n\n if len(fqdn_parts) > 1:\n if fqdn_parts[0] in common_hosts:\n return fqdn_parts[1]\n\n return fqdn",
"def chkfqdn(fqdn):\n if fqdn is None:\n return False\n hp = hostportion(fqdn)\n # not needed right now: pp = portportion(fqdn)\n # TODO need to augment this for IPv6 addresses\n return re.match('^[a-zA-Z0-9_-]+(\\\\.[a-zA-Z0-9_-]+)+$', hp) is not None",
"def relevant_domains(self):\n pass"
] | [
"0.75625795",
"0.617024",
"0.58902013",
"0.56733066",
"0.56661296",
"0.56661296",
"0.5613816",
"0.5505714",
"0.5429969",
"0.52565783",
"0.52504295",
"0.52081794",
"0.5184741",
"0.5165055",
"0.515711",
"0.515711",
"0.5116744",
"0.5116744",
"0.50894976",
"0.5084205",
"0.5051419",
"0.50486374",
"0.50486374",
"0.5041954",
"0.50363845",
"0.5012268",
"0.5011197",
"0.49993426",
"0.49982816",
"0.49812806"
] | 0.6794298 | 1 |
The federation certificate whitelist will match on IDNA encoded names. | def test_whitelist_idna_result(self):
config = {
"federation_certificate_verification_whitelist": [
"example.com",
"*.xn--eckwd4c7c.xn--zckzah",
]
}
t = TestConfig()
t.read_config(config, config_dir_path="", data_dir_path="")
cf = FederationPolicyForHTTPS(t)
# Not in the whitelist
opts = cf.get_options(b"notexample.com")
self.assertTrue(opts._verifier._verify_certs)
# Caught by the wildcard
opts = cf.get_options(idna.encode("テスト.ドメイン.テスト"))
self.assertFalse(opts._verifier._verify_certs) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_whitelist_idna_failure(self):\n config = {\n \"federation_certificate_verification_whitelist\": [\n \"example.com\",\n \"*.ドメイン.テスト\",\n ]\n }\n t = TestConfig()\n e = self.assertRaises(\n ConfigError, t.read_config, config, config_dir_path=\"\", data_dir_path=\"\"\n )\n self.assertIn(\"IDNA domain names\", str(e))",
"def test_cn_ids_are_used_as_fallback(self):\n with pytest.warns(SubjectAltNameWarning):\n rv = extract_ids(X509_CN_ONLY)\n assert [\n DNSPattern(b\"www.microsoft.com\")\n ] == rv",
"def get_domain_whitelist(self):\n whitelist = self.charm_config[\"federation-domain-whitelist\"]\n return list(filter(None, whitelist.split(\",\")))",
"def test_add_certificate_name_restrictions(self):\n response = self.client.post(\n '/api/v1/certificates',\n data=json.dumps(certificate_name_restrictions),\n content_type='application/json',\n headers=self.get_registrar_token())\n result = json.loads(response.data.decode())\n self.assertEqual(result['message'],\n 'Invalid certificate name')\n assert response.status_code == 400",
"def validate_whitelist(self):\n\n for addr in self.whitelist:\n try:\n socket.gethostbyname_ex(addr)\n except:\n raise APIException(\n \"'{}' does not look like an ip or domain name\"\n .format(addr), 400)",
"def is_valid_name(self):\n\n if self.whitelist_name == '':\n return True\n\n if len(self.whitelist_name) >= 64:\n LOGGER.debug('invalid name %s; must be less than 64 bytes',\n self.whitelist_name)\n return False\n\n return True",
"def test_legal_names(self):\n names = [prod.name for prod in generate_products()]\n sep = [(name.split()[0], name.split()[1]) for name in names]\n for name in sep:\n self.assertIn(name[0], ADJS)\n self.assertIn(name[1], NOUNS)",
"def whitelist_file(self, fkey):\n self.whitelist.update([fkey])",
"def whitelist_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"whitelist_domains\")",
"def whitelist_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"whitelist_domains\")",
"def encode_san_dns_names(self, san):\n dns_names = []\n for dns_name in san:\n dns_names.append(x509.DNSName(dns_name))\n return dns_names",
"def test_mixed_verfication(self):\n\n lists_map = ima.process_allowlists(ALLOWLIST, '')\n lists_map_wrong = ima.process_allowlists(ALLOWLIST_WRONG, '')\n lists_map_empty = ima.process_allowlists(ALLOWLIST_EMPTY, '')\n lists_map_exclude = ima.process_allowlists(ALLOWLIST, EXCLUDELIST)\n lists_map_exclude_wrong = ima.process_allowlists(ALLOWLIST_WRONG, EXCLUDELIST)\n empty_keyring = ima_file_signatures.ImaKeyring()\n\n # every entry is covered by the allowlist and there's no keyring -> this should pass\n self.assertTrue(ima.process_measurement_list(COMBINED.splitlines(), str(lists_map)) is not None)\n\n curdir = os.path.dirname(os.path.abspath(__file__))\n keydir = os.path.join(curdir, \"data\", \"ima_keys\")\n keyring = ima_file_signatures.ImaKeyring()\n\n rsakeyfile = os.path.join(keydir, \"rsa2048pub.pem\")\n pubkey, keyidv2 = ima_file_signatures.get_pubkey_from_file(rsakeyfile)\n keyring.add_pubkey(pubkey, keyidv2)\n\n eckeyfile = os.path.join(keydir, \"secp256k1.pem\")\n pubkey, keyidv2 = ima_file_signatures.get_pubkey_from_file(eckeyfile)\n keyring.add_pubkey(pubkey, keyidv2)\n\n # entries are not covered by a exclude list -> this should fail\n self.assertTrue(ima.process_measurement_list(COMBINED.splitlines(), ima_keyring=keyring) is None)\n\n # all entries are either covered by allow list or by signature verification -> this should pass\n self.assertTrue(ima.process_measurement_list(COMBINED.splitlines(), str(lists_map), ima_keyring=keyring) is not None)\n\n # the signature is valid but the hash in the allowlist is wrong -> this should fail\n self.assertTrue(ima.process_measurement_list(SIGNATURES.splitlines(), str(lists_map_wrong), ima_keyring=keyring) is None)\n\n # the signature is valid and the file is not in the allowlist -> this should pass\n self.assertTrue(ima.process_measurement_list(SIGNATURES.splitlines(), str(lists_map_empty), ima_keyring=keyring) is not None)\n\n # the signature is invalid but the correct hash is in the allowlist -> this should fail\n self.assertTrue(ima.process_measurement_list(SIGNATURES.splitlines(), str(lists_map), ima_keyring=empty_keyring) is None)\n\n # the file has no signature but the hash is correct -> this should pass\n self.assertTrue(ima.process_measurement_list(MEASUREMENTS.splitlines(), str(lists_map)))\n\n # All files are in the exclude list but hashes are invalid -> this should pass\n self.assertTrue(ima.process_measurement_list(MEASUREMENTS.splitlines(), str(lists_map_exclude_wrong)) is not None)\n\n # All files are in the exclude list and their signatures are invalid -> this should pass\n self.assertTrue(ima.process_measurement_list(SIGNATURES.splitlines(), str(lists_map_exclude), ima_keyring=empty_keyring) is not None)\n\n # All files are in the exclude list but hashes or signatures are invalid -> this should pass\n self.assertTrue(ima.process_measurement_list(MEASUREMENTS.splitlines(), str(lists_map_exclude_wrong), ima_keyring=empty_keyring) is not None)",
"def is_whitelisted(self, fkey):\n return fkey in self.whitelist",
"def whitelist_domains(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"whitelist_domains\")",
"def test_dns(self):\n rv = extract_ids(X509_DNS_ONLY)\n assert [\n DNSPattern(b\"www.twistedmatrix.com\"),\n DNSPattern(b\"twistedmatrix.com\")\n ] == rv",
"def short_whitelist(whitelist):\n for x in [\"guid-4\", \"guid-5\"]:\n whitelist.remove(x)\n return whitelist",
"def allow(self, words):\n\t\tallowed = [word for word in words if re.match('^[A-Za-z0-9\\.\\,\\:\\;\\!\\?\\(\\)\\'\\-\\$\\@\\%\\\"]+$', word) is not None]\t\t\n\t\treturn allowed",
"def whitelist_ips(self):\r\n if self.whitelist == '':\r\n return []\r\n return self.IPFilterList([addr.strip() for addr in self.whitelist.split(',')]) # pylint: disable=no-member\r",
"def allow_wildcard_certificates(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_wildcard_certificates\")",
"def allow_wildcard_certificates(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_wildcard_certificates\")",
"def detect_domains (nffg):\n return {infra.domain for infra in nffg.infras}",
"def allow_wildcard_certificates(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_wildcard_certificates\")",
"def _validate_namespace_whitelist(namespace_whitelist):\n if namespace_whitelist is None:\n return []\n if not isinstance(namespace_whitelist, list):\n raise TypeError(\"Namespace whitelist must be a list of strings.\")\n\n processed = []\n for namespace in namespace_whitelist:\n if not isinstance(namespace, six.string_types):\n raise ValueError(\"Whitelisted namespace must be a string. Got: {} of type\"\n \" {}.\".format(namespace, type(namespace)))\n processed.append(compat.as_str(namespace))\n return processed",
"def read_whitelist(whitelist_file):\n\n # create an empty dictionary to store the white lists\n whitelistdict = {}\n with open(whitelist_file, 'r') as fp:\n for line in fp:\n whitelistdict[line.strip()] = True\n\n # return the list of whitelist\n return whitelistdict.keys()",
"def ignore_listings(name_key):\n # for blacklist_str in models_blacklist:\n # if blacklist_str in name_key:\n # return True\n return False",
"def allowed(cls):\n # type: () -> List[Str]\n names = cls.names()\n allowed = names\n allowed.extend([name.lower() for name in names])\n return allowed",
"def __compare_lowercase(self, dn, dn_list):\n\t\tfor d in dn_list:\n\t\t\tif dn.lower() == d.lower():\n\t\t\t\treturn True\n\t\treturn False",
"def FullTrustAssemblies(self) -> _n_1_t_2:",
"def load_name(self, name_att_list):\n attlist = []\n got = set()\n for k, v in name_att_list:\n if k in got and k not in DN_ALLOW_MULTIPLE:\n raise InvalidCertificate(\"Multiple Name keys not allowed: %s\" % (k,))\n oid = DN_CODE_TO_OID[k]\n n = x509.NameAttribute(oid, as_unicode(v))\n attlist.append(n)\n return x509.Name(attlist)",
"def allow_unresolved_secret_tokens(self):\n return self._allow_unresolved_secret_tokens"
] | [
"0.6425439",
"0.5514384",
"0.5432561",
"0.52561045",
"0.5184046",
"0.49741006",
"0.4893893",
"0.48836952",
"0.48803678",
"0.48803678",
"0.48693806",
"0.4830566",
"0.4822639",
"0.48119327",
"0.48084122",
"0.4804135",
"0.4793144",
"0.47902942",
"0.47870547",
"0.47870547",
"0.47820476",
"0.4768539",
"0.4761599",
"0.4744953",
"0.47338533",
"0.47171032",
"0.47055587",
"0.4699709",
"0.46956438",
"0.46670696"
] | 0.7812035 | 0 |
get the options bits from an openssl context object | def _get_ssl_context_options(ssl_context: SSL.Context) -> int:
# the OpenSSL.SSL.Context wrapper doesn't expose get_options, so we have to
# use the low-level interface
return SSL._lib.SSL_CTX_get_options(ssl_context._context) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def ssl_options_to_context(ssl_options):\n ...",
"def test_set_options(self):\n context = Context(SSLv23_METHOD)\n options = context.set_options(OP_NO_SSLv2)\n assert options & OP_NO_SSLv2 == OP_NO_SSLv2",
"def _context(use_tls=False):\n if use_tls is False:\n return None\n config = Config()\n ctx = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)\n ctx.load_cert_chain(config.tls_cert, config.tls_key)\n ctx.options |= ssl.OP_NO_SSLv2\n ctx.options |= ssl.OP_NO_SSLv3\n ctx.options |= ssl.OP_NO_COMPRESSION\n ctx.options |= ssl.OP_CIPHER_SERVER_PREFERENCE\n if not config.args.less_secure:\n ctx.options |= ssl.OP_SINGLE_DH_USE\n ctx.options |= ssl.OP_SINGLE_ECDH_USE\n ctx.set_ciphers(\":\".join(ciphers))\n if config.tls_dhparams:\n ctx.load_dh_params(config.tls_dhparams)\n return ctx",
"def context():\n return Context(SSLv23_METHOD)",
"def _get_options(ret):\n attrs = {\"host\": \"host\", \"port\": \"port\", \"skip\": \"skip_on_error\", \"mode\": \"mode\"}\n\n _options = salt.returners.get_returner_options(\n __virtualname__, ret, attrs, __salt__=__salt__, __opts__=__opts__\n )\n return _options",
"def _get_options(self):\n return self.options",
"def get_options(self):\n\t\treturn self.options",
"def __call__(self):\n ssl_mode, external_ca = ssl_utils.get_ssl_mode()\n\n ctxt = {\n 'ssl_mode': ssl_mode,\n }\n\n if ssl_mode == 'off':\n close_port(config('ssl_port'))\n ssl_utils.reconfigure_client_ssl()\n return ctxt\n\n ssl_key = convert_from_base64(config('ssl_key'))\n ssl_cert = convert_from_base64(config('ssl_cert'))\n ssl_ca = convert_from_base64(config('ssl_ca'))\n ssl_port = config('ssl_port')\n\n # If external managed certs then we need all the fields.\n if (ssl_mode in ('on', 'only') and any((ssl_key, ssl_cert)) and\n not all((ssl_key, ssl_cert))):\n log('If ssl_key or ssl_cert are specified both are required.',\n level=ERROR)\n sys.exit(1)\n\n if not external_ca:\n ssl_cert, ssl_key, ssl_ca = ServiceCA.get_service_cert()\n\n ctxt.update(self.enable_ssl(\n ssl_key, ssl_cert, ssl_port, ssl_ca,\n ssl_only=(ssl_mode == \"only\"), ssl_client=False\n ))\n\n ssl_utils.reconfigure_client_ssl(True)\n open_port(ssl_port)\n\n return ctxt",
"async def get_options(self):",
"def __get_http2_ssl_context(self):\n # Get the basic context from the standard library.\n if self.client_side == False:\n #self.ctx = ssl.create_default_context(purpose=ssl.Purpose.CLIENT_AUTH)\n self.ctx = ssl._create_unverified_context()\n else:\n #self.ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH, cafile=self.server_cert)\n #self.ctx = ssl.create_default_context(purpose=ssl.Purpose.SERVER_AUTH)\n self.ctx = ssl._create_unverified_context()\n\n # RFC 7540 Section 9.2: Implementations of HTTP/2 MUST use TLS version 1.2\n # or higher. Disable TLS 1.1 and lower.\n self.ctx.options |= (\n ssl.OP_NO_SSLv2 | ssl.OP_NO_SSLv3 | ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1\n )\n\n # RFC 7540 Section 9.2.1: A deployment of HTTP/2 over TLS 1.2 MUST disable\n # compression.\n self.ctx.options |= ssl.OP_NO_COMPRESSION\n\n # RFC 7540 Section 9.2.2: \"deployments of HTTP/2 that use TLS 1.2 MUST\n # support TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256\". In practice, the\n # blacklist defined in this section allows only the AES GCM and ChaCha20\n # cipher suites with ephemeral key negotiation.\n\n\n if self.client_side == False:\n self.ctx.load_cert_chain(certfile=self.server_cert, keyfile=self.server_key)\n self.ctx.load_verify_locations(cafile=self.client_certs) \n else:\n self.ctx.load_cert_chain(certfile=self.client_certs, keyfile=self.client_key)\n self.ctx.load_verify_locations(cafile=self.client_certs) \n pass\n\n\n\n # We want to negotiate using NPN and ALPN. ALPN is mandatory, but NPN may\n # be absent, so allow that. This setup allows for negotiation of HTTP/1.1.\n self.ctx.set_alpn_protocols([\"h2\", \"http/1.1\"])\n\n try:\n self.ctx.set_npn_protocols([\"h2\", \"http/1.1\"])\n except NotImplementedError as e:\n print(\"TLS Error: NotImplementedError=%s\" % (e))\n pass\n\n #self.ctx = ctx\n\n return True",
"def getContext(self):\n ctx = SSL.Context(SSL.SSLv23_METHOD)\n ctx.use_certificate_file(Settings.BaseDir+'/server.pem')\n ctx.use_privatekey_file(Settings.BaseDir+'/privkey.pem')\n return ctx",
"def options(self):\r\n return self._options",
"def test_get_context(self):\n context = Context(SSLv23_METHOD)\n connection = Connection(context, None)\n assert connection.get_context() is context",
"def options(self):\n return self.__options",
"def connectionoptions(self, tokens):\n\n return self.process_value_pairs(tokens, \"connectionoptions\")",
"def getContext(self):\n ctx = SSL.Context(SSL.SSLv3_METHOD)\n ctx.use_certificate_file(config.client.certificate)\n ctx.use_privatekey_file(config.client.private_key)\n return ctx",
"def get_options(cls, player, context={}):\n\t\traise NotImplementedError()",
"def Options(self) -> SocketInformationOptions:",
"def _get_ssl_context(self):\n context = ssl.SSLContext(self.TLS_VERSION)\n context.load_cert_chain(self.ssl_cert, self.ssl_key)\n return context",
"def getclsoptions(cls, tmpcls, session=None):\n if session is not None:\n cls.setclsoptions(tmpcls, session)\n return optionsdict[tmpcls]['OPTIONS']",
"def get_options(self):\n return self._scoped_options",
"def _parser_options():\n #We have two options: get some of the details from the config file,\n import argparse\n import sys\n from matdb import base\n pdescr = \"MATDB Context Finder\"\n parser = argparse.ArgumentParser(parents=[base.bparser], description=pdescr)\n for arg, options in _script_options.items():\n parser.add_argument(arg, **options)\n \n args = base.exhandler(examples, parser)\n if args is None:\n return\n\n return args",
"def get_options():\n\n global args\n\n options = parser.add_argument_group(\"flags\")\n options.add_argument(\n \"-t\",\n \"--hash-type\",\n help=\"type of hash from the following: lm, ntlm, md4, md5, sha1, sha256, sha512\",\n metavar=\"\",\n required=True,\n choices=[\"lm\", \"ntlm\", \"md4\", \"md5\", \"sha1\", \"sha256\", \"sha512\"],\n )\n options.add_argument(\n \"-w\",\n \"--wordlist\",\n help=\"file path to wordlist\",\n metavar=\"\",\n type=argparse.FileType(\"r\"),\n required=True,\n )\n\n hash_group = options.add_mutually_exclusive_group(required=True)\n hash_group.add_argument(\n \"-s\", \"--hash-string\", help=\"hash string to crack\", metavar=\"\"\n )\n hash_group.add_argument(\n \"-l\",\n \"--hash-list\",\n help=\"file path to the list of hashes\",\n metavar=\"\",\n type=argparse.FileType(\"r\"),\n )\n\n args = parser.parse_args()",
"def get_options(self):\r\n return self._option_values",
"def svn_client_ctx_t_config_get(svn_client_ctx_t_self): # real signature unknown; restored from __doc__\n pass",
"def SoapOptions(self) -> SoapOption:",
"def _options(self):\n return",
"async def get_options(self) -> List[Tuple[str, str]]:\n options = [\n (\"TRUE\", \"true\"),\n (\"FALSE\", \"false\"),\n ]\n if self.context.get(\"null\"):\n options.insert(0, (\"\", \"\"))\n\n return options",
"def _get_options(self, *, cache_key: str) -> Union[PublicKeyCredentialRequestOptions, None]:\n options: str = self.redis.retrieve(key=cache_key)\n if options is None:\n return options\n\n # We can't use PublicKeyCredentialRequestOptions.parse_raw() because\n # json_loads_base64url_to_bytes() doesn't know to convert these few values to bytes, so we\n # have to do it manually\n options_json: dict = json_loads_base64url_to_bytes(options)\n options_json[\"challenge\"] = base64url_to_bytes(options_json[\"challenge\"])\n options_json[\"allowCredentials\"] = [\n {**cred, \"id\": base64url_to_bytes(cred[\"id\"])}\n for cred in options_json[\"allowCredentials\"]\n ]\n\n return PublicKeyCredentialRequestOptions.parse_obj(options_json)",
"def get_cipher_bits(sock):\n cipher = binding_lib.SSL_get_current_cipher(sock._ssl)\n if cipher == binding_ffi.NULL:\n return None\n\n return binding_lib.SSL_CIPHER_get_bits(cipher, binding_ffi.NULL)"
] | [
"0.73579234",
"0.64249",
"0.62168473",
"0.61505395",
"0.605283",
"0.6007271",
"0.5896078",
"0.58376265",
"0.5828241",
"0.58178514",
"0.58104944",
"0.5766421",
"0.57381654",
"0.5734007",
"0.5730924",
"0.5724922",
"0.57243115",
"0.57175404",
"0.5710651",
"0.5698366",
"0.56677544",
"0.56409144",
"0.560798",
"0.5502821",
"0.5482279",
"0.5474451",
"0.54672503",
"0.54615426",
"0.5457017",
"0.5439185"
] | 0.75272393 | 0 |
Extracts latitude and longitude from geotags. | def get_coordinates(geotags) -> Tuple[float, float]:
lat = get_decimal_from_dms(
geotags['GPSLatitude'],
geotags['GPSLatitudeRef'],
)
lon = get_decimal_from_dms(
geotags['GPSLongitude'],
geotags['GPSLongitudeRef'],
)
return lat, lon | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_geotag(post_content):\n post_map = post_content.find(\"div\", {\"id\": \"map\"})\n geotag_attr = {\"lat\": \"\", \"lon\": \"\"}\n if post_map is not None:\n geotag_attr[\"lat\"] = post_map.attrs[\"data-latitude\"]\n geotag_attr[\"lon\"] = post_map.attrs[\"data-longitude\"]\n return geotag_attr",
"def geog(self) -> typing.Union[None, typing.Tuple[float, float]]:\n geog = self.data[4]\n geog = re.findall(r'(\\d{2})(\\d{2})(\\d{2}\\.\\d+)([NS]) (\\d{3})(\\d{2})(\\d{2}\\.\\d+)([EW])', geog)\n\n if geog:\n lat_deg, lat_min, lat_sec, lat_dir, long_deg, long_min, long_sec, long_dir = geog[0]\n\n lat = Point.parse_degrees(lat_deg, lat_min, lat_sec, direction=lat_dir)\n long = Point.parse_degrees(long_deg, long_min, long_sec, direction=long_dir)\n return lat, long\n return None",
"def get_gps_data(i):\n info = i._getexif()\n exif_data={}\n lat = np.nan\n lon = np.nan\n alt = np.nan\n for tag, value in info.items():\n decoded = TAGS.get(tag, tag)\n if decoded == \"GPSInfo\":\n gps_data = {}\n for t in value:\n sub_decoded = GPSTAGS.get(t, t)\n gps_data[sub_decoded] = value[t]\n\n lat,lon,alt=latlon_from_gps_data(gps_data)\n\n return lat,lon,alt",
"def metadataGeoTags(tif: TiffFile):\n geoTag: TiffTag = tif.pages[0].tags.get('GeoKeyDirectoryTag')\n if geoTag is not None:\n g: TiffTag = tif.pages[0].tags.get(34737)\n g2: TiffTag = tif.pages[0].tags.get(34736)\n g3: TiffTag = tif.pages[0].tags.get(33922)\n g4: TiffTag = tif.pages[0].tags.get(33550)\n\n tags = [(geoTag.code, 'H', geoTag.count, geoTag.value),\n (g.code, 's', g.count, g.value),\n (g2.code, 'd', g2.count, g2.value),\n (g3.code, 'd', g3.count, g3.value),\n (g4.code, 'd', g4.count, g4.value)]\n return tags\n else:\n print('no geo tags in file')",
"def get_coordinates(self, soup: BeautifulSoup) -> None:\n try:\n url = soup.find(\n \"a\", {\"title\": \"Open this area in Google Maps (opens a new window)\"}\n )[\"href\"]\n coordinates = url[url.find(\"=\") + 1 : url.find(\"&\")]\n coordinates = [float(n) for n in coordinates.split(\",\")]\n except (AttributeError, TypeError):\n coordinates = [None, None]\n self.__collected_dic[\"latitude\"].append(coordinates[0])\n self.__collected_dic[\"longitude\"].append(coordinates[1])",
"def get_latlon():\n\t\n iss.compute() # Get the lat/long values from ephem\n long_value = [float(i) for i in str(iss.sublong).split(\":\")]\n if long_value[0] < 0:\n long_value[0] = abs(long_value[0])\n cam.exif_tags['GPS.GPSLongitudeRef'] = \"W\"\n else:\n cam.exif_tags['GPS.GPSLongitudeRef'] = \"E\"\n cam.exif_tags['GPS.GPSLongitude'] = '%d/1,%d/1,%d/10' % (long_value[0], long_value[1], long_value[2]*10)\n lat_value = [float(i) for i in str(iss.sublat).split(\":\")]\n if lat_value[0] < 0:\n lat_value[0] = abs(lat_value[0])\n cam.exif_tags['GPS.GPSLatitudeRef'] = \"S\"\n else:\n cam.exif_tags['GPS.GPSLatitudeRef'] = \"N\"\n cam.exif_tags['GPS.GPSLatitude'] = '%d/1,%d/1,%d/10' % (lat_value[0], lat_value[1], lat_value[2]*10)\n return (iss.sublat / degree, iss.sublong / degree)",
"def get_location(self):\n # h = b'\\r\\nAT-MSGEO\\r\\r\\n-MSGEO: -3936,3464,-3612,7402d50c\\r\\n\\r\\n'\n # an example of the string returned from the AT-MSGEO used for testing.\n h = self.acquire_response(b'AT-MSGEO')\n if isinstance(h, bytes):\n h = h.decode('utf-8')\n h = h.strip()\n h = h.split(':')\n h = h[1].split(',')\n x = int(h[0])*1000 # Convert coordinates to meters.\n y = int(h[1])*1000\n z = int(h[2])*1000\n else:\n print('Location not available')\n\n # 'geocent' refers to the geo-centered frame that the co-ordinates are returned in\n inProj = Proj(proj='geocent', ellps='WGS84', datum='WGS84')\n\n # 'latlong' is the frame to be converted to\n outProj = Proj(proj='latlong', ellps='WGS84', datum='WGS84')\n\n # Convert X, Y, Z to latitude, longitude and altitude\n long, lat, alt = transform(inProj, outProj, x, y, z, radians=False)\n # l = [str(long), str(lat), str(alt)]\n return long, lat, alt",
"def find_points_of_interest(geotag, location):\n\n area = \"\"\n transit_stop = \"\"\n transit_dist = float('inf')\n shuttle_stop = \"\"\n shuttle_dist = float('inf')\n\n if geotag is not None:\n # Check to see if the string description of the neighborhood matches anything in our list of neighborhoods.\n for hood in settings.NEIGHBORHOODS:\n if hood.lower() in location.lower():\n area = hood\n\n # Look to see if the listing is in any of the neighborhood boxes we defined.\n if len(area) == 0:\n for hood, box_coords in settings.BOXES.items():\n if in_box(geotag, box_coords):\n area = hood\n\n # Find the closest transit stations.\n for station, coords in settings.TRANSIT_STATIONS.items():\n dist = coord_distance(coords[0], coords[1], geotag[0], geotag[1])\n if dist < transit_dist:\n transit_stop = station\n transit_dist = dist\n\n # Find the closest shuttle stop.\n for stop, coords in settings.SHUTTLE_STOPS.items():\n dist = coord_distance(coords[0], coords[1], geotag[0], geotag[1])\n if dist < shuttle_dist:\n shuttle_stop = stop\n shuttle_dist = dist\n\n return {\n \"area\": area,\n \"transit_stop\": transit_stop,\n \"transit_dist\": transit_dist,\n \"shuttle_stop\": shuttle_stop,\n \"shuttle_dist\": shuttle_dist,\n }",
"def get_lat_lon(exif_data):\n lat = None\n lon = None\n\n if \"GPSInfo\" in exif_data: \n gps_info = exif_data[\"GPSInfo\"]\n \n gps_latitude = _get_if_exist(gps_info, \"GPSLatitude\")\n gps_latitude_ref = _get_if_exist(gps_info, 'GPSLatitudeRef')\n gps_longitude = _get_if_exist(gps_info, 'GPSLongitude')\n gps_longitude_ref = _get_if_exist(gps_info, 'GPSLongitudeRef')\n \n if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:\n lat = _convert_to_decimal(gps_latitude)\n if gps_latitude_ref != \"N\": \n lat = 0.0 - lat\n \n lon = _convert_to_decimal(gps_longitude)\n if gps_longitude_ref != \"E\":\n lon = 0.0 - lon\n return lat, lon",
"def _get_coordinates(self, place):\n if re.match(r\"-?[\\d.]+,-?[\\d.]+\", place):\n return tuple(place.split(\",\"))\n return tuple(\n str(coord) for coord in self._geocoder.geosearch(place).coordinates\n )",
"def _extract_coords_loc_entities(loc_entities: Iterable[GeoLocation]):\n return [\n (loc[\"Latitude\"], loc[\"Longitude\"])\n for loc in loc_entities\n if \"Latitude\" in loc and \"Longitude\" in loc\n ]",
"def best_coords(self):\n lat, lon = None, None\n for term in self.terms:\n # print(term)\n # print(term['weight'])\n geo = term.get(\"geo\")\n if geo:\n osm = geo['osm']\n gm = geo['gm']\n geo_data = None\n if osm:\n geo_data = osm\n elif gm:\n geo_data = gm\n if geo_data:\n g = geo_data[0]\n lat, lon = g['latitude'], g['longitude']\n break\n return lat, lon, self.region",
"def get_lat_lng(self):\n self.input_api = '%20'.join(self.parsed_question)\n self.input_api = ' '.join(self.parsed_question)\n self.google_api_url = 'https://maps.googleapis.com/maps/api/place/findplacefromtext/json?input={}&inputtype=textquery&fields=geometry,name,place_id&types=point_of_interest&key={}'.format (self.input_api, api_key) \n self.r = requests.get(url=self.google_api_url)\n self.data = self.r.json()\n self.name = self.data['candidates'][0]['name']\n self.place_id = self.data['candidates'][0]['place_id']\n self.lat = self.data['candidates'][0]['geometry']['location']['lat']\n self.lng = self.data['candidates'][0]['geometry']['location']['lng']\n print(self.lat, self.lng, self.place_id)\n return (self.lat, self.lng, self.place_id)",
"def parse_coords(geo: str) -> Tuple[float, float]:\n lat, long = [float(x.strip()) for x in geo.split(\",\")]\n if lat > 90 or lat < -90:\n raise ValueError(\"latitude does not fall in the range (-90, 90)\")\n if long > 180 or long < -180:\n raise ValueError(\"longitude does not fall in the range (-180, 180)\")\n return (lat, long)",
"def get_location(geoname):\n\n DB_NAME = global_settings.DB_NAME_GEONAMES\n db_user = global_settings.POSTGRESQL_USERNAME\n db_password = global_settings.POSTGRESQL_PASSWORD\n db_host = global_settings.POSTGRESQL_HOST\n db_port = global_settings.POSTGRESQL_PORT\n\n sql = \"SELECT latitude, longitude FROM {} WHERE name like '{}'\".format(global_settings.TABLE_NAME_GEONAMES, geoname)\n\n resp = sqlExecute(DB_NAME, db_user, db_password, db_host, db_port, sql, True)\n\n if not resp['success']:\n return []\n\n lat_long = []\n\n for data in resp['data']:\n lat_long.append([data[0], data[1]])\n\n return lat_long",
"def get_exif_location(self, exif_data, lonlat):\n\n if lonlat=='lonlat':\n lat = ''\n lon = ''\n gps_latitude = self._get_if_exist(exif_data, 'GPS GPSLatitude')\n gps_latitude_ref = self._get_if_exist(exif_data, 'GPS GPSLatitudeRef')\n gps_longitude = self._get_if_exist(exif_data, 'GPS GPSLongitude')\n gps_longitude_ref = self._get_if_exist(exif_data, 'GPS GPSLongitudeRef')\n\n if gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:\n lat = self._convert_to_degress(gps_latitude)\n if gps_latitude_ref.values[0] != 'N':\n lat = 0 - lat\n\n lon = self._convert_to_degress(gps_longitude)\n if gps_longitude_ref.values[0] != 'E':\n lon = 0 - lon\n\n return lat, lon",
"def get_coordinates(text):\n m = re.search(COORD_PATTERN, text)\n if m:\n neglat = m.groups(0)[0]\n latitude = neglat + m.groups(0)[1]\n neglong = m.groups(0)[2]\n longitude = neglong + m.groups(0)[3]\n return {\n \"lat\": latitude,\n \"lon\": longitude\n }\n return None",
"def find_points_of_interest(geotag, location):\n\tarea_found = False\n\tarea = \"\"\n\tmin_dist = None\n\tnear_ttc = False\n\tttc_dist = \"N/A\"\n\tttc = \"\"\n\t#look to see if listing is in any of neighborhood boxes defined\n\tfor a, coords in settings.BOXES.items():\n\t\tif in_box(geotag, coords):\n\t\t\tarea = a\n\t\t\tarea_found = True\n\n\t#check to see if listing is near any transit stations\n\tfor station, coords in settings.TRANSIT_STATIONS.items():\n\t\tdist = coord_distance(coords[0], coords[1], geotag[0], geotag[1])\n\t\tif (min_dist is None or dist < min_dist) and dist < settings.MAX_TRANSIT_DIST:\n\t\t\tttc = station\n\t\t\tnear_ttc = True\n\n\t\tif (min_dist is None or dist < min_dist):\n\t\t\tttc_dist = dist\n\n\t#if listing isn't in any of boxes we defined check to see if string description of neighborhood\n\t#matches anything in our list of neighborhoods\n\tif len(area) == 0:\n\t\tfor hood in settings.NEIGHBORHOODS:\n\t\t\tif hood in location.lower():\n\t\t\t\tarea = hood \n\n\treturn {\n\t\t\"area_found\": area_found,\n\t\t\"area\": area,\n\t\t\"near_ttc\": near_ttc,\n\t\t\"ttc_dist\": ttc_dist,\n\t\t\"ttc\": ttc\n\t}",
"def get_latlong():\r\n info = urllib.request.urlopen(\"https://ipinfo.io\").read()\r\n decoded = json.loads(info)\r\n print(decoded[\"loc\"])\r\n return decoded[\"loc\"]",
"def getLngLat(self):\n lat = None\n lng = None\n exif_data = self.get_exif_data()\n if \"GPS\" in exif_data:\n gps_data = exif_data[\"GPS\"]\n gps_latitude = self.get_if_exist(\n gps_data, piexif.GPSIFD.GPSLatitude)\n gps_latitude_ref = self.get_if_exist(\n gps_data, piexif.GPSIFD.GPSLatitudeRef)\n gps_longitude = self.get_if_exist(\n gps_data, piexif.GPSIFD.GPSLongitude)\n gps_longitude_ref = self.get_if_exist(\n gps_data, piexif.GPSIFD.GPSLongitudeRef)\n\t\t\t\t# \\ means backward slash which is used to repersent white space\n if gps_latitude and gps_latitude_ref \\\n and gps_longitude and gps_longitude_ref:\n gps_latitude_ref = gps_latitude_ref.decode(\"utf-8\")\n gps_longitude_ref = gps_longitude_ref.decode(\"utf-8\")\n lat = self.convert_to_degress(gps_latitude)\n if gps_latitude_ref != 'N':\n lat = 0 - lat\n lng = self.convert_to_degress(gps_longitude)\n if gps_longitude_ref != 'E':\n lng = 0 - lng\n return lat, lng",
"def get_lat_and_long(row):\r\n\tlatitude = row['latitude']\r\n\tlongitude = row['longitude']\r\n\treturn latitude, longitude",
"def get_lat_lon(exif_data):\n\tlat = None\n\tlon = None\n\tgps_latitude = None\n\tgps_latitude_ref = None\n\tgps_longitude = None\n\tgps_longitude_ref = None\n\n\tif \"GPSInfo\" in exif_data:\t\t\n\t\tgps_info = exif_data[\"GPSInfo\"]\n\n\t\tgps_latitude = gps_info[\"GPSLatitude\"]\n\t\tgps_latitude_ref = gps_info[\"GPSLatitudeRef\"]\n\t\tgps_longitude = gps_info[\"GPSLongitude\"]\n\t\tgps_longitude_ref = gps_info[\"GPSLongitudeRef\"]\n\n\t\tif gps_latitude and gps_latitude_ref and gps_longitude and gps_longitude_ref:\n\t\t\tlat = convert_to_degrees(gps_latitude)\n\t\t\tif gps_latitude_ref != \"N\": \n\t\t\t\tlat = 0 - lat\n\n\t\t\tlon = convert_to_degrees(gps_longitude)\n\t\t\tif gps_longitude_ref != \"E\":\n\t\t\t\tlon = 0 - lon\n\n\treturn lon, lat",
"def get_img_coord_tuple(img):\n\n lat = convert_to_degress(get_gps_details(img)['GPSLatitude'])\n if get_gps_details(img)['GPSLatitudeRef'] == 'S':\n lat = -lat\n\n longitude = convert_to_degress(get_gps_details(img)['GPSLongitude'])\n if get_gps_details(img)['GPSLongitudeRef'] == 'W':\n longitude = -longitude\n\n return lat, longitude",
"def get_gps_details(img):\n gpsinfo = {}\n exif = {TAGS[k]: v for k, v in img._getexif().items() if k in TAGS}\n for item in exif['GPSInfo'].keys():\n name = GPSTAGS.get(item, item)\n gpsinfo[name] = exif['GPSInfo'][item]\n return gpsinfo",
"def get_lat_lng(station_name, stops):\n for line in stops:\n if station_name.lower() in line.lower():\n l = line.replace(\"\\n\", \"\").split(\",\")\n lat = l[4].replace('\\\"', '')\n lng = l[5].replace('\\\"', '')\n return lat, lng\n return 0, 0",
"def parse_lon_lat(grid, lon, lat):\n if lat is None:\n lat = grid.origin_latitude[\"data\"][0]\n if lon is None:\n lon = grid.origin_longitude[\"data\"][0]\n return lon, lat",
"def get_apartment_latlng(self, soup, apartment_dict):\n import googlemaps\n from datetime import datetime\n\n gmaps = googlemaps.Client(key='AIzaSyBxV4EAXU1aMLGU9bnokygGL92c2BxDzCE')\n\n # Geocoding an address\n geocode_result = gmaps.geocode(apartment_dict['address'])\n\n if len(geocode_result) > 0:\n # Store lat and lng\n apartment_dict['lat'] = geocode_result[0]['geometry']['location']['lat']\n apartment_dict['lng'] = geocode_result[0]['geometry']['location']['lng']\n else:\n print(\"Failed to find lat and lng values\")",
"def get_coordinates(addresses, boroughs):\n latitude = []\n longitude = []\n for address, borough in zip(addresses, boroughs):\n try:\n g = geocoder.osm('{}, {}, New York'.format(address, borough)).json\n latitude.append(g['lat'])\n longitude.append(g['lng'])\n except:\n latitude.append(None)\n longitude.append(None)\n\n return np.array(latitude).T, np.array(longitude).T",
"def geo(address):\n API_PRIVATE = os.environ.get(\"TOM_TOM_PRIVATE\")\n encoded = urllib.parse.quote(address)\n query ='https://api.tomtom.com/search/2/geocode/' + str(encoded) + \\\n '.json?limit=1&countrySet=US&lat=42&lon=-72&topLeft=42.886%2C%20-73.508&btmRight=41.237%2C-69.928&key=' \\\n + API_PRIVATE\n\n response = requests.get(query)\n while True:\n try:\n jsonResponse = response.json()\n break\n except:\n response = requests.get(query)\n\n latit = 0\n longit = 0\n\n for address in jsonResponse['results']:\n latit = address['position']['lat']\n longit = address['position']['lon']\n return latit, longit",
"def get_lat_long(place_name):\n latitude = place_name[\"results\"][0][\"geometry\"][\"location\"][\"lat\"]\n longitude = place_name[\"results\"][0][\"geometry\"][\"location\"][\"lng\"]\n return latitude, longitude"
] | [
"0.69120336",
"0.6498285",
"0.64313525",
"0.6263169",
"0.6131598",
"0.58735377",
"0.57471067",
"0.57270646",
"0.57160133",
"0.5713735",
"0.56999207",
"0.5634059",
"0.5619045",
"0.5606678",
"0.5577931",
"0.5576704",
"0.5569433",
"0.5567942",
"0.5556589",
"0.5555639",
"0.5549103",
"0.5527535",
"0.5474378",
"0.54457915",
"0.5432698",
"0.5408103",
"0.53722405",
"0.5367577",
"0.5360957",
"0.53555787"
] | 0.78406644 | 0 |
Gets geocoding response from passed lat and long. | def get_geocoding_response(lat: float, long: float):
payload = {}
headers = {}
url = URL_TEMPLATE.format(
api_key=GEOCODING_API_KEY,
long_lat=','.join(
(str(long), str(lat)),
),
)
response = requests.request("GET", url, headers=headers, data=payload)
return response.json() | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def geocoding(address):\n AUTH = json.loads(open(\"auth.json\", \"r\").read())\n\n r = requests.get(f\"https://maps.googleapis.com/maps/api/geocode/json\", params={\n \"address\": address,\n \"key\": AUTH[\"GMAP_API\"]\n })\n\n if r.status_code == 200:\n r = r.json()\n results = r[\"results\"]\n if len(results) < 1:\n log.error(\"No result geocoding for %s\", address)\n return (-1, -1)\n\n result = results[0]\n proper_address = result[\"formatted_address\"]\n loc = result[\"geometry\"][\"location\"]\n lat = loc[\"lat\"]\n lng = loc[\"lng\"]\n\n return (proper_address, lat, lng)\n\n else:\n log.error(\"Error in Geocoding %s\", address)\n return (-1, -1)",
"def get_geocode(self, address):\n\n try:\n raw_data = self.__get_raw_data(address)\n except (URLError, ValueError):\n return 503, None\n else:\n code, coords = self.__parse_raw_data(raw_data)\n return code, coords",
"def reverse_geocoding(lat, lng, API_KEY=API_KEY, GEOCODE_API_URL=GEOCODE_API_URL):\n params = {\n 'latlng': '{},{}'.format(lat, lng),\n 'key': API_KEY\n }\n\n # Do the request and get the response data\n response = requests.get(GEOCODE_API_URL, params=params)\n response = response.json()\n geodata = parse_response(response)\n return geodata",
"def rlis_geocode(addr_str, token):\n\n url = 'http://gis.oregonmetro.gov/rlisapi2/locate/'\n params = {\n 'token': token,\n 'input': addr_str,\n 'form': 'json'\n }\n rsp = requests.get(url, params=params)\n\n if rsp.status_code != 200:\n return -1, -1, -1\n else:\n json_rsp = rsp.json()\n if json_rsp['error']:\n return -1, -1, -1\n else:\n return json_rsp['data'][0]['lat'], json_rsp['data'][0]['lng'], json_rsp['data'][0]['fullAddress']",
"def geocode(self, request):\n latlng = request.query_params.get('latlng')\n if not latlng:\n raise RequiredParameter('latlng')\n try:\n lat = float(latlng.split(',')[0])\n lng = float(latlng.split(',')[1])\n except Exception:\n raise InvalidParameter('latlng', _('Invalid `latlng`'))\n ip = get_real_ip(request)\n location = location_controller.from_location_index(lat, lng, ip)\n return Response(location)",
"def geocode(self, query, exactly_one=True, timeout=None):\n params = {\n 'addr': self.format_string % query,\n }\n if self.api_key:\n params['key'] = self.api_key\n url = \"?\".join((self.api, urlencode(params)))\n logger.debug(\"%s.geocode: %s\", self.__class__.__name__, url)\n return self._parse_json(\n self._call_geocoder(url, timeout=timeout), exactly_one\n )",
"def google_geocode(query):\n\tif not API_KEY:\n\t\traise ConfigException(\"Require API_KEY for googleapi. Reload after setting.\")\n\td = {\"address\" : query.encode(\"utf-8\"), \"key\" : API_KEY }\n\tf = urlopen(LOC_URL % (urlencode(d)))\n\tlocdata = load(f)\n\tif f.getcode() == 200:\n\t\tif \"results\" in locdata:\n\t\t\titem = locdata[\"results\"]\n\t\t\tif len(item) == 0:\n\t\t\t\treturn None\n\t\t\titem = item[0]\n\t\t\tll = item.get(\"geometry\", {}).get(\"location\") # lol tricky\n\t\t\tif not ll: return None\n\t\t\treturn item[\"formatted_address\"], ll[\"lat\"], ll[\"lng\"]\n\t\telse:\n\t\t\treturn None\n\telse:\n\t\traise RuntimeError(\"Error (%s): %s\" % (f.getcode(), locdata.replace(\"\\n\", \" \")))",
"def geocoding(address, API_KEY=API_KEY, GEOCODE_API_URL=GEOCODE_API_URL):\n # define the parameters of the search\n params = {\n 'address': '{}'.format(address),\n 'key': API_KEY\n }\n\n # Do the request and get the response data\n response = requests.get(GEOCODE_API_URL, params=params)\n response = response.json()\n\n geodata = parse_response(response)\n return geodata",
"def get(self):\n street = self.request.args.get(\"street\", \"\")\n zip = self.request.args.get(\"zip\",\"\")\n city = self.request.args.get(\"city\",\"\")\n country = self.request.args.get(\"country\",\"Germany\")\n\n if street==\"\" or city==\"\" or country==\"\":\n return {'success': False, \n 'msg': self._(\"no full address was given\")\n }\n try:\n lat, lng = self.retrieve_location(street, zip, city, country)\n except LocationNotFound:\n return {'success': False, \n 'msg': self._(\"we couldn't lookup a geo coordinates for this address\")\n }\n return {\n 'success' : True,\n 'lat' : lat,\n 'lng' : lng\n }",
"def geocode(addr_str):\n\n\tbase_url = 'http://gis.oregonmetro.gov/rlisapi2/locate/'\n\turl_template = '{0}?token={1}&input={2}&form=json'\n\turl = url_template.format(base_url, token, addr_str)\n\tresponse = requests.get(url)\n\n\tif response.status_code != 200:\n\t\tprint 'unable to establish connection with rlis api'\n\t\tprint 'status code is: {0}'.format(response.status_code)\n\t\treturn response.status_code\n\t\n\tjson_rsp = response.json()\n\tif json_rsp['error']:\n\t\tprint 'the following address could not be geocoded:'\n\t\tprint '\\'{0}\\''.format(addr_str)\n\t\tprint 'the following error message was returned:'\n\t\tprint '\\'{0}\\''.format(json_rsp['error']), '\\n'\n\telse:\n\t\treturn json_rsp['data'][0]",
"def get_lat_long(address):\n url = \"https://maps.googleapis.com/maps/api/geocode/json\"\n params = {'address':address,'key':'AIzaSyBVZhQwm7GZViRzTCuH1VBvMdIpLMwvfT4'}\n req = requests.get(url,params=params)\n stat = req.status_code\n latitude = req.json()['results'][0]['geometry']['location']['lat']\n longitude = req.json()['results'][0]['geometry']['location']['lng']\n return latitude, longitude",
"def lookup(addr, num, street, city, code, geo_dict, failure_set):\n try:\n address_url = \"https://geocoding.geo.census.gov/geocoder/locations/address?\" + \\\n \"street=\" + str(num) + \"+\" + street.replace(\" \", \"+\") + \"&city=\" + city + \"&zip=\" + \\\n str(code) + \"&benchmark=9&format=json\"\n geo_data = json.load(req.urlopen(address_url).decode('utf-8'))['result']\n except Exception:\n try:\n address_url = \"https://geocoding.geo.census.gov/geocoder/locations/address?\" + \\\n \"street=\" + str(num) + \"+\" + street.replace(\" \", \"+\") + \"&city=\" + city + \"&zip=\" + \\\n str(code) + \"&benchmark=9&format=json\"\n geo_data = json.loads(req.urlopen(address_url).read().decode('utf-8'))['result']\n except Exception as e:\n print(e, addr)\n failure_set.add(addr)\n return None\n if len(geo_data['addressMatches']) == 0:\n print(addr, ': Failure')\n failure_set.add(addr)\n return None\n print(addr, ': Success')\n location = geo_data['addressMatches'][0]['coordinates']\n latlong = ','.join([str(location['y']), str(location['x'])])\n geo_dict[addr] = latlong\n return tuple(float(geo) for geo in latlong.split(','))",
"def geocode(address):\n geo_data = requests.get(\"https://geocode.xyz/{}?json=1\".format(\n urllib.parse.quote_plus(address)))\n geo_json = json.loads(geo_data.content)\n\n return geo_json['standard']['city'], geo_json['latt'], geo_json['longt']",
"def get_address_lat_lng(geocoder_response):\n try:\n if geocoder_response.status not in [\"ERROR\", \"ZERO_RESULTS\"]:\n address = unidecode(geocoder_response.address.replace(\"'\", \"\\''\"))\n latitude = geocoder_response.lat\n longitude = geocoder_response.lng\n else:\n address = ''\n latitude = -1\n longitude = -1\n except AttributeError:\n address = ''\n latitude = -1\n longitude = -1\n return float(latitude), float(longitude), address",
"def getGeo(self):\n command = f'curl -s -m 5 http://ip-api.com/json/' + self.ip\n result = subprocess.run(command.split(), capture_output=True)\n data = result.stdout.decode(\"utf-8\").replace('\\n','')\n try:\n data = json.loads(data)\n except json.decoder.JSONDecodeError:\n # Error from ip-api.com\n data = None\n if data:\n # {\"status\":\"success\",\"country\":\"Yemen\",\"countryCode\":\"YE\",\"region\":\"SA\",\"regionName\":\"Amanat Alasimah\",\"city\":\"Sanaa\",\"zip\":\"\",\"lat\":15.3522,\"lon\":44.2095,\"timezone\":\"Asia/Aden\",\"isp\":\"Public Telecommunication Corporation\",\"org\":\"YemenNet\",\"as\":\"AS30873 Public Telecommunication Corporation\",\"query\":\"134.35.218.63\"}\n self.geodata = data\n else:\n self.geodata = None",
"async def find_by_coordinates(self, latitude: float, longitude: float):\n data = self.geocode_service.reverse_geocoding(latitude, longitude)\n if data is not None:\n return await self.address_service.search_addresses(data[\"street\"])\n return None",
"async def lat_long(zip_code: str, country: str) -> Sequence[float]:\n key: str = f\"{zip_code}, {country}\"\n url: str = f'http://www.datasciencetoolkit.org/street2coordinates/{key.replace(\" \", \"+\")}'\n\n async with aiohttp.ClientSession() as session:\n async with session.get(url) as response:\n response.raise_for_status()\n data = await response.json()\n\n city: Dict[str, Any] = data.get(f\"{zip_code}, {country}\", dict())\n return city.get(\"latitude\", 0.00), city.get(\"longitude\", 0.00)",
"def get_google_geocoding(coordinates):\n latitude = coordinates['latitude']\n longitude = coordinates['longitude']\n payload = build_google_payload(latitude=latitude, longitude=longitude)\n response = requests.get(google_url, params=payload, timeout=int(GEOCODING_INFO['timeout']))\n if response.status_code == 200:\n formated_data = extract_data_from_google_response(response.text.encode('utf-8'))\n return formated_data\n else:\n return None",
"def reverse_lookup(lat, long, key=keys.google):\n result = str(Geocoder(api_key=key).reverse_geocode(lat, long))\n location_details = result.split(\",\")\n address = location_details[0]\n zipcode = location_details[-2][-5:]\n city = location_details[1]\n state = location_details[2].split(\" \")[1]\n return address, zipcode, city, state",
"def get_address_by_location(lat, lng):\n request = \"{}/{}?key={}&lat={}&lon={}&format=json\".format(config.GEOCODE_URL, config.GEOCODE_REVERSE_PATH, config.GEOCODE_KEY, lat, lng)\n response = requests.get(request).json()\n return response",
"def geocode(location):\n geocoding_url = f'https://maps.googleapis.com/maps/api/geocode/json?' \\\n f'address={location}&key={_GEOCODING_KEY}'\n geocode_data = requests.get(geocoding_url).json()\n return geocode_data",
"def get_lat_lng(apiKey,address):\n \n url = ('https://maps.googleapis.com/maps/api/geocode/json?address={}&key={}'\n .format(address.replace(' ','+'), apiKey))\n try:\n response = requests.get(url)\n resp_json_payload = response.json()\n lat = resp_json_payload['results'][0]['geometry']['location']['lat']\n lng = resp_json_payload['results'][0]['geometry']['location']['lng']\n except:\n print('ERROR: {}'.format(address))\n lat = 0\n lng = 0\n return lat, lng",
"def get_city_by_code(post_code):\n post_code = post_code.replace(' ', '').encode('utf-8')\n error = ''\n city = ''\n opener = urllib2.build_opener()\n url = 'http://maps.googleapis.com/maps/api/geocode/json?address={0}&sensor=false'.format(post_code)\n response = opener.open(url).read()\n response_dict = json.loads(response)\n request_status = response_dict['status']\n if request_status == 'OK':\n logger.debug('Google response')\n logger.debug(response_dict)\n results = response_dict['results']\n \"\"\"\n first get all results\n with required zip code\n \"\"\"\n results_with_required_zip_code = []\n for result in results:\n address_components = result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'postal_code' and address_component['short_name'].replace(' ', '').lower() == post_code.lower():\n results_with_required_zip_code.append(result)\n if not results_with_required_zip_code:\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n # error = 'No location with post code %s' % post_code\n else:\n \"\"\"\n next we need all results in GB\n \"\"\"\n results_with_required_zip_code_in_GB = ''\n for good_result in results_with_required_zip_code:\n address_components = good_result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'country' and address_component['short_name'].lower() == 'GB'.lower():\n results_with_required_zip_code_in_GB = good_result\n if not results_with_required_zip_code_in_GB:\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n \"\"\"\n finally find city name\n \"\"\"\n address_components = results_with_required_zip_code_in_GB['address_components']\n # first try get postal city\n searching_city = get_city_by_key(address_components, 'postal_town')\n if not searching_city:\n # next by administrative_area_level_2\n searching_city = get_city_by_key(address_components, 'administrative_area_level_2')\n if not searching_city:\n print url\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n city = searching_city\n elif request_status == 'ZERO_RESULTS':\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n else:\n error = request_status\n return {\n 'error': error,\n 'data': city\n }",
"def get_latlong():\r\n info = urllib.request.urlopen(\"https://ipinfo.io\").read()\r\n decoded = json.loads(info)\r\n print(decoded[\"loc\"])\r\n return decoded[\"loc\"]",
"def geocodeLocations(locations):\n maxResults = 1\n location_query = ''\n for location in locations:\n location_query += \"&location=%s\" % encodeUrl(location)\n url = \"http://open.mapquestapi.com/geocoding/v1/batch?maxResults=%d%s\" % (maxResults, location_query)\n print url\n results = json.loads(urllib2.urlopen(url).read())\n print results\n return\n for location_result in results['results']:\n #print location_result\n if location_result['providedLocation']['location'] == location:\n latlng = location_result['locations'][0]['displayLatLng']\n return latlng\n else:\n print location_result",
"def get_address_from_coord(lat, lon, proxy):\n address = \"\"\n try:\n # build url query to gmap reverse geocoding\n urlquery = u'http://maps.googleapis.com/maps/api/geocode/xml?latlng=%f,%f&sensor=false' % (lat, lon)\n gmap_response = requests.get(urlquery, proxies=proxy)\n gmap_xml = ET.fromstring(gmap_response.text.encode('utf-8'))\n\n # get address (first result)\n first_result = gmap_xml.find(u'result/formatted_address')\n if not (first_result is None):\n address = first_result.text\n except Exception as e:\n print(str(e))\n pass\n finally:\n return address",
"def getLatLng(zipcode=22207) -> (float, float):\n r = requests.get(f\"https://geocode.xyz/{zipcode}?json=1\")\n data = r.json()\n lat = data.get('latt')\n lng = data.get('longt')\n return lat, lng",
"def get_coord_from_address(code_postal, adresse=None):\n headers = {\"Content-Type\": \"application/json\"}\n if adresse != None:\n url = str((\"http://api-adresse.data.gouv.fr/search/?q=\" + str(adresse) + \"&postcode=\" + str(code_postal)))\n else:\n url = str((\"http://api-adresse.data.gouv.fr/search/?q=\" + str(code_postal)))\n print(url)\n r = requests.get(url, headers=headers, data=\"\")\n js = json.loads(r.text)\n if code_postal == 75001:\n x = js['features'][1]['geometry']['coordinates']\n else:\n \tx = js['features'][0]['geometry']['coordinates']\n longitude = x[0]\n latitude = x[1]\n pos = []\n pos.append(longitude)\n pos.append(latitude)\n print(pos)\n return pos",
"def return_address_from_location(location='0,0'):\n if not re.compile('^(\\-?\\d+(\\.\\d+)?),\\s*(\\-?\\d+(\\.\\d+)?)$').match(location):\n raise ValueError('Location Invalid')\n base_url = 'https://maps.googleapis.com/maps/api/geocode/json?'\n latlng = 'latlng=' + location\n try:\n #This try block is for our first 150,000 requests. If we exceed this, use Jack's Token.\n key_string = '&key=' + ACCESS_TOKEN\n url = base_url + latlng + key_string #Builds the url\n result = requests.get(url).json() #Gets google maps json file\n cleaned = result['results'][0]['address_components']\n #Address to check against value of check_against_business_location\n chk = cleaned[0]['long_name'] + ' ' + cleaned[1]['long_name'] + ', ' + cleaned[3]['long_name']\n business_tuple = check_against_business_location(location, chk)\n if business_tuple[0]: #If true, the lat, lon matches a business location and we return business name\n return business_tuple[1]\n else: #otherwise, we just return the address\n return cleaned[0]['long_name'] + ' ' + cleaned[1]['short_name'] + ', ' + cleaned[3]['short_name']\n except:\n try:\n #Use Jack's Token in case of some invalid request problem with other API Token\n key_string = '&key=' + JACK_TOKEN\n url = base_url + latlng + key_string #Builds the url\n result = requests.get(url).json() #Gets google maps json file\n cleaned = result['results'][0]['address_components']\n #Address to check against value of check_against_business_location\n chk = cleaned[0]['long_name'] + ' ' + cleaned[1]['long_name'] + ', ' + cleaned[3]['long_name']\n business_tuple = check_against_business_location(location, chk)\n if business_tuple[0]: #If true, the lat, lon matches a business location and we return business name\n return business_tuple[1]\n else: #otherwise, we just return the address\n return cleaned[0]['long_name'] + ' ' + cleaned[1]['short_name'] + ', ' + cleaned[3]['short_name']\n except:\n raise ValueError(\"Something went wrong\")",
"def geocode():\n\n if \"location\" in request.vars:\n location = request.vars.location\n else:\n session.error = T(\"Need to specify a location to search for.\")\n redirect(URL(r=request, f=\"index\"))\n\n if \"service\" in request.vars:\n service = request.vars.service\n else:\n # @ToDo: service=all should be default\n service = \"google\"\n\n if service == \"google\":\n return s3base.GoogleGeocoder(location, db).get_kml()\n\n if service == \"yahoo\":\n return s3base.YahooGeocoder(location, db).get_xml()"
] | [
"0.733597",
"0.7196635",
"0.71830773",
"0.7144319",
"0.70891005",
"0.70880294",
"0.69447345",
"0.68647265",
"0.68523586",
"0.68478686",
"0.6834369",
"0.68198144",
"0.68149424",
"0.68012166",
"0.6715046",
"0.6687212",
"0.6680827",
"0.66554797",
"0.66531104",
"0.6628361",
"0.6626956",
"0.6621537",
"0.659875",
"0.6575523",
"0.656564",
"0.6562829",
"0.6548259",
"0.654123",
"0.6514406",
"0.65103334"
] | 0.82419246 | 0 |
register events for each ticks gateway for each hand | def _registerEvent(self):
# new DyStockDataTicksGateway instance for each ticks hand to avoid mutex
self._ticksGateways = [DyStockDataTicksGateway(self._eventEngine, self._info, i) for i in range(DyStockDataEventHandType.stockHistTicksHandNbr)] | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def tick(self):\n for method in self.on_ticker:\n getattr(self, method)()",
"def _add_event_detect(self):\n for gpio_channel in self.registered_gpio:\n self.gpio_add_event_detect(gpio_channel, bouncetime=300)",
"def _add_event_callback(self):\n for gpio_channel in self.registered_gpio:\n self.gpio_add_event_callback(gpio_channel)",
"def events(self):",
"def enable_callbacks(self):\n\n onObjectUpdate_received = self.message_handler.register('ObjectUpdate')\n onObjectUpdate_received.subscribe(self.onObjectUpdate)\n\n onObjectUpdateCached_received = self.message_handler.register('ObjectUpdateCached')\n onObjectUpdateCached_received.subscribe(self.onObjectUpdateCached)\n\n onObjectUpdateCompressed_received = self.message_handler.register('ObjectUpdateCompressed')\n onObjectUpdateCompressed_received.subscribe(self.onObjectUpdateCompressed)\n\n onImprovedTerseObjectUpdate_received = self.message_handler.register('ImprovedTerseObjectUpdate')\n onImprovedTerseObjectUpdate_received.subscribe(self.onImprovedTerseObjectUpdate)\n \n onObjectProperties_received = self.message_handler.register('ObjectProperties')\n onObjectProperties_received.subscribe(self.onObjectProperties)\n\n onKillObject_received = self.message_handler.register('KillObject')\n onKillObject_received.subscribe(self.onKillObject)\n\n # uncomment these to view packets sent back to simulator\n # onObjectName_sent = self.message_handler.register('ObjectName')\n # onObjectName_sent.subscribe(self.helpers.log_packet, self)\n\n # onDeRezObject_sent = self.message_handler.register('DeRezObject')\n # onDeRezObject_sent.subscribe(self.helpers.log_packet, self)",
"def _register_handlers(self):\n self.jm.register_handler(\"move_node\", self.move_node)\n self.jm.register_handler(\"copy_node\", self.copy_node)\n self.jm.register_handler(\"push_to_vospace\", self.push_to_vospace)\n self.jm.register_handler(\"push_from_vospace\", self.push_from_vospace)\n self.jm.register_handler(\"pull_to_vospace\", self.pull_to_vospace)\n self.jm.register_handler(\"pull_from_vospace\", self.pull_from_vospace)",
"def update(self, ticks):\n super().update(ticks)\n if self.give_event_handler is not None:\n self.give_event_handler.update(ticks)\n if self.give_event_handler.is_dead:\n self.is_dead = True",
"def register_signals(self):\n for _, callback in self.signal_callbacks:\n Signal.subscribe(self, callback)",
"def on_tick(self, time):\n pass",
"def register_observers_with_gamestate(self):",
"def tick(self):",
"def send(event):\r\n dog_stats_api.increment('track.send.count')\r\n\r\n for name, backend in backends.iteritems():\r\n with dog_stats_api.timer('track.send.backend.{0}'.format(name)):\r\n backend.send(event)",
"def on_tick(data):\n tick = data.dict_\n symbol = tick.symbol\n # 生成datetime对象\n if not tick.datetime:\n if '.' in tick.time:\n tick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S.%f')\n else:\n tick.datetime = datetime.strptime(' '.join([tick.date, tick.time]), '%Y%m%d %H:%M:%S')\n\n bm = bar.get(symbol, None)\n if bm:\n bm.updateTick(tick)\n if not bm:\n bar[symbol] = handle.BarGenerator()\n client[TICK_DB][tick.symbol].insert(tick.__dict__)",
"def tick(self):\n self.connect()",
"def set_floor_callbacks(self):\n\t\tfor floor, channel in enumerate(INPUT.SENSORS):\n\t\t\tself.signalPoller.add_callback_to_channel(channel, partial(self.floor_reached_callback, floor))",
"def hook_events(self):\n wxMediator.hook_events(self)\n EVT_MINE(self, wxEVT_SOCKET_DATA, self.on_data)\n EVT_MINE(self, wxEVT_NEW_LISTEN_CONN, self.new_listen_conn)\n EVT_MINE(self, wxEVT_NEW_TALK_CONN, self.new_talk_conn)",
"def handleEvents(self, events):\n pass",
"def setupEventHooks(self):\n # handle mouse clicks\n self.img.scene().sigMouseClicked.connect(self.handleClick)\n # handle mouse movement\n # Use signalproxy for ratelimiting\n sig = self.img.scene().sigMouseMoved\n self.mvProxy = pqg.SignalProxy(signal=sig, rateLimit=60, slot=self.handleMove)",
"def _register_handlers(self):\n import handlers as th\n import inspect\n for name, class_type in inspect.getmembers(th, predicate=inspect.isclass):\n if class_type is th.ZMQTopicHandlerBase:\n continue\n handler = class_type()\n topic = handler.get_topic()\n if topic in self._topic_handlers:\n self._topic_handlers.append(handler)\n else:\n self._topic_handlers[topic] = [handler]",
"def setup_timers():\n cfg_section = 'Timers'\n # Timer1\n name = 'Timer_mqtt'\n c_period = float(config.option('period_mqtt', cfg_section, 15.0))\n c_period = max(min(c_period, 180.0), 5.0)\n logger.debug('Setup timer %s: period = %ss', name, c_period)\n modTimer.Timer(\n c_period,\n cbTimer_mqtt_reconnect,\n name=name,\n )\n # Timer2\n name = 'Timer_fan'\n c_period = float(config.option('period_fan', cfg_section, 5.0))\n c_period = max(min(c_period, 60.0), 1.0)\n logger.debug('Setup timer %s: period = %ss', name, c_period)\n modTimer.Timer(\n c_period,\n cbTimer_fan,\n name=name,\n )\n # Start all timers\n modTimer.start_all()",
"def _register_handlers(self):\n DBG(\"\\nregister handlers\")\n for hook, handler in self.handlers:\n g.registerHandler(hook, handler)\n\n signal_manager.connect(self.c, 'body_changed', self._after_body_key)",
"def handle_events(self):\n for_removal = []\n\n for event in self.time_events:\n if event.next < datetime.datetime.now():\n if event.type == \"periodic\":\n event.next += event.interval\n else:\n for_removal.append(event)\n try:\n event.action()\n except:\n self.log.exception(\"Error happened in a timed event\")\n\n for item in for_removal:\n self.time_events.remove(item)",
"def create_hooks(self, hclass):\n for extension in self.db.values():\n self.events.register_callbacks_from_inst(hclass, extension)",
"def events_and_callbacks_qi_framework():\n\n # ALMemory acts as the hub for the distribution of event notifications.\n # Source: https://developer.softbankrobotics.com/nao6/naoqi-developer-guide/naoqi-apis/naoqi-core/almemory\n # Example: https://developer.softbankrobotics.com/nao6/naoqi-developer-guide/other-tutorials/python-sdk-tutorials/python-sdk-examples/vision/face\n\n # Create a broker\n # TODO(TK): why?\n naoqi.ALBroker(\"pythonBroker\", IP_ME, PORT_ME, IP_ROBOT, PORT_ROBOT)\n\n proxy_memory = naoqi.ALProxy(\"ALMemory\", IP_ROBOT, PORT_ROBOT)\n\n # Register callback:\n def mycallback(key, value):\n print(\"qi callback: key={}, value={}\".format(key, value))\n sess = proxy_memory.session()\n mem = sess.service(\"ALMemory\")\n sub = mem.subscriber(\"FaceDetected\")\n sub.signal.connect(functools.partial(mycallback, \"FaceDetected\"))\n\n # Raise an event:\n proxy_memory.raiseEvent(\"FaceDetected\", str(datetime.datetime.now()))\n proxy_memory.raiseEvent(\"AnotherEvent\", str(datetime.datetime.now()))\n time.sleep(0.1) # give it some time to process",
"def _notify_handlers(self):\n\n # Notify all handlers \n for handler_callback in self._registered_handlers:\n try:\n handler_callback(self._balloon_position)\n except Exception as e:\n # A receiver failed, catch and move on\n pass",
"def connectFSManager__(self):\n \"\"\"\n self.valkkafs_manager.setTimeCallback(lambda mstime: self.signals.set_time.emit(mstime))\n self.valkkafs_manager.setTimeLimitsCallback(lambda tup: self.signals.set_block_time_limits(tup))\n \"\"\"\n self.valkkafs_manager.setTimeCallback(self.valkkafsmanager_set_time_cb)\n self.valkkafs_manager.setTimeLimitsCallback(self.valkkafsmanager_set_block_time_limits_cb)\n self.valkkafs_manager.setBlockCallback(self.valkkafsmanager_block_cb)",
"def processTick(cls, tick):\n for key in CarRegistry.cars:\n CarRegistry.cars[key].processTick(tick)",
"def listen_and_send(self):\n hadEvent = False\n\n if not self.axis_data:\n self.axis_data = {}\n\n if not self.button_data:\n self.button_data = {}\n for i in range(self.controller.get_numbuttons()):\n self.button_data[i] = False\n\n if not self.hat_data:\n self.hat_data = {}\n for i in range(self.controller.get_numhats()):\n self.hat_data[i] = (0, 0)\n\n while True:\n for event in pygame.event.get():\n if event.type == pygame.JOYAXISMOTION:\n self.axis_data[event.axis] = round(event.value, 2)\n elif event.type == pygame.JOYBUTTONDOWN:\n self.button_data[event.button] = True\n elif event.type == pygame.JOYBUTTONUP:\n self.button_data[event.button] = False\n elif event.type == pygame.JOYHATMOTION:\n self.hat_data[event.hat] = event.value\n\n if event.type == pygame.JOYBUTTONDOWN:\n # A button on the joystick just got pushed down\n hadEvent = True\n elif event.type == pygame.JOYAXISMOTION:\n # A joystick has been moved\n hadEvent = True\n\n if hadEvent:\n\n # If platform is linux we need to change some values in axis_data\n os.system('clear')\n print(\"Axis before\")\n pprint.pprint(self.axis_data)\n if sys.platform == 'linux':\n #self.axis_data[2], self.axis_data[3], self.axis_data[4] = self.axis_data[4], self.axis_data[2], self.axis_data[3]\n temp2 = self.axis_data[2]\n temp3 = self.axis_data[3]\n temp4 = self.axis_data[4]\n self.axis_data[2] = temp4\n self.axis_data[3] = temp2\n self.axis_data[4] = temp3\n\n\n self.event_dict['axis'] = self.axis_data\n self.event_dict['button'] = self.button_data\n message = pickle.dumps(self.event_dict, protocol=4)\n message = bytes(f\"{len(message):<{HEADERSIZE}}\", 'utf-8') + message\n self.sock.sendall(message)\n\n #if self.button_data[4]:\n # self.verbose = not self.verbose\n\n if self.verbose:\n\n # print(\"Button \")\n # pprint.pprint(self.button_data)\n print(\"Axis \")\n pprint.pprint(self.axis_data)\n # print(\"Motion \")\n # pprint.pprint(self.hat_data)",
"def loop(self, stopevent):\n for bus in self.buses:\n self.buses[bus].loop(stopevent)",
"def loop(self, stopevent):\n for bus in self.buses:\n self.buses[bus].loop(stopevent)"
] | [
"0.5968426",
"0.5917992",
"0.56636107",
"0.56423134",
"0.5584876",
"0.5492231",
"0.5486046",
"0.5438875",
"0.5428285",
"0.5386084",
"0.535901",
"0.535302",
"0.5332035",
"0.53188497",
"0.5287094",
"0.52672625",
"0.52583253",
"0.5256934",
"0.52462757",
"0.52174073",
"0.52080256",
"0.52024615",
"0.51666903",
"0.5158269",
"0.51412594",
"0.5119585",
"0.51114887",
"0.51113296",
"0.5105887",
"0.5105887"
] | 0.77605575 | 0 |
Return a list of media from default storage | def list_media(storage, filter_list):
results = []
total = 0
try:
for media in storage.listdir('.')[1]:
if not media.endswith('/') and media != "":
location = storage.url(media).split('?')[0]
total += 1
if not filter_list or location in filter_list:
results += [
{'location': location,
'tags': MediaTag.objects.filter(
location=location).values_list(
'tag', flat=True)
}]
except OSError:
LOGGER.exception(
"Unable to list objects in %s.", storage.__class__.__name__)
except S3ResponseError:
LOGGER.exception(
"Unable to list objects in %s bucket.", storage.bucket_name)
return {'count': total, 'results': results} | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def get_list(self ):\n headers = { 'Authorization' : self.client.authorization_header }\n response = requests.get(\n self.client.url + '/media', \n headers = headers\n )\n\n return json.loads(response.text)",
"def get_available_galleries(include_default=False):\n galleries = []\n\n for directory in Path(MEDIA_AVATARS).dirs():\n if include_default or directory[-8:] != '_default':\n gallery = {'name': directory.name, 'images': []}\n\n images = directory.files('*.gif')\n images += directory.files('*.jpg')\n images += directory.files('*.jpeg')\n images += directory.files('*.png')\n\n for image in images:\n image_path = image[len(settings.MEDIA_ROOT):]\n if image_path.startswith('/'):\n image_path = image_path[1:]\n gallery['images'].append(image_path)\n\n if gallery['images']:\n galleries.append(gallery)\n\n return galleries",
"def listall(self):\n list_query = \"\"\"SELECT * FROM %s\"\"\" % MediaCollection.COLLECTIONS_TABLE\n self.cursor.execute(list_query)\n return [Media.fromtuple(media) for media in self.cursor.fetchall()]",
"def getMediaFiles(path):\n fileList = getMediaFileList(path)\n # dirList = getDirectoryList(path)\n\n # results = map(getMediaFiles, dirList)\n\n # for result in results:\n # fileList = fileList + result\n\n return fileList",
"def _get_media(media_types):\n get_mapped_media = (lambda x: maps.VIRTUAL_MEDIA_TYPES_MAP[x]\n if x in maps.VIRTUAL_MEDIA_TYPES_MAP else None)\n return list(map(get_mapped_media, media_types))",
"def get_real_media(self, provider_name):\n return [Media(f, provider_name) for f in self.videos]",
"def filter(self):\n for f in FileHelper.ALL_PATHS:\n media_obj = MediaObject(FileHelper.get_url(f), FileHelper.get_title(f), FileHelper.get_media_type(f), FileHelper.get_icon(f), FileHelper.get_duration(f), FileHelper.get_ctype(f))\n _id = media_obj.uuid\n if media_obj.media_type == \"image\":\n DB.IMAGES[_id] = media_obj\n elif media_obj.media_type == \"audio\":\n DB.MUSIC[_id] = media_obj\n elif media_obj.media_type == \"video\":\n DB.VIDEOS[_id] = media_obj\n else:\n print \"File '%s' doesn't play nice.\" % (f)",
"def files_storage_list(self, prefix='pipeline/', print_paths=False):\n\n return self.backend.files_storage_list(prefix=prefix, print_paths=print_paths)",
"def Files(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('_files', default)\n return [HEP.RecordFile(i) for i in tmp]",
"def get_media():\n\n error_on_unauthorized()\n\n media = Upload.query.order_by(Upload.id)\n total_num = media.count()\n\n if total_num == 0:\n return jsonify(total=0, uploads=[])\n\n try:\n count = int(request.args.get('max', total_num))\n page = int(request.args.get('page', 1))\n\n if count <= 0 or page <= 0:\n raise APIError(422, \"Query parameters out of range\")\n\n begin = (page - 1) * count\n end = min(begin + count, total_num)\n\n return jsonify(total=total_num, uploads=[upload_to_dict(u) for u in media.all()[begin:end]]), 200\n except ValueError:\n raise APIError(422, \"Invalid query parameter\")",
"def media(self, path):\n path = \"/media/%s%s\" % (self.session.root, format_path(path))\n\n url, params, headers = self.request(path, method='GET')\n\n return self.rest_client.GET(url, headers)",
"def content_list(self):\n return self.face.FACES.files.find({})",
"def get_media(api, num_tweets=25, profile=\"@hakeemangulu\", admin=False):\n # Store the media urls in a list\n media_files = []\n\n # Create cursor object for the timeline\n if admin:\n # If the admin is using the application, return his timeline\n tl = tweepy.Cursor(api.home_timeline).items(num_tweets)\n else:\n # If the admin is not using the application, return the specified\n # user's timeline\n tl = tweepy.Cursor(api.user_timeline, screen_name=profile).items(num_tweets)\n\n # Iterate through the timeline and extract images\n for status in tl:\n # Get all media from a tweet\n media = status.entities.get('media', [])\n # Add non-empty media to the set\n for image in media:\n # Only add the image if it is a photo or GIF (as opposed to a\n # video)\n if image['type'] == 'photo' or image['type'] == 'animated_gif':\n media_files.append(image['media_url'])\n return media_files",
"def list(self, _: List[str]) -> Tuple[str, FileSystemStorage]:\n files = self._get_bulma_css()\n files.extend(self._get_custom_css())\n files.extend(self._get_bulma_js())\n\n for path in files:\n yield path, self.storage",
"def _get_current_media(self):\n key = int(self.status.content_id.split(\"/\")[-1])\n media_item = self.pms.fetchItem(key).reload()\n media_idx = self.status.media_custom_data.get(\"mediaIndex\", 0)\n part_idx = self.status.media_custom_data.get(\"partIndex\", 0)\n media = media_item.media[media_idx]\n part = media.parts[part_idx]\n\n return media_item, media, part",
"def content_media_urls(*paths):\n from mezzanine.conf import settings\n media_url = settings.CONTENT_MEDIA_URL.strip(\"/\")\n return [\"/%s/%s\" % (media_url, path) for path in paths]",
"def all(self):\n return FileStorage.__objects",
"def all(self):\n return FileStorage.__objects",
"def all(self):\n return FileStorage.__objects",
"def mediaGenerator(request):\n folder = 'content/' + request\n mediaPaths = glob(folder + '/*')\n return random.choice(mediaPaths)",
"def list_media(self,\n series_id: str,\n sort: Optional[SortOption] = None,\n limit: Optional[int] = None,\n offset: Optional[int] = None,\n locale: Optional[Any] = None) -> list:\n params: Dict[str, Any] = {\n \"series_id\": series_id,\n }\n\n if sort:\n params[\"sort\"] = sort.value\n if limit:\n params[\"limit\"] = limit\n if offset:\n params[\"offset\"] = offset\n if locale:\n params[\"locale\"] = locale\n\n return self._api._api_call(\"list_media\", params)",
"async def async_browse_media(self, media_content_type=None, media_content_id=None):\n return await media_source.async_browse_media(\n self.hass,\n media_content_id,\n content_filter=lambda item: item.media_content_type.startswith(\"audio/\"),\n )\n \n #TODO: combide the BrowseMedia Media Sources above with the BrowseMedia Directory below\n #if \"udisk\" in self._source_list: \n # if media_content_id not in (None, \"root\"):\n # raise BrowseError(\n # f\"Media not found: {media_content_type} / {media_content_id}\"\n # )\n\n # source_media_name = self._source_list.get(\"udisk\", \"USB Disk\")\n\n # if len(self._trackq) > 0:\n # radio = [\n # BrowseMedia(\n # title = preset,\n # media_class = MEDIA_CLASS_MUSIC,\n # media_content_id = index,\n # media_content_type = MEDIA_TYPE_MUSIC,\n # can_play = True,\n # can_expand = False,\n # )\n # for index, preset in enumerate(self._trackq, start=1)\n # ]\n\n # root = BrowseMedia(\n # title=self._name + \" \" + source_media_name,\n # media_class = MEDIA_CLASS_DIRECTORY,\n # media_content_id = \"root\",\n # media_content_type = \"listing\",\n # can_play = False,\n # can_expand = True,\n # children = radio,\n # )\n\n # else:\n # root = BrowseMedia(\n # title=self._name + \" \" + source_media_name,\n # media_class = MEDIA_CLASS_DIRECTORY,\n # media_content_id = \"root\",\n # media_content_type = \"listing\",\n # can_play = False,\n # can_expand = False,\n # )\n\n # return root",
"def get_media(self, max_id):\r\n url = 'https://instagram.com/' + self.username + '/media'\r\n\r\n if max_id is not None:\r\n url += '?&max_id=' + max_id\r\n resp = requests.get(url)\r\n\r\n if resp.status_code == 200:\r\n media = json.loads(resp.text)\r\n\r\n if not media['items']:\r\n raise ValueError('User %s is private' % self.username)\r\n\r\n return media\r\n else:\r\n raise ValueError('User %s does not exist' % self.username)",
"def get_json_media(self, provider_name):\n return [JsonMedia(f, provider_name) for f in self.datafiles[provider_name]]",
"def download_media_from_bandwidth(media_urls):\n downloaded_media_files = []\n for media_url in media_urls:\n media_id = get_media_id(media_url)\n filename = get_media_filename(media_url)\n with open(filename, \"wb\") as f:\n try:\n downloaded_media = messaging_client.get_media(MESSAGING_ACCOUNT_ID, media_id)\n f.write(downloaded_media.body)\n except Exception as e:\n print(e)\n downloaded_media_files.append(filename)\n return downloaded_media_files",
"def media_players(self) -> List[dict]:\n return self.items_by_domain(\"media_player\")",
"def medias(self):\n ret = {}\n m = self.application_tree['medias']\n for k, v in six.iteritems(m):\n ret[k] = media.Media(k, v)\n return ret",
"def getMediaFileList(path):\n\n fileTypes = (\"jpg\", \"mov\", \"mp4\")\n fileList = []\n for base_dir, dirs, files in os.walk(path):\n fileList.extend([os.path.join(base_dir, f) for f in files if f.split(\".\")[1].lower() in fileTypes])\n\n # for the new canon camera, ther are some .Trash and trashinfo files, want to ignore them\n fileList = [file for file in fileList if \"trash\" not in file and \"Trash\" not in file]\n return fileList",
"def show_medias():\n t0 = time.time()\n print(f\"--- {request}\")\n print(f\"--- {user_session}\")\n # Set context by owner and the data selections\n u_context = UserContext(user_session, current_user, request)\n # Which range of data is shown\n u_context.set_scope_from_request(request, \"media_scope\")\n u_context.count = 20\n\n with MediaReader(\"read\", u_context) as service:\n # datareader = MediaReader(readservice, u_context)\n res = service.read_my_media_list()\n\n if Status.has_failed(res, False):\n flash(f'{res.get(\"statustext\",\"error\")}', \"error\")\n medias = res.get(\"items\", [])\n\n stk_logger(u_context, f\"-> bp.scene.media.show_medias fw n={len(medias)}\")\n return render_template(\n \"/scene/medias.html\",\n medias=medias,\n user_context=u_context,\n elapsed=time.time() - t0,\n )",
"def get_queryset(self):\n queryset = MediaFile.objects.all()\n username = self.request.query_params.get('username', None)\n userqueryset = User.objects.all()\n users = userqueryset.filter(username=username)\n if len(users) and username is not None:\n queryset = queryset.filter(owner=users[0])\n return queryset"
] | [
"0.73105615",
"0.6960371",
"0.6935916",
"0.66127723",
"0.6541802",
"0.6517254",
"0.6499656",
"0.6441851",
"0.6408212",
"0.6389894",
"0.638803",
"0.63024044",
"0.6233786",
"0.61984754",
"0.6091993",
"0.6090939",
"0.6084087",
"0.6084087",
"0.6084087",
"0.6072663",
"0.60158885",
"0.6009924",
"0.6001205",
"0.5950537",
"0.59344536",
"0.5910259",
"0.59054",
"0.58875865",
"0.58375084",
"0.583307"
] | 0.7200995 | 1 |
Call function of matrix_calculator.py to return matrices for each cluster. Select 'requested_'+classification_column_transaction of weblog that are only in entry "classification_wanted_transaction" | def cluster_classification(weblog,classification_column_transaction,\
classification_column_diversity, session_data_threshold, cluster_type, classification_wanted_transaction, verbose = False):
if verbose== True:
start_time = timelib.time()
print("\n * Computing cluster matrices ...")
browsing_matrix = {}
diversifying_matrix = {}
# Selecting sessions from each cluster
for cluster_id in session_data_threshold[cluster_type].unique():
sessions_cluster = session_data_threshold[session_data_threshold[cluster_type]==cluster_id].session_id
divpat_log = weblog[weblog.session_id.isin(sessions_cluster)]
# Filtering some requests
divpat_log=divpat_log[divpat_log['requested_'+classification_column_transaction].isin(classification_wanted_transaction)]
divpat_log=divpat_log[divpat_log['referrer_'+classification_column_transaction].isin(classification_wanted_transaction)]
# Defining matrices
diversity_columns=('referrer_'+classification_column_diversity,'requested_'+classification_column_diversity)
browsing_matrix[cluster_id],_ = compute_browsing_matrix(divpat_log,'referrer_'+classification_column_transaction,'requested_'+classification_column_transaction,labels=classification_wanted_transaction)
diversifying_matrix[cluster_id],_ = compute_diversifying_matrix(divpat_log,'referrer_'+classification_column_transaction,'requested_'+classification_column_transaction,\
diversity_columns,labels = classification_wanted_transaction)
if verbose == True:
print(" Cluster matrices computed in %.1f seconds."%(timelib.time() - start_time))
return browsing_matrix, diversifying_matrix; | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def cluster_classification_tex(f,browsing_matrix,diversifying_matrix, weblog,session_data_threshold,cluster_type,classification_column_diversity,classification_wanted_transaction):\n divpat_classification_wanted_transaction = classification_wanted_transaction\n divpat_N_classification_wanted_transaction=len(divpat_classification_wanted_transaction)\n f.write(\"\\n% 6. Cluster Classification\")\n columns_latex = '|'+'c|'*len(session_data_threshold[cluster_type].unique())\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DivColumnsLatex',columns_latex)) \n columns_blank = ' ' + '& '*(len(session_data_threshold[cluster_type].unique()) -1)\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DivColumnsBlank',columns_blank)) \n cluster_list = []\n ieuc_clusters = []\n star_chain_like_clusters = []\n length_clusters = []\n browsing_pattern_1 = []\n browsing_pattern_2 = []\n browsing_pattern_3 = []\n diversifying_pattern_1 = []\n diversifying_pattern_2 = []\n diversifying_pattern_3 = []\n cluster_ids = session_data_threshold[cluster_type].unique()\n cluster_ids.sort()\n for cluster_id in cluster_ids:\n cluster_list.append(str(cluster_id))\n \n cluster_session_list=session_data_threshold[session_data_threshold[cluster_type]==cluster_id].session_id.values\n temp_cluster_weblog=weblog[weblog.session_id.isin(cluster_session_list)]\n pa,pa_names = proportional_abundance(temp_cluster_weblog,'requested_'+classification_column_diversity)\n cluster_entropy=ShannonEntropy(pa,normalize=True)\n \n ieuc_clusters.append(str(round(np.power(2.0,cluster_entropy),2)))\n star_chain_like_clusters.append(star_chain_str(session_data_threshold[session_data_threshold[cluster_type]==cluster_id].star_chain_like.mean()))\n length_clusters.append(length(session_data_threshold[session_data_threshold[cluster_type]==cluster_id].requests.mean()))\n # Browsing patterns\n r,c=np.unravel_index(browsing_matrix[cluster_id][:-1,:-1].argsort(axis=None)[::-1][:3],dims=(divpat_N_classification_wanted_transaction,divpat_N_classification_wanted_transaction))\n browsing_pattern_1.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*browsing_matrix[cluster_id][r[0],c[0]],divpat_classification_wanted_transaction[r[0]],divpat_classification_wanted_transaction[c[0]]))\n browsing_pattern_2.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*browsing_matrix[cluster_id][r[1],c[1]],divpat_classification_wanted_transaction[r[1]],divpat_classification_wanted_transaction[c[1]]))\n browsing_pattern_3.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*browsing_matrix[cluster_id][r[2],c[2]],divpat_classification_wanted_transaction[r[2]],divpat_classification_wanted_transaction[c[2]]))\n \n # Diversifying patterns\n r,c=np.unravel_index(np.nan_to_num(diversifying_matrix[cluster_id])[:-1,:-1].argsort(axis=None)[::-1][:3],dims=(divpat_N_classification_wanted_transaction,divpat_N_classification_wanted_transaction))\n diversifying_pattern_1.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*diversifying_matrix[cluster_id][r[0],c[0]],divpat_classification_wanted_transaction[r[0]],divpat_classification_wanted_transaction[c[0]]))\n diversifying_pattern_2.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*diversifying_matrix[cluster_id][r[1],c[1]],divpat_classification_wanted_transaction[r[1]],divpat_classification_wanted_transaction[c[1]]))\n diversifying_pattern_3.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*diversifying_matrix[cluster_id][r[2],c[2]],divpat_classification_wanted_transaction[r[2]],divpat_classification_wanted_transaction[c[2]]))\n\n del temp_cluster_weblog\n \n 
f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DivClusterList',' & '.join(cluster_list)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DivIEUCClusters',' & '.join(ieuc_clusters)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('StarChainClusters',' & '.join(star_chain_like_clusters)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('LengthClusters',' & '.join(length_clusters)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('BrowsingPatternClustersOne',' & '.join(browsing_pattern_1)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('BrowsingPatternClustersTwo',' & '.join(browsing_pattern_2)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('BrowsingPatternClustersThree',' & '.join(browsing_pattern_3)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DiversifyingPatternClustersOne',' & '.join(diversifying_pattern_1)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DiversifyingPatternClustersTwo',' & '.join(diversifying_pattern_2)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DiversifyingPatternClustersThree',' & '.join(diversifying_pattern_3)))\n\n return f;",
"def overall_classification_matrix(self, interpreter):\n return sum([ r.classification_matrix(interpreter) for r in self.results ])",
"def calc_skill_cluster_sets(blocked_days, GTD, GTD_seas, persis_thresh, SOM_nodes, blocks_one_clusnum, skill_str, seas):\r\n prec_arr, recall_arr, F1_arr, clus_num_arr = [], [], [], []\r\n\r\n prec_vals = sorted(np.unique(blocks_one_clusnum[skill_str].values), reverse = True)\r\n #loop through first element separately so that subsequent values can be appended\r\n node_cluster_set_test_str, ds_arr = [], []\r\n for prec in prec_vals:\r\n node_cluster_set_test_str_app = blocks_one_clusnum['set'][np.where(blocks_one_clusnum[skill_str]==prec)[0]].values\r\n for clus in node_cluster_set_test_str_app:\r\n #add cluster to cluster set\r\n node_cluster_set_test_str = np.append(node_cluster_set_test_str, clus)\r\n node_cluster_set_test_str = np.unique(node_cluster_set_test_str)\r\n node_num = len(node_cluster_set_test_str) # number of nodes in cluster set\r\n clus_num_arr.append(node_num)\r\n #calculate skill score of cluster set by calculating the number of days blocked from the GTD and selecting the season\r\n blocked_days_clus = calc_blocked_days_clus(blocked_days, persis_thresh, SOM_nodes, node_cluster_set_test_str)\r\n blocked_days_clus_xr = xr.DataArray(blocked_days_clus, name = \"blocking\", dims={\"time\": GTD['time']})\r\n blocked_days_clus_xr['time'] = GTD['time']\r\n blocked_days_clus_sel = blocked_days_clus_xr.sel(time=np.isin(blocked_days_clus_xr['time.season'], seas))\r\n prec, recall, F1 = calc_pr_rc_F1(GTD_seas, blocked_days_clus_sel)\r\n prec_arr.append(prec)\r\n recall_arr.append(recall)\r\n F1_arr.append(F1)\r\n\r\n return clus_num_arr, prec_arr, recall_arr, F1_arr",
"def get_communities(num_of_neighbors, is_self_loops, relevant_period_groups, full_confusion_csv, classes_csv_file, priod_group_column, similarty_csv = ''):\n\n # generate class_names dict\n cnt = 0\n class_name_dict = {}\n with open(classes_csv_file, 'r') as f:\n reader = csv.reader(f)\n for row in reader:\n if cnt > 0:\n class_name_dict[int(row[8])] = row[1]\n cnt = cnt + 1\n\n\n full_conf = np.genfromtxt(full_confusion_csv, delimiter=',')\n relevant_conf = full_conf[:,:num_of_neighbors+1]\n flatten_conf = np.zeros((relevant_conf.shape[0]*num_of_neighbors,2), dtype=np.int32)\n if similarty_csv != '':\n similarity_mat = np.genfromtxt(similarty_csv, delimiter=',')\n similarity_conf_mat = np.zeros((200, 200), dtype=np.float32)\n print(similarity_mat.shape)\n\n\n row = 0\n for k in range(relevant_conf.shape[0]):\n for m in range(num_of_neighbors):\n flatten_conf[row, 0] = relevant_conf[k,0]\n flatten_conf[row,1] = relevant_conf[k,m+1]\n if similarty_csv != '':\n similarity_conf_mat[int(relevant_conf[k,0]), int(relevant_conf[k,m+1]) ] += similarity_mat[k, m]\n\n row = row + 1\n\n confusion_mat = confusion_matrix(flatten_conf[:,0], flatten_conf[:,1])\n if similarty_csv != '':\n confusion_mat = similarity_conf_mat\n\n confusion_mat = confusion_mat.astype('float') / confusion_mat.sum(axis=1)[:, np.newaxis]\n symmetric_confusion = (confusion_mat + np.transpose(confusion_mat)) / 2\n if not is_self_loops:\n np.fill_diagonal(symmetric_confusion, 0)\n\n # taking only the relevant classes\n if relevant_period_groups != -1:\n df = pd.read_csv(classes_csv_file)\n period_groups = df[priod_group_column]\n relevant_classes = []\n for group in relevant_period_groups:\n group_slice = df[period_groups == group]\n relevant_classes.extend(group_slice['id_period_sorted'].values)\n\n L = len(relevant_classes)\n relevant_confusion = np.zeros((L,L), dtype=np.float32)\n class_node_dict = {}\n for m,cls_i in enumerate(relevant_classes):\n class_node_dict[m] = cls_i\n for n,cls_j in enumerate(relevant_classes):\n relevant_confusion[m,n] = symmetric_confusion[cls_i,cls_j]\n else:\n relevant_confusion = symmetric_confusion\n\n G = nx.from_numpy_matrix(relevant_confusion)\n\n # find best communities based on modularity grade\n resolution_vec = np.linspace(0.0,2,50)\n mod_vec = np.zeros_like(resolution_vec)\n best_modularity = -1\n best_communities = -1\n best_res = -1\n for k in range(resolution_vec.size):\n partition = community.best_partition(G, weight='weight', resolution=resolution_vec[k])\n modularity = community.modularity(partition, G, weight='weight')\n mod_vec[k] = modularity\n if (modularity > best_modularity):\n best_modularity = modularity\n best_communities = partition\n best_res = resolution_vec[k]\n\n summary_str = 'best resolution: %.3f\\nbest modularity: %.3f\\nnumber of communities: %d' % (best_res,best_modularity,len(set(best_communities.values())))\n\n #plt.plot(resolution_vec,mod_vec)\n #plt.show()\n\n # generate community summary file\n count = 0\n strr = ''\n summary_file_name = 'community_summary.csv'\n for com in set(best_communities.values()):\n count += 1.\n list_nodes = [nodes for nodes in best_communities.keys() if best_communities[nodes] == com]\n strr += 'community,' + str(com) + '\\n'\n for nd in list_nodes:\n if relevant_period_groups == -1:\n strr += class_name_dict[nd] + ',id,' + str(nd) + '\\n'\n else:\n strr += class_name_dict[class_node_dict[nd]] + ',id,' + str(class_node_dict[nd]) + '\\n'\n strr += '\\n'\n with open(summary_file_name, \"w\") as text_file:\n text_file.write(strr)\n\n 
print(strr)\n # summary for map visualization tool\n strr = ''\n for k in range(relevant_confusion.shape[0]):\n comm = partition[k]\n comm_members = [nodes for nodes in partition.keys() if partition[nodes] == comm]\n if relevant_period_groups == -1:\n strr += 'id,' + str(k) + ',community,' + str(comm) + ',community_members,'\n else:\n strr += 'id,' + str(class_node_dict[k]) + ',community,' + str(comm) + ',community_members,'\n for member in comm_members:\n if relevant_period_groups == -1:\n strr += str(member) + ','\n else:\n strr += str(class_node_dict[member]) + ','\n strr += '\\n'\n with open('nodes_communities.csv', \"w\") as text_file:\n text_file.write(strr)\n\n\n\n return summary_str",
"def matrix_of_changes():\n\tdrivers = ['PC','WCD']\n\ttasks = ['WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL','REST']\n\tproject='hcp'\n\tatlas = 'power'\n\tknown_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)\n\tfor driver in drivers:\n\t\tall_matrices = []\n\t\tviolin_df = pd.DataFrame()\n\t\tfor task in tasks:\n\t\t\t# subjects = np.array(hcp_subjects).copy()\n\t\t\t# subjects = list(subjects)\n\t\t\t# subjects = remove_missing_subjects(subjects,task,atlas)\n\t\t\tsubjects = np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %('hcp',task,atlas))\n\t\t\tassert (subjects == np.load('/home/despoB/mb3152/dynamic_mod/results/hcp_%s_%s_subs_fz.npy'%(task,atlas))).all()\n\t\t\tstatic_results = graph_metrics(subjects,task,atlas)\n\t\t\tsubject_pcs = static_results['subject_pcs']\n\t\t\tsubject_mods = static_results['subject_mods']\n\t\t\tsubject_wmds = static_results['subject_wmds']\n\t\t\tmatrices = static_results['matrices']\n\t\t\ttask_perf = task_performance(subjects,task)\n\t\t\tassert subject_pcs.shape[0] == len(subjects)\n\t\t\tmean_pc = np.nanmean(subject_pcs,axis=0)\n\t\t\tmean_wmd = np.nanmean(subject_wmds,axis=0)\n\t\t\tmod_pc_corr = np.zeros(subject_pcs.shape[1])\n\t\t\tfor i in range(subject_pcs.shape[1]):\n\t\t\t\tmod_pc_corr[i] = nan_pearsonr(subject_mods,subject_pcs[:,i])[0]\n\t\t\tmod_wmd_corr = np.zeros(subject_wmds.shape[1])\n\t\t\tfor i in range(subject_wmds.shape[1]):\n\t\t\t\tmod_wmd_corr[i] = nan_pearsonr(subject_mods,subject_wmds[:,i])[0]\n\t\t\tif driver == 'PC':\n\t\t\t\tpredict_nodes = np.where(mod_pc_corr>0.0)[0]\n\t\t\t\tlocal_predict_nodes = np.where(mod_pc_corr<0.0)[0]\n\t\t\t\tpc_edge_corr = np.arctanh(pc_edge_correlation(subject_pcs,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_pc_edge_corr_z.npy' %(project,task,atlas)))\n\t\t\telse:\t\t\n\t\t\t\tpredict_nodes = np.where(mod_wmd_corr>0.0)[0]\n\t\t\t\tlocal_predict_nodes = np.where(mod_wmd_corr<0.0)[0]\n\t\t\t\tpc_edge_corr = np.arctanh(pc_edge_correlation(subject_wmds,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_wmd_edge_corr_z.npy' %(project,task,atlas)))\n\t\t\t# Plot matrix of changes\n\t\t\tedge_thresh = 75\n\t\t\tedge_thresh = np.percentile(np.nanmean(matrices,axis=0),edge_thresh)\n\t\t\tpc_edge_corr[:,np.nanmean(matrices,axis=0)<edge_thresh,] = np.nan\n\t\t\thigh_pc_edge_matrix = np.nanmean(pc_edge_corr[predict_nodes],axis=0)\n\t\t\tlow_pc_edge_matrix = np.nanmean(pc_edge_corr[local_predict_nodes],axis=0)\n\t\t\tmatrix = (np.tril(low_pc_edge_matrix) + np.triu(high_pc_edge_matrix)).reshape((264,264))\n\t\t\tplot_matrix = matrix.copy()\n\t\t\tplot_matrix_mask = np.isnan(plot_matrix)\n\t\t\tzscores = scipy.stats.zscore(plot_matrix[plot_matrix_mask==False].reshape(-1))\n\t\t\tplot_matrix[plot_matrix_mask==False] = zscores\n\t\t\tif task != 'REST':\n\t\t\t\tall_matrices.append(plot_matrix)\n\t\t\tplot_corr_matrix(plot_matrix,network_names.copy(),out_file='/home/despoB/mb3152/dynamic_mod/figures/%s_corr_matrix_%s.pdf'%(driver,task),plot_corr=False,return_array=False)\n\n\t\t\tpc_edge_corr[np.isnan(pc_edge_corr)] = 0.0\n\t\t\tconnector_within_network_mask = pc_edge_corr.copy().astype(bool)\n\t\t\tlocal_within_network_mask = pc_edge_corr.copy().astype(bool)\n\t\t\tconnector_between_network_mask = pc_edge_corr.copy().astype(bool)\n\t\t\tlocal_between_network_mask = pc_edge_corr.copy().astype(bool)\n\t\t\tconnector_within_network_mask[:,:,:] = False\n\t\t\tlocal_within_network_mask[:,:,:] = 
False\n\t\t\tconnector_between_network_mask[:,:,:] = False\n\t\t\tlocal_between_network_mask[:,:,:] = False\n\t\t\t\n\t\t\tfor n in predict_nodes:\n\t\t\t\tfor node1,node2 in combinations(range(264),2):\n\t\t\t\t\tif n == node1:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif n == node2:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif known_membership[node1] == 0:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif known_membership[node2] == 0:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif known_membership[node1] == known_membership[node2]:\n\t\t\t\t\t\tconnector_within_network_mask[n][node1,node2] = True\n\t\t\t\t\t\tconnector_within_network_mask[n][node2,node1] = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tconnector_between_network_mask[n][node1,node2] = True\n\t\t\t\t\t\tconnector_between_network_mask[n][node2,node1] = True\n\n\t\t\tfor n in local_predict_nodes:\n\t\t\t\tfor node1,node2 in combinations(range(264),2):\n\t\t\t\t\tif n == node1:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif n == node2:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif known_membership[node1] == 0:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif known_membership[node2] == 0:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif known_membership[node1] == known_membership[node2]:\n\t\t\t\t\t\tlocal_within_network_mask[n][node1,node2] = True\n\t\t\t\t\t\tlocal_within_network_mask[n][node2,node1] = True\n\t\t\t\t\telse:\n\t\t\t\t\t\tlocal_between_network_mask[n][node1,node2] = True\n\t\t\t\t\t\tlocal_between_network_mask[n][node2,node1] = True\n\n\t\t\tdef make_strs_for_df(array_to_add,str_to_add):\n\t\t\t\tarray_len = len(array_to_add)\n\t\t\t\tstr_array_ = np.chararray(array_len,itemsize=40)\n\t\t\t\tstr_array_[:] = str_to_add\n\t\t\t\treturn str_array_\n\t\t\t\n\t\t\tdef make_array_for_df(arrays_to_add):\n\t\t\t\tappend_array = np.zeros((len(arrays_to_add[0]),len(arrays_to_add))).astype(str)\n\t\t\t\tappend_array[:,0] = arrays_to_add[0]\n\t\t\t\tappend_array[:,1] = arrays_to_add[1]\n\t\t\t\tappend_array[:,2] = arrays_to_add[2]\n\t\t\t\treturn append_array\n\n\t\t\tviolin_columns = [\"r value, node i's PCs and j's edge weights\",\"Node Type\",\"Edge Type\"]\n\t\t\ttask_violin_df = pd.DataFrame(columns=violin_columns)\n\t\t\tresult_array_to_add = pc_edge_corr[connector_within_network_mask].reshape(-1)[pc_edge_corr[connector_within_network_mask].reshape(-1)!=0.0]\n\t\t\tedge_type_ = make_strs_for_df(result_array_to_add,'Within Community')\n\t\t\tnode_type_ = make_strs_for_df(result_array_to_add,'Q+')\n\t\t\tdf_array_to_add = make_array_for_df([result_array_to_add,node_type_,edge_type_])\n\t\t\ttask_violin_df = task_violin_df.append(pd.DataFrame(data=df_array_to_add,columns=violin_columns),ignore_index=True)\n\n\t\t\tresult_array_to_add = pc_edge_corr[local_within_network_mask].reshape(-1)[pc_edge_corr[local_within_network_mask].reshape(-1)!=0.0]\n\t\t\tedge_type_ = make_strs_for_df(result_array_to_add,'Within Community')\n\t\t\tnode_type_ = make_strs_for_df(result_array_to_add,'Q-')\n\t\t\tdf_array_to_add = make_array_for_df([result_array_to_add,node_type_,edge_type_])\n\t\t\ttask_violin_df = task_violin_df.append(pd.DataFrame(data=df_array_to_add,columns=violin_columns),ignore_index=True)\n\n\t\t\tresult_array_to_add = pc_edge_corr[connector_between_network_mask].reshape(-1)[pc_edge_corr[connector_between_network_mask].reshape(-1)!=0.0]\n\t\t\tedge_type_ = make_strs_for_df(result_array_to_add,'Between Community')\n\t\t\tnode_type_ = make_strs_for_df(result_array_to_add,'Q+')\n\t\t\tdf_array_to_add = make_array_for_df([result_array_to_add,node_type_,edge_type_])\n\t\t\ttask_violin_df = 
task_violin_df.append(pd.DataFrame(data=df_array_to_add,columns=violin_columns),ignore_index=True)\n\n\t\t\tresult_array_to_add = pc_edge_corr[local_between_network_mask].reshape(-1)[pc_edge_corr[local_between_network_mask].reshape(-1)!=0.0]\n\t\t\tedge_type_ = make_strs_for_df(result_array_to_add,'Between Community')\n\t\t\tnode_type_ = make_strs_for_df(result_array_to_add,'Q-')\n\t\t\tdf_array_to_add = make_array_for_df([result_array_to_add,node_type_,edge_type_])\n\t\t\ttask_violin_df = task_violin_df.append(pd.DataFrame(data=df_array_to_add,columns=violin_columns),ignore_index=True)\n\t\t\ttask_violin_df[\"r value, node i's PCs and j's edge weights\"] = task_violin_df[\"r value, node i's PCs and j's edge weights\"].astype(float)\n\t\t\tif driver == 'PC':\n\t\t\t\tprint task + ', Connector Hubs(Q+): ' + str(scipy.stats.ttest_ind(task_violin_df[\"r value, node i's PCs and j's edge weights\"][task_violin_df['Node Type']=='Q+'][task_violin_df['Edge Type']=='Within Community'],\n\t\t\t\t\ttask_violin_df[\"r value, node i's PCs and j's edge weights\"][task_violin_df['Node Type']=='Q+'][task_violin_df['Edge Type']=='Between Community']))\n\t\t\t\tprint task + ', Non-Connector Hubs(Q-): ' + str(scipy.stats.ttest_ind(task_violin_df[\"r value, node i's PCs and j's edge weights\"][task_violin_df['Node Type']=='Q-'][task_violin_df['Edge Type']=='Within Community'],\n\t\t\t\t\ttask_violin_df[\"r value, node i's PCs and j's edge weights\"][task_violin_df['Node Type']=='Q-'][task_violin_df['Edge Type']=='Between Community']))\n\t\t\telse:\n\t\t\t\tprint task + ', Local Hubs(Q+): ' + str(scipy.stats.ttest_ind(task_violin_df[\"r value, node i's PCs and j's edge weights\"][task_violin_df['Node Type']=='Q+'][task_violin_df['Edge Type']=='Within Community'],\n\t\t\t\t\ttask_violin_df[\"r value, node i's PCs and j's edge weights\"][task_violin_df['Node Type']=='Q+'][task_violin_df['Edge Type']=='Between Community']))\n\t\t\t\tprint task + ', Non Local Hubs (Q-): ' + str(scipy.stats.ttest_ind(task_violin_df[\"r value, node i's PCs and j's edge weights\"][task_violin_df['Node Type']=='Q-'][task_violin_df['Edge Type']=='Within Community'],\n\t\t\t\t\ttask_violin_df[\"r value, node i's PCs and j's edge weights\"][task_violin_df['Node Type']=='Q-'][task_violin_df['Edge Type']=='Between Community']))\n\t\t\t#append for average of all\n\t\t\tviolin_df = violin_df.append(pd.DataFrame(data=task_violin_df,columns=violin_columns),ignore_index=True)\n\t\t\t#Figure for single Task\n\t\t\tsns.set_style(\"white\")\n\t\t\tsns.set_style(\"ticks\")\n\t\t\tcolors = sns.color_palette(['#fdfd96','#C4D8E2'])\n\t\t\twith sns.plotting_context(\"paper\",font_scale=2):\n\t\t\t\tplt.figure(figsize=(24,16))\n\t\t\t\tsns.boxplot(x=\"Node Type\", y=\"r value, node i's PCs and j's edge weights\", hue=\"Edge Type\", order=['Q+','Q-'], data=task_violin_df)\n\t\t\t\tplt.savefig('/home/despoB/mb3152/dynamic_mod/figures/%s_edge_mod_%s.pdf'%(driver,task),dpi=4600)\n\t\t\t\tplt.close()\n\t\t# Average of All\n\t\tplot_corr_matrix(np.nanmean(all_matrices,axis=0),network_names.copy(),out_file='/home/despoB/mb3152/dynamic_mod/figures/%s_corr_matrix_avg.pdf'%(driver),plot_corr=False,return_array=False)\n\t\tif driver == 'PC':\n\t\t\tprint task + ',Connector Hubs(Q+): ' + str(scipy.stats.ttest_ind(violin_df[\"r value, node i's PCs and j's edge weights\"][violin_df['Node Type']=='Q+'][violin_df['Edge Type']=='Within Community'],\n\t\t\t\tviolin_df[\"r value, node i's PCs and j's edge weights\"][violin_df['Node Type']=='Q+'][violin_df['Edge 
Type']=='Between Community']))\n\t\t\tprint task + ', Non-Connector Hubs(Q-): ' + str(scipy.stats.ttest_ind(violin_df[\"r value, node i's PCs and j's edge weights\"][violin_df['Node Type']=='Q-'][violin_df['Edge Type']=='Within Community'],\n\t\t\t\tviolin_df[\"r value, node i's PCs and j's edge weights\"][violin_df['Node Type']=='Q-'][violin_df['Edge Type']=='Between Community']))\n\t\telse:\n\t\t\tprint task + ', Local Hubs(Q+): ' + str(scipy.stats.ttest_ind(violin_df[\"r value, node i's PCs and j's edge weights\"][violin_df['Node Type']=='Q+'][violin_df['Edge Type']=='Within Community'],\n\t\t\t\tviolin_df[\"r value, node i's PCs and j's edge weights\"][violin_df['Node Type']=='Q+'][violin_df['Edge Type']=='Between Community']))\n\t\t\tprint task + ', Non-Local Hubs(Q-): ' + str(scipy.stats.ttest_ind(violin_df[\"r value, node i's PCs and j's edge weights\"][violin_df['Node Type']=='Q-'][violin_df['Edge Type']=='Within Community'],\n\t\t\t\tviolin_df[\"r value, node i's PCs and j's edge weights\"][violin_df['Node Type']=='Q-'][violin_df['Edge Type']=='Between Community']))\n\t\tsns.set_style(\"white\")\n\t\tsns.set_style(\"ticks\")\n\t\tcolors = sns.color_palette(['#fdfd96','#C4D8E2'])\n\t\twith sns.plotting_context(\"paper\",font_scale=3):\n\t\t\tplt.figure(figsize=(24,16))\n\t\t\tsns.boxplot(x=\"Node Type\", y=\"r value, node i's PCs and j's edge weights\",hue=\"Edge Type\", palette=colors,order=['Q+','Q-'], data=violin_df)\n\t\t\tplt.savefig('/home/despoB/mb3152/dynamic_mod/figures/%s_edge_mod_avg.pdf'%(driver),dpi=4600)\n\t\t\tplt.close()",
"def calc_skill_clusters(blocked_days, GTD, GTD_seas, persis_thresh, SOM_nodes, SOM_clusters_block, seas):\r\n ds_arr_ones = []\r\n for clus in SOM_clusters_block:\r\n node_cluster_set_test = [clus]\r\n node_cluster_set_test_str = [str(clus).replace(',', '') for clus in node_cluster_set_test]\r\n #calculate the blocked days which the new cluster determines\r\n blocked_days_clus = calc_blocked_days_clus(blocked_days, persis_thresh, SOM_nodes, node_cluster_set_test_str)\r\n #define as DataArray and select JJA to remove the extended days included for classifying blocks\r\n blocked_days_clus_xr = xr.DataArray(blocked_days_clus, name = \"blocking\", dims={\"time\": GTD['time']})\r\n blocked_days_clus_xr['time'] = GTD['time']\r\n blocked_days_clus_seas = blocked_days_clus_xr.sel(time=np.isin(blocked_days_clus_xr['time.season'], seas))\r\n prec, recall, F1 = calc_pr_rc_F1(GTD_seas, blocked_days_clus_seas)\r\n #calculate precision, recall and F1\r\n if len(str(node_cluster_set_test)) == 1:\r\n comb_str = f\"{node_cluster_set_test[0]}\".replace(\"'\", \"\")\r\n else:\r\n comb_str = f\"{str(node_cluster_set_test)[1:-1]}\".replace(\"'\", \"\") \r\n ds=xr.Dataset({'precision': prec, 'recall': recall, 'F1': F1, 'clus_num': int(len(node_cluster_set_test)), 'set': str(comb_str)})\r\n ds_arr_ones.append(ds)\r\n blocks_one_clusnum = xr.concat(ds_arr_ones, dim = \"set\")\r\n return blocks_one_clusnum",
"def run_classification_experiment ( feature_matrix, target_array, colmap ):\n np.random.seed ( 7062020 ) # Due date\n\n # Split off validation set and cross-validation set\n X_validation = feature_matrix [ : feature_matrix.shape [ 0 ] // 10 ]\n X_cross_validation = feature_matrix [ feature_matrix.shape [ 0 ] // 10 : ]\n y_validation = target_array [ : feature_matrix.shape [ 0 ] // 10 ]\n y_cross_validation = target_array [ feature_matrix.shape [ 0 ] // 10 : ]\n\n experiment_results = {}\n experiment_num = 1\n\n # Use 5-Fold stratified CV\n kfold_strat = KFoldStratifiedCV ( number_of_folds = 5, shuffle = True )\n\n for train, test in kfold_strat.split ( feature_matrix = X_cross_validation, target_array = y_cross_validation ):\n logger.info ( f\"Experiment Number: { experiment_num }\" )\n\n # Get training set\n X_train = X_cross_validation [ train, : ]\n y_train = y_cross_validation [ train ]\n\n # Fit the tree\n d_tree = DecisionTreeClassifier ( evaluate_function = entropy, map_column_node_type = colmap )\n d_tree.fit ( X_train, y_train )\n\n # Prune the tree\n pruned_tree = PostPruner (\n d_tree,\n X_validation = X_validation,\n y_validation = y_validation,\n evaluate_function = accuracy,\n ).prune_tree()\n\n # Get post-pruned predictions\n pruned_preds = pruned_tree.predict ( X_cross_validation [ test, : ] )\n\n # Save the results\n experiment_results [ experiment_num ] = {\n \"actuals\": y_cross_validation [ test ],\n \"preds\": pruned_preds,\n \"model\": pruned_tree,\n }\n experiment_num += 1\n\n return experiment_results\n # End run_classification_experiment",
"def mapRev2Cluster(self):\n\n # For each condition, operating on the side effect matching file to reduce down into\n # the more general categories\n clusterMapping = pd.read_csv('ClusteredSideEffects.csv', sep='$', index_col=0)\n for condition in self.conditions:\n print(\"I'm working on {:s}\".format(condition))\n files = glob.glob('ReviewsMatched2SideEffects/{:s}*csv'.format(condition))\n files = np.sort(files)\n\n for i,f in enumerate(files):\n df = pd.read_csv(f, sep='$', index_col=0)\n\n for cluster in np.unique(clusterMapping['Cluster']):\n # Finding the relevant SEs for the cluster\n SEs = clusterMapping[clusterMapping['Cluster'].eq(cluster)]['Side effect']\n\n # Summing across all those SEs in the dataframe and creating a new column\n match = [SE for SE in SEs if SE in df.columns]\n df[cluster] = (df[match].sum(axis=1) > 0)\n \n if not match:\n df[cluster] = [0]*len(df)\n \n # Stacking to allow for the depression split\n if i == 0:\n master_df = df.copy()\n else:\n master_df = master_df.append(df, ignore_index=0, sort=False)\n\n\n # Dropping all columns not in clusters\n clusters = list(np.unique(clusterMapping['Cluster']))\n keepers = ['Medication','Positive polarity','Negative polarity','Medication mentions','Effectiveness']\n keepers += clusters\n master_df = master_df[keepers]\n \n # Writing the stack to a file to load on to AWS\n master_df.to_csv('FinalProcessedReviews/{:s}_processed.csv'.format(condition), sep='$')\n print(\"I've saved the clustered file\\n\")",
"def create_sparse_matrix(self, matrix_df):\n\n print('creating sparse matrix...')\n sparse_seg_tmp_df = matrix_df.groupby(['segment_id','day_of_week','time_idx'])[self.args['cluster_variable']].mean().reset_index()\n sparse_rt_tmp_df = matrix_df.groupby(['road_type','day_of_week','time_idx'])[self.args['cluster_variable']].mean().reset_index()\n time_seg_df = sparse_seg_tmp_df.groupby(['day_of_week','time_idx'])[self.args['cluster_variable']].mean().reset_index()\n time_rt_df = sparse_rt_tmp_df.groupby(['day_of_week','time_idx'])[self.args['cluster_variable']].mean().reset_index()\n #time_seg_df['time_id'] = time_seg_df.index\n #time_rt_df['time_id'] = time_rt_df.index\n times = list(range(24*60/self.args['time_resolution']))\n full_time_idx = pd.DataFrame([i * 30 for i in times],columns = ['time_idx'])\n full_time_idx['key'] = 1\n full_day_of_week = pd.DataFrame(list(range(7)), columns = ['day_of_week'])\n full_day_of_week['key'] = 1\n full_times = pd.merge(full_time_idx, full_day_of_week, on='key')\n full_times['time_id'] = full_times.index\n time_seg_df = pd.merge(time_seg_df, full_times[['time_idx','day_of_week','time_id']], on=['time_idx','day_of_week'])\n time_rt_df = pd.merge(time_rt_df, full_times[['time_idx','day_of_week','time_id']], on=['time_idx','day_of_week'])\n \n matrix_seg_keys_df = pd.merge(sparse_seg_tmp_df, time_seg_df[['time_id','day_of_week','time_idx']], how='left', on=['day_of_week','time_idx'])\n matrix_rt_keys_df = pd.merge(sparse_rt_tmp_df, time_rt_df[['time_id','day_of_week','time_idx']], how='left', on=['day_of_week','time_idx'])\n\n time_seg_array = np.array(matrix_seg_keys_df['time_id'])\n time_rt_array = np.array(matrix_rt_keys_df['time_id'])\n segment_array = np.array(matrix_seg_keys_df['segment_id'])\n rt_array = np.array(matrix_rt_keys_df['road_type'])\n\n uniquesegments = np.array(list(set(segment_array)))\n uniquerts = np.array(list(set(rt_array)))\n keyuniquesegments = np.array(range(len(uniquesegments)))\n keyuniquerts = np.array(range(len(uniquerts)))\n uniquesegments_df = pd.DataFrame({'segmentskey':keyuniquesegments, 'segment_id':uniquesegments})\n uniquerts_df = pd.DataFrame({'roadtypekey':keyuniquerts, 'road_type':uniquerts})\n\n segments_df = pd.DataFrame(segment_array, columns = ['segment_id'])\n rt_df = pd.DataFrame(rt_array, columns = ['road_type'])\n segments_keys_df = pd.merge(segments_df, uniquesegments_df, how='left', on=['segment_id'])\n rt_keys_df = pd.merge(rt_df, uniquerts_df, how='left', on=['road_type'])\n segmentkeys = np.array(segments_keys_df['segmentskey'])\n rtkeys = np.array(rt_keys_df['road_type'])\n\n level_array_seg = np.array(matrix_seg_keys_df['level_max'])\n sparse_matrix_s = csr_matrix((level_array_seg, (segmentkeys,time_seg_array))).toarray()\n sparse_matrix_seg = preprocessing.scale(sparse_matrix_s)\n level_array_rt = np.array(matrix_rt_keys_df['level_max'])\n sparse_matrix_r = csr_matrix((level_array_rt, (rtkeys,time_rt_array))).toarray()\n sparse_matrix_rt = preprocessing.scale(sparse_matrix_r)\n \n if self.args['perform_pca']:\n sparse_matrix_seg, self.pca_model = self.run_PCA(sparse_matrix_seg)\n sparse_matrix_rt, self.pca_model = self.run_PCA(sparse_matrix_rt)\n else:\n sparse_matrix_seg = sparse_matrix_seg\n sparse_matrix_rt = sparse_matrix_rt\n \n sparse_matrix_withsegkey = pd.DataFrame(sparse_matrix_seg)\n sparse_matrix_withrtkey = pd.DataFrame(sparse_matrix_rt)\n sparse_matrix_withsegkey['segmentskey'] = sparse_matrix_withsegkey.index\n sparse_matrix_withseg = pd.merge(uniquesegments_df, 
sparse_matrix_withsegkey, on=['segmentskey'])\n sparse_matrix_withrtkey['roadtypekey'] = sparse_matrix_withrtkey.index\n sparse_matrix_withrt = pd.merge(uniquerts_df, sparse_matrix_withrtkey, on=['roadtypekey'])\n \n # write sparse_matrix to database as 'clustering' table\n print('writing sparse matrix to db...')\n sqlalchemy_conn_str = open('../conf/sqlalchemy_conn_str.txt', 'r').read()\n engine = create_engine(sqlalchemy_conn_str)\n if self.split_type == 'random':\n sparse_matrix_withseg.to_sql(name='clust_sparse_avebysegment_random', con=engine, if_exists='replace')\n sparse_matrix_withrt.to_sql(name='clust_sparse_avebyrt_random', con=engine, if_exists='replace')\n elif self.split_type == 'date':\n sparse_matrix_withseg.to_sql(name='clust_sparse_avebysegment_date', con=engine, if_exists='replace')\n sparse_matrix_withrt.to_sql(name='clust_sparse_avebyrt_date', con=engine, if_exists='replace')\n \n print('returning train sparse matrix...')\n return (uniquesegments_df, sparse_matrix_seg)",
"def costMatrix(row_feats, col_feats, row_labels, col_labels, metric=\"Pearson\"):\n\n # Get unique label values in non-moving and moving brain\n row_labs = np.asarray(list(set(row_labels).difference({-1, 0})))\n col_labs = np.asarray(list(set(col_labels).difference({-1, 0})))\n\n # Initialize cost matrix\n costMatrix = np.zeros((len(row_labs), len(col_labs)))\n print(costMatrix.shape)\n\n # Compute pairwise costs between all label sets\n for i, r in enumerate(row_labs):\n indr = np.where(row_labels == r)[0]\n lr = len(indr)\n\n if metric in [\"Spearman\",\"Euclidean\",\"Pearson\"]:\n featr = row_feats[indr, :]\n\n for j, c in enumerate(col_labs):\n indc = np.where(col_labels == c)[0]\n \n if metric in [\"Spearman\",\"Euclidean\",\"Pearson\"]:\n featc = col_feats[indc, :]\n\n if metric == \"Spearman\":\n [rVal, _] = spearmanr(featr, featc, axis=1)\n rVal = 1-rVal[lr:, 0:lr]\n\n elif metric == \"Pearson\":\n rVal = cdist(featr, featc, metric='Correlation').mean()\n\n elif metric == \"Euclidean\":\n rVal = cdist(featr, featc).mean()\n\n elif metric == \"Dice\":\n rVal = 1-hmg.dice(indr, indc)\n\n elif metric == \"EMD\":\n rmu = row_feats[indr, :].mean(0)\n rmu = rmu/rmu.sum()\n\n cmu = col_feats[indc, :].mean(0)\n cmu = cmu/cmu.sum()\n\n rVal = emd(rmu, cmu)\n\n\n costMatrix[i, j] = rVal\n\n return [row_labs, col_labs, costMatrix]",
"def get_clusters(ensemble, grouping, clustering):\n\n\t# Prevent SQL injected since column names cannot be parameterized.\n\tif \";\" in ensemble or \";\" in grouping or \";\" in clustering:\n\t\treturn None\n\n\tensemble = ensemble.replace('EnsEns','Ens')\n\tdf = None;\n\n\tif grouping in ['annotation','cluster']:\n\t\tgroupingu = ensemble+\".\"+grouping+\"_\"+clustering\n\telif grouping in ['NeuN']:\n\t\tgroupingu = \"CONCAT('NeuN',cells.\"+grouping+\")\"\n\telse:\n\t\tgroupingu = \"cells.\"+grouping\n\n\t# Get methylation info\n\tquery = \"SELECT count(cells.cell_id) ncells, 'snmC' as modality, \\\n\t\t%(groupingu)s as groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'groupingu': groupingu,\n\t\t\t\t\t'clustering': clustering}\n\ttry:\n\t\tdf = pd.read_sql(query, db.get_engine(current_app, 'methylation_data'))\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\t\t# return None\n\n\t# Get snATAC info\n\tquery = \"SELECT count(cells.cell_id) ncells, 'snATAC' AS modality, %(ensemble)s.cluster_ATAC groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'grouping': grouping,\n\t\t\t\t\t'clustering': clustering}\n\n\ttry:\n\t\tdf_atac = pd.read_sql(query, db.get_engine(current_app, 'snATAC_data'))\n\t\tdf=df.append(df_atac)\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\n\n\t# Get snRNA info\n\tquery = \"SELECT count(cells.cell_id) ncells, 'RNA' AS modality, %(ensemble)s.cluster_RNA groups \\\n\t\tFROM cells \\\n\t\tINNER JOIN %(ensemble)s ON cells.cell_id = %(ensemble)s.cell_id \\\n\t\tGROUP BY groups \" % {'ensemble': ensemble,\n\t\t\t\t\t'grouping': grouping,\n\t\t\t\t\t'clustering': clustering}\n\n\ttry:\n\t\tdf_rna = pd.read_sql(query, db.get_engine(current_app, 'RNA_data'))\n\t\tdf=df.append(df_rna)\n\texcept exc.ProgrammingError as e:\n\t\tnow = datetime.datetime.now()\n\t\tprint(\"[{}] ERROR in app(get_clusters): {}\".format(str(now), e))\n\t\tsys.stdout.flush()\n\n\treturn df",
"def unit_cell_analysis(self):\n\n # Will not run clustering if only one integration result found or if turned off\n if not self.info.categories[\"integrated\"]:\n util.main_log(\n self.info.logfile, \"\\n\\n{:-^80}\\n\".format(\" UNIT CELL ANALYSIS \"), True\n )\n util.main_log(self.info.logfile, \"\\n UNIT CELL CANNOT BE DETERMINED!\", True)\n\n elif len(self.info.categories[\"integrated\"]) == 1:\n unit_cell = self.info.cluster_iterable[0][:5]\n point_group = self.info.cluster_iterable[0][6]\n util.main_log(\n self.info.logfile, \"\\n\\n{:-^80}\\n\".format(\" UNIT CELL ANALYSIS \"), True\n )\n uc_line = (\n \"{:<6} {:^4}: {:<6.2f}, {:<6.2f}, {:<6.2f}, {:<6.2f}, \"\n \"{:<6.2f}, {:<6.2f}\".format(\n \"(1)\",\n point_group,\n unit_cell[0],\n unit_cell[1],\n unit_cell[2],\n unit_cell[3],\n unit_cell[4],\n unit_cell[5],\n )\n )\n util.main_log(self.info.logfile, uc_line, True)\n\n self.info.best_pg = str(point_group)\n self.info.best_uc = unit_cell\n\n else:\n uc_table = []\n uc_summary = []\n\n if self.params.analysis.clustering.flag_on:\n # run hierarchical clustering analysis\n from xfel.clustering.cluster import Cluster\n\n counter = 0\n self.info.clusters = []\n\n threshold = self.params.analysis.clustering.threshold\n cluster_limit = self.params.analysis.clustering.limit\n final_pickles = self.info.categories[\"integrated\"][0]\n\n pickles = []\n if self.params.analysis.clustering.n_images:\n import random\n\n for i in range(len(self.params.analysis.clustering.n_images)):\n random_number = random.randrange(0, len(final_pickles))\n if final_pickles[random_number] in pickles:\n while final_pickles[random_number] in pickles:\n random_number = random.randrange(0, len(final_pickles))\n pickles.append(final_pickles[random_number])\n else:\n pickles = final_pickles\n\n # Cluster from files (slow, but will keep for now)\n ucs = Cluster.from_files(pickle_list=pickles)\n\n # Do clustering\n clusters, _ = ucs.ab_cluster(\n threshold=threshold,\n log=False,\n write_file_lists=False,\n schnell=False,\n doplot=False,\n )\n uc_table.append(\"\\n\\n{:-^80}\\n\" \"\".format(\" UNIT CELL ANALYSIS \"))\n\n # extract clustering info and add to summary output list\n if cluster_limit is None:\n if len(pickles) / 10 >= 10:\n cluster_limit = 10\n else:\n cluster_limit = len(pickles) / 10\n\n for cluster in clusters:\n sorted_pg_comp = sorted(\n cluster.pg_composition.items(), key=lambda x: -1 * x[1]\n )\n pg_nums = [pg[1] for pg in sorted_pg_comp]\n cons_pg = sorted_pg_comp[np.argmax(pg_nums)]\n\n if len(cluster.members) > cluster_limit:\n counter += 1\n\n # Write to file\n cluster_filenames = [j.path for j in cluster.members]\n if self.params.analysis.clustering.write_files:\n output_file = os.path.join(\n self.info.int_base, \"uc_cluster_{}.lst\".format(counter)\n )\n for fn in cluster_filenames:\n with open(output_file, \"a\") as scf:\n scf.write(\"{}\\n\".format(fn))\n\n mark_output = os.path.basename(output_file)\n else:\n mark_output = \"*\"\n output_file = None\n\n else:\n mark_output = \"\"\n output_file = None\n\n # Populate clustering info for GUI display\n uc_init = uctbx.unit_cell(cluster.medians)\n symmetry = crystal.symmetry(\n unit_cell=uc_init, space_group_symbol=\"P1\"\n )\n groups = metric_subgroups(input_symmetry=symmetry, max_delta=3)\n top_group = groups.result_groups[0]\n best_sg = str(groups.lattice_group_info()).split(\"(\")[0]\n best_uc = top_group[\"best_subsym\"].unit_cell().parameters()\n # best_sg = str(top_group['best_subsym'].space_group_info())\n\n uc_no_stdev = (\n \"{:<6.2f} 
{:<6.2f} {:<6.2f} \"\n \"{:<6.2f} {:<6.2f} {:<6.2f} \"\n \"\".format(\n best_uc[0],\n best_uc[1],\n best_uc[2],\n best_uc[3],\n best_uc[4],\n best_uc[5],\n )\n )\n cluster_info = {\n \"number\": len(cluster.members),\n \"pg\": best_sg,\n \"uc\": uc_no_stdev,\n \"filename\": mark_output,\n }\n self.info.clusters.append(cluster_info)\n\n # format and record output\n # TODO: How to propagate stdevs after conversion from Niggli?\n # uc_line = \"{:<6} {:^4}: {:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}), \"\\\n # \"{:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}), \"\\\n # \"{:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}) \"\\\n # \"{}\".format('({})'.format(len(cluster.members)), cons_pg[0],\n # cluster.medians[0], cluster.stdevs[0],\n # cluster.medians[1], cluster.stdevs[1],\n # cluster.medians[2], cluster.stdevs[2],\n # cluster.medians[3], cluster.stdevs[3],\n # cluster.medians[4], cluster.stdevs[4],\n # cluster.medians[5], cluster.stdevs[5],\n # mark_output)\n # uc_table.append(uc_line)\n uc_table.append(\n \"{:<6}: {} {}\".format(\n len(cluster.members), uc_no_stdev, mark_output\n )\n )\n lattices = \", \".join(\n [\"{} ({})\".format(i[0], i[1]) for i in sorted_pg_comp]\n )\n # uc_info = [len(cluster.members), cons_pg[0], cluster.medians,\n # output_file, uc_line, lattices]\n uc_info = [\n len(cluster.members),\n best_sg,\n best_uc,\n output_file,\n uc_no_stdev,\n lattices,\n ]\n uc_summary.append(uc_info)\n\n else:\n # generate average unit cell\n uc_table.append(\n \"\\n\\n{:-^80}\\n\" \"\".format(\" UNIT CELL AVERAGING (no clustering) \")\n )\n uc_a, uc_b, uc_c, uc_alpha, uc_beta, uc_gamma, uc_sg = list(\n zip(*self.info.cluster_iterable)\n )\n cons_pg = Counter(uc_sg).most_common(1)[0][0]\n all_pgs = Counter(uc_sg).most_common()\n unit_cell = (\n np.median(uc_a),\n np.median(uc_b),\n np.median(uc_c),\n np.median(uc_alpha),\n np.median(uc_beta),\n np.median(uc_gamma),\n )\n\n # Populate clustering info for GUI display\n uc_init = uctbx.unit_cell(unit_cell)\n symmetry = crystal.symmetry(unit_cell=uc_init, space_group_symbol=\"P1\")\n groups = metric_subgroups(input_symmetry=symmetry, max_delta=3)\n top_group = groups.result_groups[0]\n best_sg = str(groups.lattice_group_info()).split(\"(\")[0]\n best_uc = top_group[\"best_subsym\"].unit_cell().parameters()\n # best_sg = str(top_group['best_subsym'].space_group_info())\n\n uc_no_stdev = (\n \"{:<6.2f} {:<6.2f} {:<6.2f} \"\n \"{:<6.2f} {:<6.2f} {:<6.2f} \"\n \"\".format(\n best_uc[0],\n best_uc[1],\n best_uc[2],\n best_uc[3],\n best_uc[4],\n best_uc[5],\n )\n )\n cluster_info = {\n \"number\": len(self.info.cluster_iterable),\n \"pg\": best_sg,\n \"uc\": uc_no_stdev,\n \"filename\": None,\n }\n self.info.clusters.append(cluster_info)\n\n # uc_line = \"{:<6} {:^4}: {:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}), \" \\\n # \"{:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}), \" \\\n # \"{:<6.2f} ({:>5.2f}), {:<6.2f} ({:>5.2f}) \" \\\n # \"{}\".format('({})'.format(len(self.final_objects)), cons_pg,\n # np.median(uc_a), np.std(uc_a),\n # np.median(uc_b), np.std(uc_b),\n # np.median(uc_c), np.std(uc_c),\n # np.median(uc_alpha), np.std(uc_alpha),\n # np.median(uc_beta), np.std(uc_beta),\n # np.median(uc_gamma), np.std(uc_gamma), '')\n #\n # uc_table.append(uc_line)\n uc_table.append(uc_no_stdev)\n lattices = \", \".join([\"{} ({})\".format(i[0], i[1]) for i in all_pgs])\n # uc_info = [len(self.final_objects), cons_pg, unit_cell, None,\n # uc_line, lattices]\n uc_info = [\n len(self.info.cluster_iterable),\n best_sg,\n best_uc,\n None,\n uc_no_stdev,\n lattices,\n ]\n 
uc_summary.append(uc_info)\n\n uc_table.append(\"\\nMost common unit cell:\\n\")\n\n # select the most prevalent unit cell (most members in cluster)\n uc_freqs = [i[0] for i in uc_summary]\n uc_pick = uc_summary[np.argmax(uc_freqs)]\n uc_table.append(uc_pick[4])\n uc_table.append(\n \"\\nBravais Lattices in Biggest Cluster: {}\" \"\".format(uc_pick[5])\n )\n self.info.best_pg = str(uc_pick[1])\n self.info.best_uc = uc_pick[2]\n\n if uc_pick[3] is not None:\n self.prime_data_path = uc_pick[3]\n\n for item in uc_table:\n util.main_log(self.info.logfile, item, False)\n self.info.update(uc_table=uc_table)\n\n if self.gui_mode:\n return self.info.clusters",
"def plot_sim_matrix(\n clusterings: list, scoring: Callable[[object, object], object]\n) -> object:\n forDF = []\n for c in clusterings:\n cID = c.get_description()\n for c2 in clusterings:\n c2ID = c2.get_description()\n forDF.append([cID, c2ID, scoring(c, c2).score])\n df = pd.DataFrame(columns=[\"com1\", \"com2\", \"score\"], data=forDF)\n df = df.pivot(index=\"com1\", columns=\"com2\", values=\"score\")\n return sns.clustermap(df)",
"def confusionMatrixCalculation(node,fileName):\n attributeValues, classes, _ = readData(fileName)\n attributeValues = np.array(attributeValues)\n numberofClasses = 4\n\n confusionMatrix = []\n\n for _ in range(numberofClasses):\n confusionMatrix.append([])\n for _ in range(numberofClasses):\n confusionMatrix[-1].append(0)\n for val in range(attributeValues.shape[0]):\n result = classes[val]\n predicted_value = classify(attributeValues[val], node)\n confusionMatrix[int(predicted_value) - 1][int(result) - 1] += 1\n\n \n return confusionMatrix",
"def computeF1Score_delete(num_cluster, matching_algo, actual_clusters, threshold_algo, save_matrix=False):\r\n F1_score = np.zeros(num_cluster)\r\n for cluster in range(num_cluster):\r\n matched_cluster = matching_algo[cluster]\r\n true_matrix = actual_clusters[cluster]\r\n estimated_matrix = threshold_algo[matched_cluster]\r\n if save_matrix: np.savetxt(\"estimated_matrix_cluster=\" + str(\r\n cluster)+\".csv\", estimated_matrix, delimiter=\",\", fmt=\"%1.4f\")\r\n TP = 0\r\n TN = 0\r\n FP = 0\r\n FN = 0\r\n for i in range(num_stacked*n):\r\n for j in range(num_stacked*n):\r\n if estimated_matrix[i, j] == 1 and true_matrix[i, j] != 0:\r\n TP += 1.0\r\n elif estimated_matrix[i, j] == 0 and true_matrix[i, j] == 0:\r\n TN += 1.0\r\n elif estimated_matrix[i, j] == 1 and true_matrix[i, j] == 0:\r\n FP += 1.0\r\n else:\r\n FN += 1.0\r\n precision = (TP)/(TP + FP)\r\n print(\"cluster #\", cluster)\r\n print(\"TP,TN,FP,FN---------->\", (TP, TN, FP, FN))\r\n recall = TP/(TP + FN)\r\n f1 = (2*precision*recall)/(precision + recall)\r\n F1_score[cluster] = f1\r\n return F1_score",
"def cluster_matrix_average(M, cluster_assignments):\n\n# #TODO FIGURE OUT TEST FOR THIS FUNCTION\n# \n# ## from individual_group_clustered_maps(indiv_stability_list, clusters_G, roi_mask_file)\n# \n# indiv_stability_set = np.asarray([np.load(ism_file) for ism_file in indiv_stability_list])\n# #\n# \n# cluster_voxel_scores = np.zeros((nClusters, nSubjects, nVoxels))\n# for i in range(nSubjects):\n# cluster_voxel_scores[:,i] = utils.cluster_matrix_average(indiv_stability_set[i], clusters_G)\n# ##\n# \n \n\n if np.any(np.isnan(M)):\n #np.save('bad_M.npz', M)\n raise ValueError('M matrix has a nan value')\n\n cluster_ids = np.unique(cluster_assignments)\n vox_cluster_label = np.zeros((cluster_ids.shape[0], cluster_assignments.shape[0]), dtype='float64')\n s_idx = 0\n K_mask=np.zeros(M.shape)\n for cluster_id in cluster_ids:\n #import pdb;pdb.set_trace()\n vox_cluster_label[s_idx, :] = M[:,cluster_assignments == cluster_id].mean(1)\n \n \n \n k = (cluster_assignments == cluster_id)[:, np.newaxis]\n k=k*1\n print('Cluster %i size: %i' % (cluster_id, k.sum()))\n K = np.dot(k,k.T)\n K[np.diag_indices_from(K)] = False\n Ktemp=K*1\n K_mask=K_mask+Ktemp\n #import pdb;pdb.set_trace()\n if K.sum() == 0: # Voxel with its own cluster\n #import pdb; pdb.set_trace()\n vox_cluster_label[k[:,0]] = 0.0\n s_idx += 1\n else:\n Kbool=K.astype(bool)\n vox_cluster_label[s_idx,k[:,0].T] = M[Kbool].mean()\n s_idx += 1\n #import pdb; pdb.set_trace()\n return vox_cluster_label, K_mask",
"def get_umap_subsets(self, nn=100, md=0.1, **kwargs):\n # First get umap results:\n results = Table.read(\"../data/dimred_results/apogee_rc_dimred_hyperparametertest.fits\")\n self.Xu = results[\"X_umap_euclidean_nn\"+str(nn) + \"_md\"+str(md)]\n self.Yu = results[\"Y_umap_euclidean_nn\"+str(nn) + \"_md\"+str(md)]\n \n # Now run HDBSCAN to define the subsets\n import hdbscan\n clusterer = hdbscan.HDBSCAN(**kwargs)\n clusterer.fit( np.vstack((self.Xu, self.Yu)).T )\n self.classcol = clusterer.labels_\n self.classprob= clusterer.probabilities_\n self.subsets = np.unique(clusterer.labels_)\n #self.classcol= np.char.rstrip(self.data[\"tsne_class_teffcut40\"],b' ')#.decode('utf8').strip()\n #self.subsets = [\"thin\", \"thick1\", \"thick2\", \"thick3\", \"thick4\",\n # \"mpthin\", \"mpthintrans\", \"smr\", \"t4trans\", \"youngthin\",\n # \"debris1\", \"debris2\", \"debris3\", \"debris4\", \"debris5\", \n # \"smr2\", \"t2trans1\", \"highTi\",\"lowMg\",\"highAlMg?\"]\n self.names = [\"\", \"\", \"\", \"\",\n \"\", \"\", \"Transition group\", \"\", \"\",\n \"Young local disc\", \"\", \"\", \"[s/Fe]-enhanced\", \"\", \"\", r\"\", \"Debris candidate\", \n r\"Extreme-Ti star\", r\"Low-[Mg/Fe] star\", \"High-[Al/Mg] star\"]\n self.Xcoords = [10, 11, 4.5, -12, 18, -31, 22, 26,-22.5, -14, -2, -25]\n self.Ycoords = [5.5,.5, -2, -4, 6, 0, 1.5, -.5, -7, -2, -6, 14]\n self.fsize = [20 , 16, 12, 12, 15, 13, 11, 11, 11, 11, 11, 11]\n self.sym = [\"o\", \"v\", \"^\", \">\", \"<\", \"s\", \"o\", \"*\", \"<\", \"o\",\n \"h\", \"d\", \"D\", \"v\", \"p\", \"*\", \"D\", \"p\", \"s\", \"8\"]\n self.al = [.6, .8, .8, .8, .8, .8, .8,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1]\n self.lw = [0,.5,.5,.5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5, .5]\n self.size= [7,12,12,12,12,15,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18,18]\n self.col = [\"grey\", \"m\", \"hotpink\", \"crimson\", \"r\",\n \"g\", \"brown\", \"orange\", \"gold\", \"k\",\n \"yellow\", \n \"gold\", \"lime\", \"k\", \"royalblue\"]",
"def create_nonsparse_matrix(self, matrix_df):\n\n print('creating nonsparse matrix...')\n clustering_df_tmp = matrix_df[['date','time','date_idx', 'time_idx', 'day_of_week', 'segment_id', 'road_type', 'lat1', 'lat2', 'lon1', 'lon2', 'level_min', 'level_max', 'level_mean', 'level_count', 'level_binary']]\n seg_averages = clustering_df_tmp.groupby(['segment_id', 'day_of_week', 'time_idx'])[['level_min', 'level_max', 'level_mean', 'level_count', 'level_binary']].mean().reset_index()\n rt_averages = clustering_df_tmp.groupby(['road_type', 'day_of_week', 'time_idx'])[['level_min', 'level_max', 'level_mean', 'level_count', 'level_binary']].mean().reset_index()\n \n # write nonsparse_matrix_db to database as 'clustering' table\n print('writing nonsparse matrix to db...')\n sqlalchemy_conn_str = open('../conf/sqlalchemy_conn_str.txt', 'r').read()\n engine = create_engine(sqlalchemy_conn_str)\n if self.split_type == 'random':\n seg_averages.to_sql(name='clust_nonsparse_avebysegment_random', con=engine, if_exists='replace')\n rt_averages.to_sql(name='clust_nonsparse_avebyrt_random', con=engine, if_exists='replace')\n elif self.split_type == 'date':\n seg_averages.to_sql(name='clust_nonsparse_avebysegment_date', con=engine, if_exists='replace')\n rt_averages.to_sql(name='clust_nonsparse_avebyrt_date', con=engine, if_exists='replace')\n \n print('returning train nonsparse matrix...')\n train_averages = clustering_df_tmp.groupby(['segment_id', 'day_of_week', 'time_idx'])[['level_min', 'level_max', 'level_mean', 'level_count','level_binary']].mean().reset_index()\n train_averages.columns = ['segment_id', 'day_of_week', 'time_idx', 'ave_level_min', 'ave_level_max', 'ave_level_mean', 'ave_level_count', 'ave_level_binary']\n nonsparse_matrix = pd.merge(clustering_df_tmp, train_averages, how='left', on=['segment_id', 'day_of_week', 'time_idx'])\n segtimes_df = nonsparse_matrix[['segment_id', 'date', 'time']]\n nonsparse_matrix_final = nonsparse_matrix.drop(columns=['segment_id', 'date','time', 'level_min', 'level_max', 'level_mean', 'level_count', 'level_binary'])\n \n return (segtimes_df, nonsparse_matrix_final)",
"def build_hybrid_sparse_matrix(run_parameters, normalize_by_sum, construct_by_union):\n pg_network_df = kn.get_network_df(run_parameters['pg_network_name_full_path'])\n gg_network_df = kn.get_network_df(run_parameters['gg_network_name_full_path'])\n\n pg_network_n1_names, \\\n pg_network_n2_names = kn.extract_network_node_names(pg_network_df)\n\n gg_network_n1_names, \\\n gg_network_n2_names = kn.extract_network_node_names(gg_network_df)\n\n # limit the gene set to the intersection of networks (gene_gene and prop_gene) and user gene set\n unique_gene_names = kn.find_unique_node_names(gg_network_n1_names, gg_network_n2_names)\n\n if construct_by_union is True:\n unique_gene_names = kn.find_unique_node_names(unique_gene_names, pg_network_n2_names)\n else:\n pg_network_df = kn.update_network_df(pg_network_df, unique_gene_names, 'node_2')\n\n unique_gene_names_dict = kn.create_node_names_dict(unique_gene_names)\n pg_network_n1_names_dict = kn.create_node_names_dict(\n pg_network_n1_names, len(unique_gene_names))\n\n unique_all_node_names = unique_gene_names + pg_network_n1_names\n # map every gene name to a sequential integer index\n gg_network_df = kn.map_node_names_to_index(gg_network_df, unique_gene_names_dict, \"node_1\")\n gg_network_df = kn.map_node_names_to_index(gg_network_df, unique_gene_names_dict, \"node_2\")\n pg_network_df = kn.map_node_names_to_index(pg_network_df, pg_network_n1_names_dict, \"node_1\")\n pg_network_df = kn.map_node_names_to_index(pg_network_df, unique_gene_names_dict, \"node_2\")\n\n gg_network_df = kn.symmetrize_df(gg_network_df)\n pg_network_df = kn.symmetrize_df(pg_network_df)\n\n if normalize_by_sum is True:\n gg_network_df = kn.normalize_network_df_by_sum(gg_network_df, 'wt')\n pg_network_df = kn.normalize_network_df_by_sum(pg_network_df, 'wt')\n\n hybrid_network_df = kn.form_hybrid_network_df([gg_network_df, pg_network_df])\n\n # store the network in a csr sparse format\n network_sparse = kn.convert_network_df_to_sparse(\n hybrid_network_df, len(unique_all_node_names), len(unique_all_node_names))\n\n return network_sparse, unique_gene_names, pg_network_n1_names",
"def prediction(prediction_file_name, clusters_list, svd_use_flag):\n \n coords = misc_functions.getWindowCoords()\n \n test_users = range(coords[0], coords[2] + 1) \n test_items = range(coords[1], coords[3] + 1)\n \n #print \"len(test_users) = \", len(test_users)\n #print \"len(test_items) = \", len(test_items)\n #print \"test_items = \", test_items\n \n # this matrix to be written as result finally\n #misc_functions.step()\n prediction_matrix = zeros((len(test_users), len(test_items)), dtype = float)\n \n training_matrix = scipy.io.mmio.mmread(\"history.mtx\").tocsr()\n \n item_X_meta_matrix = scipy.io.mmio.mmread(\"../../../well_done/items-metas_global.mtx\").toarray()\n \n # getting meta matrices for corresponding using metas\n meta_ctr = 0\n meta_matrices = []\n for meta in METAS_TO_USE:\n if svd_use_flag:\n meta_matrice_file_name = \"users-\" + METAS_TO_USE[meta] + \".svd.mtx\"\n else:\n meta_matrice_file_name = \"users-\" + METAS_TO_USE[meta] + \".mtx\"\n exec(\"meta_matrices.append(scipy.io.mmio.mmread(\\\"\" + meta_matrice_file_name + \"\\\").toarray())\")\n\n #user_counter = 0\n #for user in test_users:\n for cur_cluster in clusters_list:\n \n #print \"cur_cluster[0] = \", cur_cluster[0]\n user = int (cur_cluster[0].split(\"\\t\")[1])\n #print \"user #\", user\n \n #user_metas = {} - changed to list because of problem with dimension\n user_metas = []\n \n values = zeros((len(METAS_TO_USE), len(test_items)), dtype = float)\n meta_ctr = 0\n for meta in METAS_TO_USE:\n \n #print \" meta_matrices = \", meta_matrices\n #print \" meta_matrices[meta_ctr] = \", meta_matrices[meta_ctr]\n user_vector = meta_matrices[meta_ctr][user]\n #print \" user_vector = \", user_vector\n #print \" len(user_metas) = \", len(user_metas)\n #print \" meta_ctr = \", meta_ctr\n #print \"meta = \", meta\n #misc_functions.step()\n \n # normalizing counts of visited metas to use them as weights later\n if max(user_vector) != 0:\n user_metas.append(1.0 * user_vector / max(user_vector))\n else:\n user_metas.append(zeros((len(user_vector), ), dtype = float))\n #print \" user_metas[meta_ctr] = \", user_metas[meta_ctr]\n #print \" user_metas[meta_ctr].shape = \", user_metas[meta_ctr].shape\n \n #for item in test_items:\n for cluster in cur_cluster[1 : ]:\n start_cluster_item = int(cluster.split(\"\\t\")[0])\n stop_cluster_item = int(cluster.split(\"\\t\")[2])\n \n cluster_items = range(start_cluster_item, stop_cluster_item + 1)\n \n for item in cluster_items:\n meta_value = item_X_meta_matrix[item, meta]\n \n # PRICE\n if meta == 8:\n meta_value = priceToPriceCat(meta_value)\n \n # CITY HEURISTIC\n if meta == 11:\n if user_metas[meta_ctr][meta_value - 1] < CITY_TRESHOLD:\n values[:, item - coords[1]] *= CITY_COEF\n \"\"\"\n # DAYTIME\n if meta == 17:\n meta_value = dayTime(meta_value)\n \"\"\"\n \n #print \" meta_value = \", meta_value\n #print \" item = \", item\n #step()\n values[meta_ctr][item - coords[1]] = (user_metas[meta_ctr])[meta_value - 1]\n \n \"\"\"HEURISTICS \"\"\"\n \n \n \n \n \n \"\"\"\\\\ HEURISTICS \"\"\"\n\n meta_ctr += 1\n #print \"values[:, 0:10] = \", values[:, 0:10]\n prediction_vector = numpy.sum(META_WEIGHTS * values, axis = 0)\n #print \"prediction_vector[0:10] = \", prediction_vector[0:10]\n #print \"sum(prediction_vector) = \", sum(prediction_vector)\n prediction_matrix[user - coords[0]] = prediction_vector\n \n #step()\n \n# ===== END OF MAIN CYCLE ===== \n\n result_matrix = scipy.sparse.csr_matrix(prediction_matrix)\n scipy.io.mmio.mmwrite(prediction_file_name, result_matrix, field = 
'real', precision = 5)",
"def compute_cost_matrix(self):\n\n if rank == 0:\n #do random sampling of a parameters\n if self.sampling == \"LHS\":\n lhs = Lhs(lhs_type=\"classic\", criterion=None)\n param_samples = lhs.generate(self.sample_space, self.niters)\n elif self.sampling == \"rsampling\":\n param_samples = self.sample_space.rvs(self.niters)\n elif self.sampling == \"Sobol\":\n sobol = Sobol()\n param_samples = sobol.generate(self.sample_space.dimensions, self.niters)\n \n # generate param samples split\n niters_rank0 = self.niters//size + self.niters % size\n niters_rank = self.niters//size\n count_scatter = [niters_rank0]\n count_scatter.extend((size-2)*[niters_rank])\n count_scatter = np.cumsum(count_scatter)\n\n param_samples_split = np.split(param_samples,count_scatter)\n else:\n param_samples_split = None\n \n #scatter parameter samples data\n param_samps = comm.scatter(param_samples_split,root=0)\n\n # initialize data\n param_samples_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n param_samples_diff_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n jac_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n qoi_dict_rank = {qoi_name:[] for qoi_name in self.funcnames}\n\n \n\n # evaluate QoI at random sampling\n for sample in param_samps: \n qoi_sample, jac_sample = self.jac(sample).values()\n # store output\n for qoi_name in self.funcnames:\n if not (jac_sample[qoi_name] is None):\n param_samples_dict_rank[qoi_name].append(jac_sample[qoi_name])\n jac_dict_rank[qoi_name].append(jac_sample[qoi_name])\n qoi_dict_rank[qoi_name].append(qoi_sample[qoi_name])\n else:\n param_samples_diff_dict_rank[qoi_name].append(sample)\n\n # gather data\n param_samples = None\n param_samples_diff_int = None\n jac_dict = None\n qoi_dict= None\n\n param_samples_dict = comm.gather(param_samples_dict_rank, root=0)\n params_samples_diff_dict = comm.gather(param_samples_diff_dict_rank, root=0)\n jac_dict = comm.gather(jac_dict_rank, root=0)\n qoi_dict = comm.gather(qoi_dict_rank, root=0)\n\n # format gathered data\n if rank == 0:\n #flatten data\n param_samples_dict_flattened = {qoi_name:[] for qoi_name in self.funcnames}\n param_samples_diff_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n jac_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n qoi_dict_flattened = {qoi_name: [] for qoi_name in self.funcnames}\n\n for cpurank in range(size):\n for qoi_name in self.funcnames:\n param_samples_dict_flattened[qoi_name].extend(param_samples_dict[cpurank][qoi_name]) \n param_samples_diff_dict_flattened[qoi_name].extend(params_samples_diff_dict[cpurank][qoi_name])\n jac_dict_flattened[qoi_name].extend(jac_dict[cpurank][qoi_name])\n qoi_dict_flattened[qoi_name].extend(qoi_dict[cpurank][qoi_name])\n\n #compute outer product\n jac_outer_dict = {qoi_name: [] for qoi_name in self.funcnames}\n nfuncs_dict = {qoi_name: 0 for qoi_name in self.funcnames}\n\n for qoi_name in self.funcnames:\n for i in range(len(jac_dict_flattened[qoi_name])):\n jac_sample = jac_dict_flattened[qoi_name][i]\n jac_outer_dict[qoi_name].append(np.outer(jac_sample,jac_sample))\n nfuncs_dict[qoi_name] += 1\n\n # compute cost matrix and norm convergence\n cost_matrix_dict = {}\n cost_matrix_cumul_dict = {}\n norm_convergence_dict = {}\n\n for qoi_name in self.funcnames:\n cost_cumsum = np.cumsum(jac_outer_dict[qoi_name],axis=0)/np.arange(1,nfuncs_dict[qoi_name]+1)[:,None,None]\n cost_matrix_cumul_dict[qoi_name] = cost_cumsum\n cost_matrix_dict[qoi_name] = cost_cumsum[-1,:,:]\n norm_convergence_dict[qoi_name] = 
np.linalg.norm(cost_cumsum,ord='fro',axis=(1,2))\n\n # compute variance matrix\n variance_matrix_dict = {}\n for qoi_name in self.funcnames:\n variance_mat = np.sum((jac_outer_dict[qoi_name]-cost_matrix_dict[qoi_name])**2/(nfuncs_dict[qoi_name]-1),axis=0) \n variance_matrix_dict[qoi_name] = variance_mat\n\n param_results = {\"PARAM_SAMPLES\": param_samples_dict_flattened,\n \"DIFFICULT_PARAM_SAMPLES\": param_samples_diff_dict_flattened}\n\n fun_results = {\"NUMBER_OF_FUNCTION_SUCCESS\": nfuncs_dict,\n \"NORM_OF_SEQ_OF_CUMUL_SUMS\": norm_convergence_dict,\n \"SEQ_OF_CUMUL_SUMS\": cost_matrix_cumul_dict, \n \"VARIANCE_OF_ENTRIES\": variance_matrix_dict,\n \"FINAL_COST_MATRIX\":cost_matrix_dict}\n\n return {'PARAMETER_RESULTS': param_results, 'FUNCTION_RESULTS': fun_results}",
"def get_lumped_matrices(self):\n total_dof = DOF_PER_NODE_STRUCTURAL * len(self.preprocessor.nodes)\n \n if self.frequencies is None:\n cols = 1\n else:\n cols = len(self.frequencies)\n \n list_Kdata = []\n list_Mdata = []\n list_Cdata = []\n\n i_indexes_M, j_indexes_M = [], []\n i_indexes_K, j_indexes_K = [], []\n i_indexes_C, j_indexes_C = [], []\n \n self.nodes_with_lumped_masses = []\n self.nodes_connected_to_springs = []\n self.nodes_connected_to_dampers = []\n # self.nodes_with_nodal_elastic_links = []\n\n flag_Clump = False\n\n # processing external elements by node\n for node in self.preprocessor.nodes.values():\n\n # processing mass added\n if node.there_are_lumped_stiffness:\n position = node.global_dof\n self.nodes_connected_to_springs.append(node)\n list_Kdata.append(self.get_bc_array_for_all_frequencies(node.loaded_table_for_lumped_stiffness, node.lumped_stiffness))\n i_indexes_K.append(position)\n j_indexes_K.append(position)\n\n # processing mass added\n if node.there_are_lumped_masses:\n position = node.global_dof\n self.nodes_with_lumped_masses.append(node)\n list_Mdata.append(self.get_bc_array_for_all_frequencies(node.loaded_table_for_lumped_masses, node.lumped_masses))\n i_indexes_M.append(position)\n j_indexes_M.append(position)\n\n # processing damper added\n if node.there_are_lumped_dampings:\n position = node.global_dof\n self.nodes_connected_to_dampers.append(node)\n list_Cdata.append(self.get_bc_array_for_all_frequencies(node.loaded_table_for_lumped_dampings, node.lumped_dampings))\n i_indexes_C.append(position)\n j_indexes_C.append(position)\n flag_Clump = True\n \n for key, cluster_data in self.preprocessor.nodes_with_elastic_link_stiffness.items():\n node = self.preprocessor.nodes[int(key.split(\"-\")[0])]\n for indexes_i, indexes_j, data, in cluster_data:\n for i in range(2):\n i_indexes_K.append(indexes_i[i])\n j_indexes_K.append(indexes_j[i])\n list_Kdata.append(self.get_bc_array_for_all_frequencies(node.loaded_table_for_elastic_link_stiffness, data[i]))\n \n for key, cluster_data in self.preprocessor.nodes_with_elastic_link_dampings.items():\n node = self.preprocessor.nodes[int(key.split(\"-\")[0])]\n for indexes_i, indexes_j, data, in cluster_data:\n for i in range(2):\n i_indexes_C.append(indexes_i[i])\n j_indexes_C.append(indexes_j[i])\n list_Cdata.append(self.get_bc_array_for_all_frequencies(node.loaded_table_for_elastic_link_dampings, data[i]))\n\n data_Klump = np.array(list_Kdata).reshape(-1, cols)\n data_Mlump = np.array(list_Mdata).reshape(-1, cols)\n data_Clump = np.array(list_Cdata).reshape(-1, cols)\n \n i_indexes_K = np.array(i_indexes_K).flatten()\n i_indexes_M = np.array(i_indexes_M).flatten()\n i_indexes_C = np.array(i_indexes_C).flatten()\n\n j_indexes_K = np.array(j_indexes_K).flatten()\n j_indexes_M = np.array(j_indexes_M).flatten()\n j_indexes_C = np.array(j_indexes_C).flatten()\n\n full_K = [csr_matrix((data_Klump[:,j], (i_indexes_K, j_indexes_K)), shape=[total_dof, total_dof]) for j in range(cols)]\n full_M = [csr_matrix((data_Mlump[:,j], (i_indexes_M, j_indexes_M)), shape=[total_dof, total_dof]) for j in range(cols)]\n full_C = [csr_matrix((data_Clump[:,j], (i_indexes_C, j_indexes_C)), shape=[total_dof, total_dof]) for j in range(cols)]\n \n K_lump = [sparse_matrix[self.unprescribed_indexes, :][:, self.unprescribed_indexes] for sparse_matrix in full_K]\n M_lump = [sparse_matrix[self.unprescribed_indexes, :][:, self.unprescribed_indexes] for sparse_matrix in full_M]\n C_lump = [sparse_matrix[self.unprescribed_indexes, :][:, 
self.unprescribed_indexes] for sparse_matrix in full_C]\n\n Kr_lump = [sparse_matrix[:, self.prescribed_indexes] for sparse_matrix in full_K]\n Mr_lump = [sparse_matrix[:, self.prescribed_indexes] for sparse_matrix in full_M]\n Cr_lump = [sparse_matrix[:, self.prescribed_indexes] for sparse_matrix in full_C]\n\n return K_lump, M_lump, C_lump, Kr_lump, Mr_lump, Cr_lump, flag_Clump",
"def classify_treeInsert(self, query_name, cluster_naming_function):\n classes = Set(self.class_map.values())\n\n full_dist_matrix = self.orig_dist_matrix.drop(query_name)\n full_dist_matrix = full_dist_matrix.drop(query_name, axis=1)\n\n if PROGRESS: print '\\nStarting treeInsert!'\n \n #1] Build a tree for each class\n class_trees = {}\n all_elements = full_dist_matrix.columns.values.tolist()\n classes_done = 0\n num_of_classes = len(classes)\n for c in classes:\n\n #1a. Construct a mini distance matrix for the current class\n nonclass_members = [i for i in all_elements if self.class_map[i] != c]\n class_dist_matrix = full_dist_matrix.drop(nonclass_members)\n class_dist_matrix = class_dist_matrix.drop(nonclass_members, axis=1)\n\n #1b. Build class tree\n if PROGRESS: print 'Building class tree for ' + c\n\n class_njt = NJTree()\n class_njt.build(class_dist_matrix, self.class_map, myClusterNaming)\n class_trees[c] = class_njt\n classes_done = classes_done + 1\n\n if PROGRESS:\n print str(classes_done) + \" classes down, \" + str(num_of_classes - classes_done) \n + \" to go...\"\n\n #2] Determine the insertion cost of each tree\n class_insert_costs = {}\n for c,class_tree in class_trees.iteritems():\n\n #2a. Find insertion cost of each leaf in the tree\n leaves = [i for i in class_tree.tree.nodes() if class_tree.isLeaf(i)] \n leaf_insert_costs = {}\n for leaf_i in leaves:\n\n parent_i = class_tree.tree.neighbors(leaf_i)[0] \n cons = ({'type': 'eq',\n 'fun': lambda x: x[0] + x[1] - nx.shortest_path_length(class_tree.tree, \n source=parent_i, target=leaf_i, weight='length')})\n optimum_leaf_insert_cost = optimize.minimize(_leaf_insertion_cost, [0,0,0], \n args=(class_tree.orig_dist_matrix, leaf_i, leaves, query_name, self), method='SLSQP', \n constraints=cons)\n\n if DEBUG:\n print \"Optimum cost for \", leaf_i, \" : \", optimum_leaf_insert_cost.x[0]\n\n leaf_insert_costs[leaf_i] = optimum_leaf_insert_cost.x[0]\n \n class_insert_costs[c] = min(list(leaf_insert_costs.values()))\n\n #3] Output the class name of tree with minimum insertion cost\n min_insert_cost = min(list(class_insert_costs.values()))\n for c,cost in class_insert_costs.iteritems():\n if cost==min_insert_cost:\n return c\n break",
"def prepare_data_matrix():\n # create matrix X and list of languages\n\n lds = {}\n for fn in listdir(\"clustering\"):\n if fn.lower().endswith(\".txt\"):\n with open(join(\"clustering\", fn), encoding=\"utf8\") as f:\n text = f.read()\n nter = terke(text, n=3)\n lds[fn] = nter\n #print(lds.keys())\n \n #lds is a dictionary of dictionaries: {\"slovenian.txt\": {\"abc\":3,\"efg\":4...}, \"macedonian.txt\":{\"efg\":6...},...}\n l=listOfTuples(lds) #list of strings\n #print(l[:100])\n languages = list(lds.keys()) # ['Slo', 'Mac', ]\n # which language represents row number i: languages[i]\n # which row does language s represent: languagues.index(s)\n X=np.zeros([len(languages),100])\n for i in range(len(languages)):\n #print(languages[i])\n count = 0\n for j in range(100):\n if l[j] in lds[languages[i]]:\n X[i,j]=lds[languages[i]][l[j]]\n count += 1\n # print(count)\n\n #print([sum(x) for x in X])\n \n return X, languages\n # X, languages = prepare_data_matrix()",
"def get_motif_pssm(self, cluster_num, motif_num):\n #conn = sql3.connect(self.dbfile)\n #cursor = conn.cursor()\n #cursor.execute('select max(iteration) from motif_infos')\n #iteration = cursor.fetchone()[0]\n\n #query = 'select rowid from motif_infos where iteration=? and cluster=? and motif_num=?'\n #params = [self.iteration, cluster_num, motif_num]\n #cursor.execute(query, params)\n #rowid = cursor.fetchone()[0]\n #motif_infos = self.tables['motif_infos']\n #rowid = motif_infos[(motif_infos.iteration==self.iteration) & \n # (motif_infos.cluster==cluster_num) & (motif_infos.motif_num==motif_num)].index.values[0]+1\n rowid = self.__get_motif_id(cluster_num, motif_num)\n\n #query = 'select a,c,g,t from motif_pssm_rows where iteration=? and motif_info_id=?'\n #params = [self.iteration, rowid]\n #pssm = pd.read_sql( query, conn, params=params )\n motif_pssm_rows = self.tables['motif_pssm_rows']\n pssm = motif_pssm_rows[(motif_pssm_rows.iteration==self.iteration) & (motif_pssm_rows.motif_info_id==rowid)]\n pssm.drop( ['motif_info_id', 'iteration', 'row'], 1, inplace=True )\n return pssm",
"def getClusters(self) :\n \n similarityMatrix=self.similarityMatrix\n matrixSize=similarityMatrix.shape[0]\n \n realClusters=[]\n weightsFilePath=\"input.txt\"\n clusterFilePath=\"output.txt\"\n\n # write the weights file\n print \" Writing similarity matrix into File ...\"\n l=sorted(zip(similarityMatrix.row, similarityMatrix.col, similarityMatrix.data))\n if (l[-1][1]<matrixSize-1) : l.append((matrixSize-2,matrixSize-1,0))\n lines=\"\\n\".join([\"{0}\\t{1}\\t{2}\".format(i,j,v) for i,j,v in l])\n with open(weightsFilePath, 'w') as weightsFile :\n weightsFile.write(lines)\n \n return clusterFromSimilarityFile(weightsFilePath=weightsFilePath,clusterFilePath=clusterFilePath)",
"def get_result_matrix(self, db, solver_configs, instances, cost='resultTime', fixed_limit=None):\n num_successful = dict(\n (i.idInstance, dict((sc.idSolverConfig, 0) for sc in solver_configs)) for i in instances)\n num_completed = dict(\n (i.idInstance, dict((sc.idSolverConfig, 0) for sc in solver_configs)) for i in instances)\n M = dict((i.idInstance, dict((sc.idSolverConfig, list()) for sc in solver_configs)) for i in instances)\n solver_config_ids = [sc.idSolverConfig for sc in solver_configs]\n instance_ids = [i.idInstance for i in instances]\n if not solver_config_ids or not instance_ids:\n return M, 0, 0\n table = db.metadata.tables['ExperimentResults']\n table_result_codes = db.metadata.tables['ResultCodes']\n from_table = table\n table_has_prop = db.metadata.tables['ExperimentResult_has_Property']\n table_has_prop_value = db.metadata.tables['ExperimentResult_has_PropertyValue']\n\n status_column = table.c['status']\n result_code_column = table.c['resultCode']\n if cost == 'resultTime':\n cost_column = table.c['resultTime']\n cost_property = db.ExperimentResult.resultTime\n cost_limit_column = table.c['CPUTimeLimit']\n\n if fixed_limit:\n cost_column = expression.case([(table.c['resultTime'] > fixed_limit, fixed_limit)],\n else_=table.c['resultTime'])\n cost_limit_column = literal(fixed_limit)\n status_column = expression.case([(table.c['resultTime'] > fixed_limit, literal(21))],\n else_=table.c['status'])\n result_code_column = expression.case([(table.c['resultTime'] > fixed_limit, literal(-21))],\n else_=table.c['resultCode'])\n elif cost == 'wallTime':\n cost_column = table.c['wallTime']\n cost_property = db.ExperimentResult.wallTime\n cost_limit_column = table.c['wallClockTimeLimit']\n\n if fixed_limit:\n cost_column = expression.case([(table.c['wallTime'] > fixed_limit, fixed_limit)],\n else_=table.c['wallTime'])\n cost_limit_column = literal(fixed_limit)\n status_column = expression.case([(table.c['wallTime'] > fixed_limit, literal(22))],\n else_=table.c['status'])\n result_code_column = expression.case([(table.c['wallTime'] > fixed_limit, literal(-22))],\n else_=table.c['resultCode'])\n elif cost == 'cost':\n cost_column = table.c['cost']\n cost_property = db.ExperimentResult.cost\n inf = float('inf')\n cost_limit_column = table.c['CPUTimeLimit'] # doesnt matter\n else:\n cost_column = table_has_prop_value.c['value']\n cost_property = db.ResultPropertyValue.value\n inf = float('inf')\n cost_limit_column = table.c['CPUTimeLimit']\n from_table = table.join(table_has_prop, and_(table_has_prop.c['idProperty'] == int(cost),\n table_has_prop.c['idExperimentResults'] == table.c[\n 'idJob'])).join(table_has_prop_value)\n\n s = select([table.c['idJob'], expression.label('resultCode', result_code_column),\n expression.label('cost', cost_column), expression.label('status', status_column),\n table.c['SolverConfig_idSolverConfig'], table.c['Instances_idInstance'],\n table_result_codes.c['description'], expression.label('limit', cost_limit_column)],\n and_(table.c['Experiment_idExperiment'] == self.idExperiment,\n table.c['SolverConfig_idSolverConfig'].in_(solver_config_ids),\n table.c['Instances_idInstance'].in_(instance_ids)),\n from_obj=from_table.join(table_result_codes))\n\n Run = namedtuple('Run', ['idJob', 'status', 'result_code_description', 'resultCode', 'resultTime',\n 'successful', 'penalized_time10', 'idSolverConfig', 'idInstance',\n 'penalized_time1', 'censored'])\n\n for r in db.session.connection().execute(s):\n if r.Instances_idInstance not in M: continue\n if 
r.SolverConfig_idSolverConfig not in M[r.Instances_idInstance]: continue\n if str(r.resultCode).startswith('1'): num_successful[r.Instances_idInstance][\n r.SolverConfig_idSolverConfig] += 1\n if r.status not in STATUS_PROCESSING: num_completed[r.Instances_idInstance][\n r.SolverConfig_idSolverConfig] += 1\n M[r.Instances_idInstance][r.SolverConfig_idSolverConfig].append(\n Run(r.idJob, int(r.status), r[6], int(r.resultCode),\n None if int(r.status) <= 0 else float(r.cost), str(r.resultCode).startswith('1'),\n float(r.cost) if str(r.resultCode).startswith('1') else (inf if cost not in (\n 'resultTime', 'wallTime') else float(r.limit)) * 10,\n r.SolverConfig_idSolverConfig, r.Instances_idInstance,\n float(r.cost) if str(r.resultCode).startswith('1') else (\n inf if cost not in ('resultTime', 'wallTime') else float(r.limit)),\n not str(r.resultCode).startswith('1')))\n return M, num_successful, num_completed",
"def confusionMatrix(testDataPredictions, testDataOriginal):\n matrix = {\"predicted >50K correctly as >50K\": 0, \"predicted >50K incorrectly as <=50K\": 0,\n \"predicted <=50K correctly as <=50K\": 0, \"predicted <=50K incorrectly as >50K\": 0}\n\n for instance in range(len(testDataPredictions)):\n prediction = testDataPredictions[instance]\n original = testDataOriginal[14].iloc[instance]\n\n #calculating total number of TP,TN,FP and FN\n\n if prediction == 1.0 and original == 1.0:\n matrix[\"predicted >50K correctly as >50K\"] += 1.00\n elif prediction == 0.0 and original == 1.0:\n matrix[\"predicted >50K incorrectly as <=50K\"] += 1.00\n elif prediction == 0.0 and original == 0.0:\n matrix[\"predicted <=50K correctly as <=50K\"] += 1.00\n elif prediction == 1.0 and original == 0.0:\n matrix[\"predicted <=50K incorrectly as >50K\"] += 1.00\n\n #Making the confusion matrix look readable on console printing\n print('----------------')\n print('CONFUSION MATRIX')\n print( 'TP: ', matrix[\"predicted >50K correctly as >50K\"], '||', 'FP: ', matrix[\"predicted >50K incorrectly as <=50K\"])\n print('----------------')\n print('FN: ', matrix[\"predicted <=50K incorrectly as >50K\"], '||', 'TN: ', matrix[\"predicted <=50K correctly as <=50K\"])\n\n # definition of sensitivity, precision and specificity formulas\n sensitivity = matrix[\"predicted >50K correctly as >50K\"] / (\n matrix[\"predicted >50K correctly as >50K\"] + matrix[\"predicted <=50K incorrectly as >50K\"])\n\n precision = matrix[\"predicted >50K correctly as >50K\"]/ (\n matrix[\"predicted >50K correctly as >50K\"] + matrix[\"predicted >50K incorrectly as <=50K\"])\n\n specificity = matrix[\"predicted <=50K correctly as <=50K\"] / (\n matrix[\"predicted <=50K correctly as <=50K\"] + matrix[\"predicted >50K incorrectly as <=50K\"])\n\n print('Precision: ' + str(precision*100) + '%')\n print('Sensitivity: '+ str(sensitivity*100)+ '%')\n print('Specificity: '+ str(specificity*100) +'%')\n\n return matrix, precision, sensitivity, specificity",
"def run(self):\n print(' strategies...')\n matrix_file = ''\n matrix_s, matrix_c = None, None\n # run for all but the optimal version\n item2matrix = os.path.join(self.data_set.base_folder, 'item2matrix.txt')\n for rec_type in self.data_set.graphs:\n for graph in self.data_set.graphs[rec_type]:\n print(' ', graph)\n gt_graph = load_graph(graph)\n for strategy in Strategy.strategies:\n if strategy == 'optimal':\n continue\n print(' ', strategy)\n m_new = self.data_set.matrices[rec_type][graph][strategy][0]\n m_newc = self.data_set.matrices[rec_type][graph][strategy][1]\n debug(' ----', m_new)\n debug(' ----', m_newc)\n if not m_new:\n debug(' ---- not m_new')\n matrix_s, matrix_c, matrix_file = None, None, None\n elif matrix_file != m_new:\n matrix_s = SimilarityMatrix(item2matrix, m_new)\n matrix_c = SimilarityMatrix(item2matrix, m_newc)\n matrix_file = m_new\n debug(' ---- matrix_file != m_new')\n # for miss in self.data_set.missions[rec_type][graph][strategy]:\n for miss in Mission.missions:\n print(' ', miss)\n if 'Information Foraging' in miss or 'Berrypicking' in miss:\n matrix = matrix_c\n else:\n matrix = matrix_s\n for m in self.data_set.missions[rec_type][graph][strategy][miss]:\n for ti in xrange(len(m.targets_original)):\n start = m.path[-2] if m.path else m.start\n debug('++++' * 16, 'mission', ti, '/',\n len(m.targets_original))\n debug(m.targets_original[ti])\n self.navigate(gt_graph, strategy, m, start,\n None, matrix)\n if ti > 0 and len(m.targets_original[ti]) == len(m.targets[0]):\n # print('breaking...')\n m.reset()\n break\n if not (ti + 1) == len(m.targets_original):\n m.path.append(u'*')\n m.reset()\n\n # run the simulations for the optimal solution\n print(' optimal...')\n for rec_type in self.data_set.graphs:\n for graph in self.data_set.graphs[rec_type]:\n print(' ', graph)\n sp_file = graph.rsplit('.', 1)[0] + '.npy'\n with open(sp_file, 'rb') as infile:\n sp = pickle.load(infile)\n for miss in self.data_set.missions[rec_type][graph]['optimal']:\n for m in self.data_set.missions[rec_type][graph]['optimal'][miss]:\n for ti in xrange(len(m.targets_original)):\n start = m.path[-2] if m.path else m.start\n debug('++++' * 16, 'mission', ti, '/', len(m.targets_original))\n debug(m.targets_original[ti])\n self.optimal_path(m, start, sp)\n if not (ti + 1) == len(m.targets_original):\n m.path.append(u'*')\n m.reset()\n\n # # DEBUG\n # item2matrix = os.path.join(self.data_set.base_folder, 'item2matrix.txt')\n # for rec_type in ['rbar']:\n # for graph in self.data_set.graphs[rec_type]:\n # print(' ', graph)\n # gt_graph = load_graph(graph)\n # sp_file = graph.rsplit('.', 1)[0] + '.npy'\n # with open(sp_file, 'rb') as infile:\n # sp = pickle.load(infile)\n # m_newc = self.data_set.matrices[rec_type][graph]['title'][1]\n # matrix = SimilarityMatrix(item2matrix, m_newc)\n # sc = 'Berrypicking'\n # mc1 = self.data_set.missions[rec_type][graph]['title'][sc]\n # mc2 = self.data_set.missions[rec_type][graph]['optimal'][sc]\n # mc3 = self.data_set.missions[rec_type][graph]['random'][sc]\n # for m1, m2, m3 in zip(\n # mc1,\n # mc2,\n # mc3\n # ):\n # # evalute with title strategy\n # for ti in xrange(len(m1.targets_original)):\n # start = m1.path[-2] if m1.path else m1.start\n # debug('++++' * 16, 'mission', ti, '/', len(m1.targets_original))\n # # debug(m1.targets_original[ti])\n # self.navigate(gt_graph, 'title', m1, start, None, matrix)\n # # print(m1.path, ti, len(m1.targets_original[ti]), len(m1.targets[0]))\n # if ti > 0 and len(m1.targets_original[ti]) == 
len(m1.targets[0]):\n # # print('breaking...')\n # m1.reset()\n # break\n # if not (ti + 1) == len(m1.targets_original):\n # m1.path.append(u'*')\n # m1.reset()\n #\n # # evaluate with optimal strategy\n # for ti in xrange(len(m2.targets_original)):\n # start = m2.path[-2] if m2.path else m2.start\n # # debug('++++' * 16, 'mission', ti, '/', len(m2.targets_original))\n # # debug(m2.targets_original[ti])\n # self.optimal_path(m2, start, sp)\n # if not (ti + 1) == len(m2.targets_original):\n # m2.path.append(u'*')\n # m2.reset()\n # # pdb.set_trace()\n #\n # # if len(m1.path) < len(m2.path):\n # # print(len(m1.path), len(m2.path))\n # # pdb.set_trace()\n # # m1.compute_stats()\n # # m2.compute_stats()\n # # if m1.stats[-1] > m2.stats[-1]:\n # # print(m1.stats)\n # # print(m2.stats)\n # # pdb.set_trace()\n #\n # print('MISSION COLLECTION DONE')\n # mc1.compute_stats()\n # mc2.compute_stats()\n # print(mc1.stats[-1], mc2.stats[-1])\n # pdb.set_trace()\n\n # fname_5 = u'../data/bookcrossing/graphs/rbar_5.gt'\n # fname_20 = u'../data/bookcrossing/graphs/rbar_20.gt'\n # sp_file_5 = fname_5.rsplit('.', 1)[0] + '.npy'\n # sp_file_20 = fname_20.rsplit('.', 1)[0] + '.npy'\n # with open(sp_file_5, 'rb') as infile:\n # sp_5 = pickle.load(infile)\n # with open(sp_file_20, 'rb') as infile:\n # sp_20 = pickle.load(infile)\n # sc = 'Berrypicking'\n # mc_5 = self.data_set.missions['rbar'][fname_5]['optimal'][sc]\n # mc_52 = self.data_set.missions['rbar'][fname_5]['title'][sc]\n # mc_20 = self.data_set.missions['rbar'][fname_20]['optimal'][sc]\n # mc_202 = self.data_set.missions['rbar'][fname_20]['title'][sc]\n # for m5, m20, m52, m202 in zip(\n # mc_5,\n # mc_20,\n # mc_52,\n # mc_202\n # ):\n # # evaluate 5 with optimal strategy\n # for ti in xrange(len(m5.targets_original)):\n # start = m5.path[-2] if m5.path else m5.start\n # self.optimal_path(m5, start, sp_5)\n # if not (ti + 1) == len(m5.targets_original):\n # m5.path.append(u'*')\n # m5.reset()\n #\n # # evaluate 20 with optimal strategy\n # for ti in xrange(len(m20.targets_original)):\n # start = m20.path[-2] if m20.path else m20.start\n # self.optimal_path(m20, start, sp_20)\n # if not (ti + 1) == len(m20.targets_original):\n # m20.path.append(u'*')\n # m20.reset()\n #\n # # if len(m5.path) < len(m20.path) or \\\n # if m5.path.count('*') > m20.path.count('*'):\n # print(len(m5.path))\n # for part in ' '.join(m5.path[2:]).split('*'):\n # print(' ', part)\n # print(len(m20.path))\n # for part in ' '.join(m20.path[2:]).split('*'):\n # print(' ', part)\n # pdb.set_trace()\n #\n # print('MISSION COLLECTION DONE')\n # mc_5.compute_stats()\n # mc_20.compute_stats()\n # print(mc_5.stats[-1], mc_20.stats[-1])\n #\n # for m5, m20 in zip(mc_5.missions, mc_20.missions):\n # if m5.stats[-1] > m20.stats[-1]:\n # print(m5.stats)\n # print(m20.stats)\n # pdb.set_trace()\n # pdb.set_trace()\n\n # write the results to a file\n # self.write_paths()\n self.save()",
"def cluster(self, similarity=0.3, l_go_selective=False):\n sw=util.StopWatch(\"GO_Cluster::cluster\")\n #K=stats.kappa_stat(self.data.values)\n #T_edge=pd.DataFrame({'Gene_A':[],'Gene_B':[],'TYPE':[],'SCORE':[]})\n M=self.data.values\n n,m=M.shape\n print(\"Matrix size: %d genes x %d GOs\" % (n, m))\n S_go=self.data.header()\n out=self.t_go\n if m==0:\n self.DM=np.zeros(0)\n if out is None:\n return {}\n return None\n if m==1:\n # only 1, no need to cluster\n if out is None:\n return {S_go[0]:1}\n out['GROUP_ID']=1\n out['FirstInGroupByEnrichment']=1\n out['FirstInGroupByLogP']=1\n if l_go_selective: out['FirstInGroupByGini']=1\n self.DM=np.zeros(0)\n return out\n #T_edge=pd.DataFrame({'Gene_A':[S_go[0]],'Gene_B':[S_go[0]],'TYPE':['Direct'],'SCORE':[1.0]})\n if self.DM is None:\n self.DM=stats.kappa_stat(self.data.values, n_CPU=self.n_CPU)\n sw.check(\"Kappa done ...\")\n #import ms.msobject as mo\n #mo.MSObject.dump_object(self.DM, s_name='untitled', s_cache_dir=\".\")\n import scipy.cluster.hierarchy as clst\n import fastcluster\n Z=fastcluster.linkage(1.0-self.DM, method='average')\n S=clst.fcluster(Z, 1-similarity, criterion='distance')\n c_grp={ x:S[i] for i,x in enumerate(S_go) }\n if out is None:\n return c_grp\n out['GROUP_ID']=out.GO.apply(lambda x: c_grp[x])\n self.similarity=similarity\n if l_go_selective:\n out.sort_values(['GROUP_ID','GiniIndex','LogP','Enrichment'], ascending=[True,False,True,False], inplace=True)\n out['FirstInGroupByGini']=0\n else:\n out.sort_values(['GROUP_ID','LogP','Enrichment'], ascending=[True,True,False], inplace=True)\n out['FirstInGroupByEnrichment']=0\n out['FirstInGroupByLogP']=0\n iB=iE=0\n n=len(out)\n out.index=list(range(n))\n for i in range(1,n+1):\n if i>=n or out.ix[i,'GROUP_ID']!=out.ix[i-1,'GROUP_ID']:\n iE=i-1\n out.ix[iB:iE,'BestLogPInGroup']=out.ix[iB:iE,'LogP'].min()\n out.ix[iB:iE,'BestEnrichmentInGroup']=out.ix[iB:iE,'Enrichment'].max()\n idx=out.ix[iB:iE,'LogP'].argmin()\n out.ix[idx, 'FirstInGroupByLogP']=1\n out.ix[iB, 'FirstInGroupByEnrichment']=1\n if l_go_selective:\n out.ix[iB:iE,'BestGiniInGroup']=out.ix[iB:iE,'GiniIndex'].max()\n idx=out.ix[iB:iE,'GiniIndex'].argmax()\n out.ix[idx, 'FirstInGroupByGini']=1\n iB=i\n if l_go_selective:\n out.sort_values(['BestGiniInGroup','BestLogPInGroup','GROUP_ID','FirstInGroupByGini','GiniIndex','LogP','Enrichment'], ascending=[False,True,True,False,False,True,False], inplace=True)\n out.index=list(range(n))\n# out.to_csv('t0.csv', index=False)\n # iteratively pick unique patterns\n S_pattern=util.unique2(out._PATTERN_) # unique but preserve order\n n_pattern=len(S_pattern)\n iB=iE=0\n i_pattern={k:(i+1) for i,k in enumerate(S_pattern)}\n c_pattern={k:0 for k in S_pattern}\n out['NEW_GROUP_ID']=0\n for i in range(1,n+1):\n if i>=n or out.ix[i,'GROUP_ID']!=out.ix[i-1,'GROUP_ID']:\n iE=i-1\n s_pat=out.ix[iB, '_PATTERN_']\n out.ix[iB:iE, 'NEW_GROUP_ID']=c_pattern[s_pat]*n_pattern+i_pattern[s_pat]\n c_pattern[s_pat]+=1\n iB=i\n out.sort_values(['NEW_GROUP_ID'], inplace=True)\n out.drop('NEW_GROUP_ID', axis=1, inplace=True)\n else:\n out.sort_values(['BestLogPInGroup','GROUP_ID','FirstInGroupByLogP','LogP','Enrichment'], ascending=[True,True,False,True,False], inplace=True)\n\n # relabel group id, so that group id are in order of statistical significance\n c_order={}\n cnt=1\n for grp in out.GROUP_ID:\n if grp not in c_order:\n c_order[grp]=cnt\n cnt+=1\n out['GROUP_ID']=out['GROUP_ID'].apply(lambda x: c_order[x])\n\n out['URL']=''\n out.index=list(range(len(out)))\n S_URL=out.URL.tolist()\n 
for i in out.index:\n if out.ix[i,'GO'].startswith('M'): #MsigDB\n if re.search(r'\\s\\(.+\\)$', out.ix[i,'Description']):\n # Notice: description may contain \")\" \"GSE8515_IL1_VS_IL6_4H_STIM_)MAC_DN\"\n s_key=re.search(r'\\s\\(.+\\)$', out.ix[i,'Description']).group()[2:-1]\n S_URL[i]='http://http://www.broadinstitute.org/gsea/msigdb/geneset_page.jsp?geneSetName='+s_key\n out['URL']=S_URL\n return out"
] | [
"0.6683406",
"0.6142232",
"0.5941017",
"0.5886404",
"0.57107574",
"0.56908506",
"0.56840414",
"0.56270343",
"0.5577767",
"0.54864204",
"0.54859143",
"0.54727614",
"0.54714465",
"0.5455581",
"0.54445",
"0.5434564",
"0.5425108",
"0.54225826",
"0.541594",
"0.5410452",
"0.5405825",
"0.53911835",
"0.5389713",
"0.53510684",
"0.53380495",
"0.5309687",
"0.5275885",
"0.5273452",
"0.5265697",
"0.5243992"
] | 0.7701423 | 0 |
Arbitrary choices to associate number of requests with length | def length(requests_mean):
    if requests_mean <= 5.:
        return 'short'
    elif requests_mean <= 10:
        return 'medium'
    else:
        return 'long' | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def NUMBER_OF_REC_CHOICE():\n return 13",
"def get_num_to_request():\n #ALLOWABLE_REQUEST_SIZES = [1, 3, 5, 8, 12]\n ALLOWABLE_REQUEST_SIZES = [1, 3, 5, 8]\n avgrate = jobtracker.query(\"SELECT AVG(files.size/\" \\\n \"(TO_SECONDS(download_attempts.updated_at)*1/86400. - \" \\\n \"TO_SECONDS(download_attempts.created_at)*1/86400.)) \" \\\n \"FROM files, download_attempts \" \\\n \"WHERE files.id=download_attempts.file_id \" \\\n \"AND download_attempts.status='downloaded'\", \\\n fetchone=True)\n avgsize = jobtracker.query(\"SELECT AVG(size/numrequested) FROM requests \" \\\n \"WHERE numbits=%d AND \" \\\n \"file_type='%s'\" % \\\n (config.download.request_numbits, \\\n config.download.request_datatype.lower()), \\\n fetchone=True)\n if avgrate is None or avgsize is None:\n return min(ALLOWABLE_REQUEST_SIZES)\n\n # Total number requested that can be downloaded per day (on average).\n max_to_request_per_day = avgrate/avgsize\n \n used = get_space_used()\n avail = get_space_available()\n reserved = get_space_committed()\n \n # Maximum number of bytes that we should request\n max_bytes = min([avail-reserved-config.download.min_free_space, \\\n config.download.space_to_use-reserved-used])\n # Maximum number to request\n max_num = max_bytes/avgsize\n\n ideal_num_to_request = min([max_num, max_to_request_per_day])\n\n if debug.DOWNLOAD:\n print \"Average dl rate: %.2f bytes/day\" % avgrate\n print \"Average size per request unit: %d bytes\" % avgsize\n print \"Max can dl per day: %d\" % max_to_request_per_day\n print \"Max num to request: %d\" % max_num\n print \"Ideal to request: %d\" % ideal_num_to_request\n\n # Return the closest allowable request size without exceeding\n # 'ideal_num_to_request'\n num_to_request = max([0]+[N for N in ALLOWABLE_REQUEST_SIZES \\\n if N <= ideal_num_to_request])\n\n return num_to_request",
"def get_total_combo(pool, size):\n return len(pool) ** argp.length",
"def _requestSizeHeterogeneous(self, children):\n primary_length_name = self.PRIMARY_LENGTH\n secondary_length_name = self.SECONDARY_LENGTH\n primary_length = secondary_length = 0\n for child in children:\n child_size = child.requested_size\n primary_length += getattr(child_size, primary_length_name)\n cell_secondary_length = getattr(child_size, secondary_length_name)\n if cell_secondary_length > secondary_length:\n secondary_length = cell_secondary_length\n\n children_nb = len(children)\n primary_length += (children_nb - 1) * self.spacing\n\n result = Size(0, 0)\n setattr(result, primary_length_name, primary_length)\n setattr(result, secondary_length_name, secondary_length)\n return result",
"def how_long(length: int=4, choices: int=len(words), speed: int=1000 * 1000 * 1000 * 1000,\n optimism: int=2) -> int:\n # https://github.com/python/mypy/issues/7765\n assert choices > 0\n assert length > 0\n count: int = (choices ** length)\n return int(count / (speed * optimism))",
"def test_requests_num(self):\n\n requests_num = len(self.response.context['requests'])\n self.assertLessEqual(requests_num, 10)",
"def _choice_protocol(self):\n # space to add more complex choice algorithms, if desired\n return 0",
"def default_length(self) -> int:\r\n ...",
"def _requestSizeHomogeneous(self, children):\n primary_length_name = self.PRIMARY_LENGTH\n secondary_length_name = self.SECONDARY_LENGTH\n max_size = Size(0, 0)\n try:\n for child in children:\n max_size |= child.requested_size\n except AttributeError as ex:\n # I want to modify the text that's in the first arg. But strings\n # as well as tuples are immutable. So I just make a list for\n # a little while.\n args = list(ex.args)\n args[0] += \", did you forget to request the size of the child?\"\n ex.args = tuple(args)\n raise\n children_nb = len(children)\n primary_length = children_nb * getattr(max_size, primary_length_name)\n secondary_length = getattr(max_size, secondary_length_name)\n\n primary_length += (children_nb - 1) * self.spacing\n\n result = Size(0, 0)\n setattr(result, primary_length_name, primary_length)\n setattr(result, secondary_length_name, secondary_length)\n return result",
"def num_accepts(self, max_len: int, bound: Sequence[Text] = ()) -> Tuple[int, int, int]:\n lt1: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n lt2: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n eq1: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n eq2: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n gt1: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n gt2: Dict[FrozenSet[int], int] = collections.defaultdict(int)\n eq1[frozenset(self.start_nodes)] = 1\n num_accepted_le = int(self.accepts(\"\"))\n num_accepted_gt = 0\n for c in itertools.islice(itertools.chain(bound, itertools.repeat(None)), 0, max_len):\n for nodes, count in lt1.items():\n for element in self.possible_transitions(nodes):\n next_nodes = frozenset(self.next_nodes(nodes, element))\n lt2[next_nodes] += count\n for nodes, count in eq1.items():\n for element in self.possible_transitions(nodes):\n next_nodes = frozenset(self.next_nodes(nodes, element))\n if c is None or (element is not None and element > c):\n gt2[next_nodes] += count\n elif element == c:\n eq2[next_nodes] += count\n else:\n lt2[next_nodes] += count\n for nodes, count in gt1.items():\n for element in self.possible_transitions(nodes):\n next_nodes = frozenset(self.next_nodes(nodes, element))\n gt2[next_nodes] += count\n num_accepted_le += self._sum_tables(eq2)\n num_accepted_le += self._sum_tables(lt2)\n num_accepted_gt += self._sum_tables(gt2)\n if not lt2 and not eq2 and not gt2:\n break # Exit early if we know this regex cannot accept anymore strings.\n lt1, lt2 = lt2, collections.defaultdict(int)\n eq1, eq2 = eq2, collections.defaultdict(int)\n gt1, gt2 = gt2, collections.defaultdict(int)\n num_accepted_eq = int(len(bound) <= max_len and self.accepts(bound))\n return num_accepted_le - num_accepted_eq, num_accepted_eq, num_accepted_gt",
"def pick_length(self, ak_spec: Union[str, BKT]) -> Tuple[Optional[List[Hedron]], Optional[BKT]]:\n ...",
"def length(self):\n ...",
"def testsize(self):\n for size in range(5):\n AttributeAbility(size=size + 1)",
"def get_response_pdu_size(self):\n return 1 + 1 + 2 * self.count",
"def get_num_of_choices(self) -> int:\n return len(self._choices)",
"def selectables_length(self) -> int:\n\n return 1",
"def __len__(self, context=None):\n if context is not None:\n context = self._repair_context(context)\n uri = self.rest_services[\"size\"]\n payload=dict()\n if context:\n context = context.n3()\n payload[\"context\"] = context\n r = requests.get(uri, params = payload)\n return int(r.text)",
"def __len__(self) -> int:\n return 3",
"def requests(self, state):\n # poisson distribution for generating random no of requests based on average\n location = state[0]\n if location == 0:\n requests = np.random.poisson(2)\n if location == 1:\n requests = np.random.poisson(12)\n if location == 2:\n requests = np.random.poisson(4)\n if location == 3:\n requests = np.random.poisson(7)\n if location == 4:\n requests = np.random.poisson(8)\n # limiting no of requests to 15\n if requests > 15:\n requests = 15\n # (0,0) is not considered as customer request, however the driver is free to reject all\n possible_actions_index = random.sample(range(1, (m-1)*m + 1), requests) + [0]\n actions = [self.action_space[i] for i in possible_actions_index]\n \n\n return possible_actions_index, actions",
"def response_space():",
"def count_choices(self) -> dict:\r\n times_chosen = dict()\r\n\r\n # exclude the optimistic value when counting choices\r\n for arm, values in self.rewards.items():\r\n if self.optim_c not in values:\r\n times_chosen[arm] = len(values)\r\n else:\r\n times_chosen[arm] = 0\r\n\r\n return times_chosen",
"def __len__(self):\n\t\treturn 3",
"def request_to_speak(_) -> int:\n return 1 << 32",
"def request_to_speak(_) -> int:\n return 1 << 32",
"def __len__():",
"def __len__():",
"def __len__():",
"def __len__(self):\n return 3",
"def __len__(self):\n return 3",
"def __len__(self):\n # replica + max wait + min bytes + len(topics)\n size = self.HEADER_LEN + 4 + 4 + 4 + 4\n for topic, parts in iteritems(self._reqs):\n # topic name + len(parts)\n size += 2 + len(topic) + 4\n # partition + fetch offset + max bytes => for each partition\n size += (4 + 8 + 4) * len(parts)\n return size"
] | [
"0.60413057",
"0.5787301",
"0.5696149",
"0.56239843",
"0.5614543",
"0.554054",
"0.55402803",
"0.54318595",
"0.5417974",
"0.54012364",
"0.5351382",
"0.53226864",
"0.53211933",
"0.53190243",
"0.5317493",
"0.5313957",
"0.52929085",
"0.52625877",
"0.52498925",
"0.522541",
"0.5171876",
"0.5157086",
"0.5135717",
"0.5135717",
"0.5116699",
"0.5116699",
"0.5116699",
"0.51114863",
"0.51114863",
"0.510824"
] | 0.588172 | 1 |
Write to a LaTeX file the variables computed from the cluster classification | def cluster_classification_tex(f,browsing_matrix,diversifying_matrix, weblog,session_data_threshold,cluster_type,classification_column_diversity,classification_wanted_transaction):
divpat_classification_wanted_transaction = classification_wanted_transaction
divpat_N_classification_wanted_transaction=len(divpat_classification_wanted_transaction)
f.write("\n% 6. Cluster Classification")
columns_latex = '|'+'c|'*len(session_data_threshold[cluster_type].unique())
f.write("\n\\newcommand{\\%s}{%s}"%('DivColumnsLatex',columns_latex))
columns_blank = ' ' + '& '*(len(session_data_threshold[cluster_type].unique()) -1)
f.write("\n\\newcommand{\\%s}{%s}"%('DivColumnsBlank',columns_blank))
cluster_list = []
ieuc_clusters = []
star_chain_like_clusters = []
length_clusters = []
browsing_pattern_1 = []
browsing_pattern_2 = []
browsing_pattern_3 = []
diversifying_pattern_1 = []
diversifying_pattern_2 = []
diversifying_pattern_3 = []
cluster_ids = session_data_threshold[cluster_type].unique()
cluster_ids.sort()
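    # One pass per cluster (in ascending id order): collect IEUC, star/chain tendency, session length label and the top browsing/diversifying transitions.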
for cluster_id in cluster_ids:
cluster_list.append(str(cluster_id))
cluster_session_list=session_data_threshold[session_data_threshold[cluster_type]==cluster_id].session_id.values
temp_cluster_weblog=weblog[weblog.session_id.isin(cluster_session_list)]
pa,pa_names = proportional_abundance(temp_cluster_weblog,'requested_'+classification_column_diversity)
cluster_entropy=ShannonEntropy(pa,normalize=True)
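        # IEUC for the cluster is reported as 2**entropy of its requested-category distribution, rounded for display.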
ieuc_clusters.append(str(round(np.power(2.0,cluster_entropy),2)))
star_chain_like_clusters.append(star_chain_str(session_data_threshold[session_data_threshold[cluster_type]==cluster_id].star_chain_like.mean()))
length_clusters.append(length(session_data_threshold[session_data_threshold[cluster_type]==cluster_id].requests.mean()))
# Browsing patterns
r,c=np.unravel_index(browsing_matrix[cluster_id][:-1,:-1].argsort(axis=None)[::-1][:3],dims=(divpat_N_classification_wanted_transaction,divpat_N_classification_wanted_transaction))
browsing_pattern_1.append('%.1f\%%: %s$\\rightarrow$%s'%(100.0*browsing_matrix[cluster_id][r[0],c[0]],divpat_classification_wanted_transaction[r[0]],divpat_classification_wanted_transaction[c[0]]))
browsing_pattern_2.append('%.1f\%%: %s$\\rightarrow$%s'%(100.0*browsing_matrix[cluster_id][r[1],c[1]],divpat_classification_wanted_transaction[r[1]],divpat_classification_wanted_transaction[c[1]]))
browsing_pattern_3.append('%.1f\%%: %s$\\rightarrow$%s'%(100.0*browsing_matrix[cluster_id][r[2],c[2]],divpat_classification_wanted_transaction[r[2]],divpat_classification_wanted_transaction[c[2]]))
# Diversifying patterns
r,c=np.unravel_index(np.nan_to_num(diversifying_matrix[cluster_id])[:-1,:-1].argsort(axis=None)[::-1][:3],dims=(divpat_N_classification_wanted_transaction,divpat_N_classification_wanted_transaction))
diversifying_pattern_1.append('%.1f\%%: %s$\\rightarrow$%s'%(100.0*diversifying_matrix[cluster_id][r[0],c[0]],divpat_classification_wanted_transaction[r[0]],divpat_classification_wanted_transaction[c[0]]))
diversifying_pattern_2.append('%.1f\%%: %s$\\rightarrow$%s'%(100.0*diversifying_matrix[cluster_id][r[1],c[1]],divpat_classification_wanted_transaction[r[1]],divpat_classification_wanted_transaction[c[1]]))
diversifying_pattern_3.append('%.1f\%%: %s$\\rightarrow$%s'%(100.0*diversifying_matrix[cluster_id][r[2],c[2]],divpat_classification_wanted_transaction[r[2]],divpat_classification_wanted_transaction[c[2]]))
del temp_cluster_weblog
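    # Emit each summary list as a LaTeX \newcommand so the report template can pull the values in by name.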
f.write("\n\\newcommand{\\%s}{%s}"%('DivClusterList',' & '.join(cluster_list)))
f.write("\n\\newcommand{\\%s}{%s}"%('DivIEUCClusters',' & '.join(ieuc_clusters)))
f.write("\n\\newcommand{\\%s}{%s}"%('StarChainClusters',' & '.join(star_chain_like_clusters)))
f.write("\n\\newcommand{\\%s}{%s}"%('LengthClusters',' & '.join(length_clusters)))
f.write("\n\\newcommand{\\%s}{%s}"%('BrowsingPatternClustersOne',' & '.join(browsing_pattern_1)))
f.write("\n\\newcommand{\\%s}{%s}"%('BrowsingPatternClustersTwo',' & '.join(browsing_pattern_2)))
f.write("\n\\newcommand{\\%s}{%s}"%('BrowsingPatternClustersThree',' & '.join(browsing_pattern_3)))
f.write("\n\\newcommand{\\%s}{%s}"%('DiversifyingPatternClustersOne',' & '.join(diversifying_pattern_1)))
f.write("\n\\newcommand{\\%s}{%s}"%('DiversifyingPatternClustersTwo',' & '.join(diversifying_pattern_2)))
f.write("\n\\newcommand{\\%s}{%s}"%('DiversifyingPatternClustersThree',' & '.join(diversifying_pattern_3)))
return f; | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def output(\n self,\n fileformat,\n **keywords\n ):\n \n # add the default parameters, they will be checked against the keywords\n defaults = {\n 'ref':'cogid',\n 'entry':'concept',\n 'missing':0,\n 'filename':'lingpy-{0}'.format(str(date.today())),\n }\n \n # compare with keywords and add missing ones\n for key in defaults:\n if key not in keywords:\n keywords[key] = defaults[key]\n\n if fileformat == 'paps.nex':\n paps = self.get_paps(\n ref=keywords['ref'],\n entry=keywords['entry'],\n missing=keywords['missing']\n )\n pap2nex(\n self.cols,\n paps,\n missing=keywords['missing'],\n filename=keywords['filename']+'.paps'\n )\n\n if fileformat == 'taxa':\n out = ''\n for col in self.cols:\n out += col + '\\n'\n f = open(keywords['filename'] + '.taxa','w')\n f.write(out)\n f.close()",
"def write_to_latex(arr, title, n, m, function_type, region):\r\n df = pd.DataFrame(arr)\r\n df.to_csv(df.to_csv('%s_n=%s_m=%s.csv'\r\n % (title, n, m)))\r\n with open('%s_n=%s_m=%s_%s_%s.tex' %\r\n (title, n, m, function_type, region), 'w') as tf:\r\n tf.write(df.to_latex())",
"def perf2latex(latex_fn, all_perf, metrics_name, slice_v, cam_v):\n slice_num = slice_v.shape[0]\n\n f = open('%s'%latex_fn, 'w')\n f.write(\"\\\\documentclass{article}\\n\")\n f.write(\"\\\\usepackage[utf8]{inputenc}\\n\")\n f.write(\"\\\\usepackage{booktabs} \\n\")\n f.write(\"\\\\usepackage[]{float}\\n\")\n f.write(\"\\\\usepackage[margin=1.2in]{geometry}\\n\")\n f.write(\"\\\\begin{document}\\n\\n\")\n\n for m_name in metrics_name:\n print(m_name)\n f.write('\\\\begin{table}[tbh]\\n')\n f.write('\\\\begin{center}\\n')\n f.write('\\\\begin{tabular}{|*{%d}{c|}}\\n'%(slice_num + 1))\n f.write('\\\\hline\\n')\n f.write(' Survey')\n #for slice_idx, slice_id in enumerate(slice_cam_id[:-1]):\n for j, (slice_id, cam_id) in enumerate(zip(slice_v, cam_v)):\n f.write(' & %d\\_c%d'%(slice_id, cam_id))\n f.write(' \\\\\\\\ \\n')\n f.write('\\\\hline\\n')\n\n m = all_perf[m_name]\n print(m.shape)\n survey_num = m.shape[0]\n for i in range(survey_num):\n f.write('%d'%(i))\n for j in range(slice_num):\n f.write(' & %.3f'%m[i,j])\n f.write(' \\\\\\\\ \\n')\n\n f.write('\\\\hline\\n')\n f.write('\\\\end{tabular}\\n')\n f.write('\\\\end{center}\\n')\n f.write('\\\\caption{Metric: %s}\\n'%(m_name))\n f.write('\\\\end{table}\\n\\n\\n')\n\n f.write('\\\\end{document}\\n')\n print('\\\\end{document}\\n')\n \n f.close()",
"def write_labels():\n with open('../data/labels.txt', 'w') as labels_file:\n labels = generate_labels()\n labels_file.write('\\n'.join(labels))",
"def write_gct_file(output_file, class_names, class_counts, expression_matrix):\n total_genes = len(expression_matrix)\n first_key = list(expression_matrix.keys())[0]\n total_samples = len(expression_matrix[first_key])\n\n headers = ['NAME', 'DESCRIPTION']\n\n for c_name, c_count in zip(class_names, class_counts):\n for i in range(c_count):\n headers.append('{}_{}'.format(c_name, i + 1))\n\n with open(output_file, 'w') as f:\n f.write('#1.2\\n')\n f.write('{} {}\\n'.format(total_genes, total_samples))\n f.write('\\t'.join(headers))\n f.write('\\n')\n\n for g_name, values in expression_matrix.items():\n f.write(g_name)\n f.write('\\tna\\t')\n f.write('\\t'.join(\n ['{0:.2f}'.format(v) for v in values]\n ))\n f.write('\\n')",
"def md_writer(clf, features, outcome, eval_folder,\n config_file, summary_df, i=''):\n if config_file.endswith('.xlsx'):\n config = pd.read_excel(config_file, sheetname='Sheet1')\n elif config_file.endswith('.csv'):\n config = pd.read_csv(config_file)\n\n clf_params = clf.get_params()\n clf_name = str(clf)[:str(clf).index('(')]\n clf_img = clf_name+str(i)\n\n file_name = clf_name+str(i)+'_Evaluation.md'\n\n save_file = open(eval_folder+file_name, 'w')\n\n def new_line():\n save_file.write('\\n')\n\n save_file.write('<link rel=\"stylesheet\" href=\"style.css\" type=\"text/css\" />\\n')\n save_file.write('# Model Evaluation Report\\n')\n new_line()\n\n save_file.write('## Data Configuration:\\n')\n new_line()\n save_file.write(config.to_html(na_rep='', index=False).replace('NaT', ''))\n new_line()\n\n save_file.write('## Classifier Parameters: '+clf_name+'\\n')\n new_line()\n for elem in clf_params:\n save_file.write('* {}: {}\\n'.format(elem, clf_params[elem]))\n new_line()\n\n summary_df = summary_df.T\n summary_df.columns = ['value']\n\n save_file.write('## Evaluation Metrics; Summary\\n')\n new_line()\n save_file.write(summary_df.to_html())\n new_line()\n\n save_file.write('## ROC Curve\\n')\n new_line()\n save_file.write('\\n')\n new_line()\n\n save_file.write('## Precision-Recall Curve\\n')\n new_line()\n save_file.write('\\n')\n new_line()\n\n save_file.write('## Precision, Recall vs % Population\\n')\n new_line()\n save_file.write('\\n')\n\n if clf_name in ['LogisticRegression']:\n save_file.write('## Coefficients\\n')\n new_line()\n for i,coef in enumerate(clf.coef_[0]):\n save_file.write('*<b>{}: {}</b>\\n'.format(features[i], round(coef,4)))\n new_line()\n\n if clf_name in ['WeightedQuestions']:\n save_file.write('## Weights\\n')\n new_line()\n for i,wt in enumerate(clf.weights):\n save_file.write('*<b>{}: {}</b>\\n'.format(features[i], wt))\n new_line()\n\n save_file.close()\n\n def markdown_to_html(md_file, out_file_name=None):\n import markdown\n\n with open(md_file, 'r') as f:\n html = markdown.markdown(f.read())\n\n if out_file_name is None:\n out_file_name = md_file.split('.')[0]+'.html'\n with open(out_file_name, 'w') as f:\n f.write(html)\n\n markdown_to_html(eval_folder+file_name)",
"def write_output(label1, label2, label3, submission_file):\n with open(submission_file, 'w') as f:\n f.write('Id,Bound'+ '\\n')\n for index, lab in enumerate(label1):\n f.write(str(index) + ',' + str(int(lab)) + '\\n')\n for index, lab in enumerate(label2):\n f.write(str(len(label1) + index) + ',' + str(int(lab)) + '\\n')\n for index, lab in enumerate(label3):\n f.write(str(len(label1) + len(label2) + index) + ',' + str(int(lab)))\n if index < len(label3) - 1:\n f.write('\\n')",
"def produce_output_txt(self):\n\n NAME = \"TODO get name form cpacs object\"\n\n result_dir = get_results_directory(\"WeightConventional\")\n\n output_file = Path(result_dir, \"Aircraft_Geometry.out\")\n\n OutputTextFile = open(output_file, \"w\")\n\n OutputTextFile.write(\"\\n#################################################\")\n OutputTextFile.write(\"\\n###### AIRCRAFT GEOMETRY EVALUATION MODULE ######\")\n OutputTextFile.write(\"\\n###### OUTPUTS ######\")\n OutputTextFile.write(\"\\n#################################################\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nAircraft: \" + NAME)\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nGeometry Evaluations-----------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nUSEFUL INFO -------------------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\n \"\\nIf fuselage or wing number is greater than 1 the\\n\"\n \"information of each obj are listed in an \"\n \"array ordered\\nprogressively\"\n )\n OutputTextFile.write(\n \"\\nSymmetry output: 0 = no symmetry, 1 = x-y,\\n\" + \"2 = x-z, 3 = y-z planes\"\n )\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nRESULTS -----------------------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nFUSELAGE ----------------------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(f\"\\nNumber of fuselage sections [-]: {self.fuse_sec_nb}\")\n OutputTextFile.write(f\"\\nNumber of fuselage segments [-]: {self.fuse_seg_nb}\")\n OutputTextFile.write(f\"\\nCabin segments array [-]: {self.cabin_seg}\")\n OutputTextFile.write(f\"\\nFuse Length [m]: {np.around(self.fuse_length, 5)}\")\n OutputTextFile.write(f\"\\nFuse nose Length [m]: {np.around(self.fuse_nose_length, 5)}\")\n OutputTextFile.write(f\"\\nFuse cabin Length [m]: {np.around(self.fuse_cabin_length, 5)}\")\n OutputTextFile.write(f\"\\nFuse tail Length [m]: {np.around(self.fuse_tail_length, 5)}\")\n OutputTextFile.write(f\"\\nAircraft Length [m]: {np.around(self.tot_length, 5)}\")\n OutputTextFile.write(\n \"\\nCircumference of each section of the fuselage [m]:\"\n f\"\\n{np.around(self.fuse_sec_circ, 5)}\"\n )\n OutputTextFile.write(\n \"\\nRelative distance of each section of the\"\n + \"fuselage, respect to the first one [m]: \\n\"\n + str(np.around(self.fuse_sec_rel_dist, 5))\n )\n OutputTextFile.write(\n \"\\nLength of each segment of the fuselage [m]: \\n\"\n + str(np.around(self.fuse_seg_length, 5))\n )\n OutputTextFile.write(\n \"\\nMean fuselage width [m]: \" + str(np.around(self.fuse_mean_width, 5))\n )\n OutputTextFile.write(\n \"\\nWidth of each section of the fuselage [m]: \\n\"\n + str(np.around(self.fuse_sec_width, 5))\n )\n OutputTextFile.write(\n \"\\nVolume of each segment of the fuselage \"\n \"[m^3]: \\n\" + str(np.around(self.fuse_seg_vol, 5))\n )\n OutputTextFile.write(\n \"\\nVolume of the cabin [m^3]: \" + str(np.around(self.fuse_cabin_vol, 5))\n )\n OutputTextFile.write(\"\\nVolume of the fuselage [m^3]: \" + str(np.around(self.fuse_vol, 5)))\n 
OutputTextFile.write(\"\\n\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(\"\\nWINGS -------------------------------------------\")\n OutputTextFile.write(\"\\n-------------------------------------------------\")\n OutputTextFile.write(f\"\\nNumber of Wings [-]: {self.wing_nb}\")\n OutputTextFile.write(f\"\\nWing symmetry plane [-]: {self.wing_sym}\")\n OutputTextFile.write(f\"\\nNumber of wing sections [-]: {self.wing_sec_nb}\")\n OutputTextFile.write(f\"\\nNumber of wing segments [-]: {self.wing_seg_nb}\")\n OutputTextFile.write(f\"\\nWing Span [m]: \\n{np.around(self.wing_span, 5)}\")\n OutputTextFile.write(\n \"\\nWing MAC length [m]: \\n\"\n + str(\n np.around(\n self.wing_mac[\n 0,\n ],\n 5,\n )\n )\n )\n OutputTextFile.write(\n \"\\nWing MAC x,y,z coordinate [m]: \\n\"\n + str(\n np.around(\n self.wing_mac[\n 1:4,\n ],\n 5,\n )\n )\n )\n OutputTextFile.write(\n \"\\nWings sections thickness [m]: \\n\" + str(np.around(self.wing_sec_thickness, 5))\n )\n OutputTextFile.write(\n \"\\nWings sections mean thickness [m]: \\n\" + str(np.around(self.wing_sec_mean_thick, 5))\n )\n OutputTextFile.write(\n \"\\nWing segments length [m]: \\n\" + str(np.around(self.wing_seg_length, 5))\n )\n OutputTextFile.write(\n \"\\nWing max chord length [m]: \\n\" + str(np.around(self.wing_max_chord, 5))\n )\n OutputTextFile.write(\n \"\\nWing min chord length [m]: \\n\" + str(np.around(self.wing_min_chord, 5))\n )\n OutputTextFile.write(\n \"\\nWings planform area [m^2]: \\n\" + str(np.around(self.wing_plt_area, 5))\n )\n OutputTextFile.write(\n \"\\nMain wing planform area [m^2]: \" + str(np.around(self.wing_plt_area_main, 5))\n )\n OutputTextFile.write(\"\\nVolume of each wing [m^3]: \\n\" + str(np.around(self.wing_vol, 5)))\n OutputTextFile.write(\"\\nTotal wing volume [m^3]: \" + str(np.around(self.wing_tot_vol, 5)))\n OutputTextFile.write(\"\\nWing volume for fuel storage [m^3]: \" + str(self.wing_fuel_vol))\n\n # Close Text File\n OutputTextFile.close()",
"def cluster_and_render(conf, dbname, outname=\"./text.html\", nclusters=8):\n\n\n db = sqlite3.connect(dbname)\n r = db.execute(\"select min(year), max(year) from counts where conf=?\", (conf,))\n minyear, maxyear = r.fetchone()\n\n # total words per year for normalization purposes\n r = db.execute(\"select year, count(*) from counts where conf=? order by year\", (conf,))\n year2c = dict([(year, c) for year, c in r])\n yearcounts = dict2arr(year2c, range(minyear, maxyear+1), 1)\n\n\n def add_content(subcluster, content, suffix):\n \"\"\"\n Render the cluster as an image\n \"\"\"\n\n fname = './plots/%s_%s.png' % (conf, suffix)\n\n # pick the top 10 terms\n subcluster = sorted(subcluster, key=lambda t: max(t[1:].astype(float)), reverse=True)\n subcluster = subcluster[:10]\n\n words = np.array(subcluster)[:,0]\n ys = np.array(subcluster)[:,1:].astype(float)\n mean = [np.mean(ys[:,i]) for i in xrange(ys.shape[1])]\n maxmean = max(mean)\n idx = mean.index(maxmean)\n\n # this is used to make the top-k list in the HTML later\n content.append(('', words, fname, idx))\n\n\n data = []\n for arr in subcluster:\n word = arr[0]\n for x, y in enumerate(map(float, arr[1:])):\n data.append(dict(\n group=\"normal\",\n word=word,\n x=xs[x],\n y=y, \n alpha=0.3\n ))\n\n # add a line for the mean\n for x, y in enumerate(mean):\n data.append(dict(group=\"aggregate\", word='___mean___', x=xs[x], y=y, alpha=1))\n\n if 1:\n maxy = max(10, max(pluckone(data, 'y')))\n if maxy <= 10:\n breaks = [0, 5, 10]\n\n\n # pygg lets you write ggplot2 syntax in python\n p = ggplot(data, aes(x='x', y='y', group='word', color='group', alpha='alpha'))\n p += geom_line(size=1)\n p += scale_color_manual(values=\"c('normal' = '#7777dd','aggregate' = 'black')\", guide=\"FALSE\")\n p += scale_alpha_continuous(guide=\"FALSE\")\n if 1:\n if maxy <= 10:\n p += scale_y_continuous(lim=[0, maxy], breaks=breaks, labels = \"function (x) as.integer(x)\")\n else:\n p += scale_y_continuous(lim=[0, maxy], labels = \"function (x) as.integer(x)\")\n p += legend_bottom\n p += theme(**{\n \"axis.title\":element_blank()\n })\n ggsave(fname, p, width=10, height=4, libs=['grid'])\n \n\n\n def vectors():\n \"\"\"\n Extract a matrix of term count vectors\n\n Return: [\n [word, count1, count2, ...],\n ...\n ]\n \"\"\"\n r = db.execute(\"select word, year, c from counts where conf=? order by word, year\", (conf,))\n vects = defaultdict(dict)\n for w,y,c in r:\n l = vects[w]\n l[y] = float(c) \n\n\n ret = []\n for w in vects:\n d = vects[w]\n\n # if word is super uncommon, skip it\n if (max(d.values()) <= 3):\n continue\n if (max([v / (1.+year2c.get(y,0)) for y, v in d.items()]) < .1): \n continue\n\n # some years may not have the word\n counts = dict2arr(d, xrange(minyear, maxyear+1), 1.0)\n\n \n # naive window averaging smoothing over the trend curve\n smooth = []\n for i in xrange(len(counts)):\n smooth.append(np.mean(counts[max(0,i-2):i+2]))\n if max(smooth) > 2:\n ret.append([w] + smooth)\n return np.array(ret)\n\n\n vects = vectors()\n # dimensions: words (row) x year (col)\n data = vects[:,1:].astype(float)\n\n # there's a bajillion ways to normalize the counts before clustering.\n # we do the following:\n\n # 1. divide by the total number of words in that year\n # (normalize by column)\n for idx, base in enumerate(yearcounts):\n data[:,idx] /= float(base)\n\n # 2. 
ensure zero mean and 1 std\n # (normalize by row)\n data = np.array([(l - np.mean(l)) / (max(l)) for l in data ])\n\n\n clusterer = KMeans(nclusters, n_init=50, init='k-means++')\n clusterer.fit(data) \n labels = clusterer.labels_\n xs = np.array(range(minyear, maxyear+1))\n\n content = []\n\n # each label is a cluster\n for label in set(labels):\n idxs = labels == label\n cluster = vects[idxs]\n\n # sort the words/clusters by their max count\n cluster = sorted(cluster, key=lambda t: max(t[1:].astype(float)), reverse=True)\n if not len(cluster): continue\n cluster = np.array(cluster)\n words = cluster[:,0]\n words = list(words)\n\n add_content(cluster, content, label)\n\n content.sort(key=lambda c: c[-1])\n\n\n\n # make HTML\n from jinja2 import Template\n template = Template(file('./clustertemplate.html').read())\n\n with file(outname, 'w') as f:\n f.write( template.render(content=content))",
"def output_classLabel_to_txt(save_path):\n file_obj = open(save_path,'w')\n length = len(class_label)\n for i in range(0,length):\n line = '%d:%s'%(i,class_label[i])\n file_obj.writelines(line+'\\n')\n return True",
"def print_latex(self):\n\n pdf = pylatex.Document(\n \"default\"\n )\n\n with pdf.create(pylatex.Section(\n \"Equações Diofantinas\"\n )) as section:\n\n section.append(\"Equação:\")\n ultimo = self.numbers[-1]\n eq = []\n cont = 1\n for i in self.numbers:\n simbolo = \"+\"\n if i == ultimo:\n simbolo = \"= 1\"\n eq.append(\n pylatex.NoEscape(\n \" {}x_{} {}\".format(i, cont, simbolo)\n )\n )\n cont = cont + 1\n\n section.append(pylatex.Math(data=eq))\n\n text = \"n = {}\".format(self.order)\n section.append(text)\n\n m = pylatex.Matrix(self.take_vec(), mtype='b')\n matrix = pylatex.Math(data=['b = ', m])\n section.append(matrix)\n\n m = pylatex.Matrix(self.take_matrix(), mtype='b')\n matrix = pylatex.Math(data=['A = ', m])\n section.append(matrix)\n\n section.append(\"Resposta = {}\".format(self.cofactor_matrix()))\n\n section.append(pylatex.LineBreak())\n section.append(\"Confirmando:\")\n section.append(pylatex.LineBreak())\n s = 0\n for i in range(len(self.numbers)):\n r = self.numbers[i] * self.cofactor_matrix()[i]\n s = s + r\n resp = \"\\t {}\\t{} \\t* \\t{} \\t= \\t{} \\t({})\\n\".format(\n i,\n self.numbers[i],\n self.cofactor_matrix()[i],\n r,\n s\n )\n section.append(resp)\n\n if self.create_pdf:\n pdf.generate_pdf()\n\n pdf.generate_tex()",
"def fortout(\n cluster,\n filename=\"fort.10\",\n reset_nbody_scale=False,\n reset_nbody_mass=True,\n reset_nbody_radii=True,\n):\n units0, origin0 = save_cluster(cluster)\n cluster.to_centre()\n\n if reset_nbody_scale:\n reset_nbody_scale(cluster, mass=reset_nbody_mass, radii=reset_nbody_radii)\n\n cluster.to_nbody()\n\n np.savetxt(\n filename,\n np.column_stack(\n [\n cluster.m,\n cluster.x,\n cluster.y,\n cluster.z,\n cluster.vx,\n cluster.vy,\n cluster.vz,\n ]\n ),\n )\n\n return_cluster(cluster, units0, origin0)\n\n return 0",
"def write(self, filename=None):\n if filename == None:\n filename = self.ofilename\n\n ofile = open(filename, 'w')\n\n ofile.write('# Susceptibility: %E d(susc): %E Coercivity: %E d(coer): %E\\n' % (self.susceptibility_mean, self.susceptibility_std, self.coercivity_mean, self.coercivity_std) )\n ofile.write('# H[] M[] Mfit[]\\n')\n\n #for i in range(len(self.h)):\n # ofile.write(\" %12.10f %12.10f %12.10f\\n\" % ( self.h[i], self.m[i], self.m_fit[i] ) )\n\n ofile.close()",
"def write():\n #with st.spinner(\"Loading Dashboard ...\"):\n #ast.shared.components.title_awesome(\"\")\n\n st.title('arXiv - Analytics')\n st.text(\"\")\n if st.checkbox('Most similar words in w2v'):\n user_input = st.text_input(\"Topic (please enter up to two keywords)\", 'Machine Learning')\n user_input = user_input.lower().replace(\" \", \"_\")\n st.text(\"\")\n number_of_similar_words = st.slider('Select a modulus', 3, 50)\n plot_similar_words(model, user_input, number_of_similar_words)\n st.pyplot()\n st.text(\"\")\n if st.checkbox('Word Cloud'):\n cluster = st.slider('Select a cluster', 0, 49)\n word_cloud_kmeans(cluster)\n st.pyplot()",
"def main(input_filepath, latex):\n logger = logging.getLogger(__name__)\n\n df = pd.read_csv(input_filepath)\n out = df.head()\n if latex:\n out = out.to_latex()\n print(out)",
"def print_data(self, latex=False, name=None):\n factor_3d_dens = 4.04367e-8\n\n rho_g = self.local_density_mean()\n sg = self.local_surface_density()\n\n expected_lj = (sg/(rho_g))*1e9 # in pc\n proj_lj = self.expected_lj_mart(sg, rho_g/1e9)\n\n if not latex:\n print(\"rho: {} nh/cm3, sg: {} ms/pc2, sg/rho: {} pc, Z: {} pc, lj: {} pc, sfr: {} gyr\".format(\n rho_g * factor_3d_dens,\n sg,\n expected_lj,\n abs(self.popt[1]) * 1e3,\n proj_lj,\n tau_sfr(rho_g/1e9)/1e9))\n else:\n print(\"{} & {:.3g} & {:.3g} & {:.3g} & {:.3g} & {:.3g} \\\\\\\\\".format(\n name,\n rho_g * factor_3d_dens,\n sg,\n expected_lj,\n proj_lj,\n abs(self.popt[1])*1e3))\n\n\n return",
"def __str__(self):\n outs = str(self.cluster_subspace).split(\"\\n\")[:6]\n\n if self.regression_data is not None:\n # This might need to be redefined to take \"expectation\" using measure\n feature_avg = np.average(self.feature_matrix, axis=0)\n feature_std = np.std(self.feature_matrix, axis=0)\n outs += [\n f\"Regression Data : estimator={self.regression_data.estimator_name}\",\n f\" module={self.regression_data.module}\",\n f\" parameters={self.regression_data.parameters}\",\n f\"Target Property : \"\n f\"mean={np.mean(self.regression_data.property_vector):0.4f} \"\n f\"std={np.std(self.regression_data.property_vector):0.4f}\",\n ]\n fit_var = sum(\n self._subspace.function_total_multiplicities[1:] * self.eci[1:] ** 2\n )\n outs += [\n f\"ECI-based Property : mean={self.eci[0]:0.4f}\"\n f\" std={np.sqrt(fit_var):0.4f}\",\n \"Fit Summary\",\n ]\n\n for i, term in enumerate(self._subspace.external_terms):\n outs.append(f\"{repr(term)}={self.coefs[len(self.eci) + i]:0.3f}\")\n\n if self.regression_data is not None:\n outs += [\n \" ---------------------------------------------------------------------\"\n \"-------------------------------\",\n \" | ID Orbit ID Degree Cluster Diameter ECI Feature AVG\"\n \" Feature STD ECI * STD |\",\n f\" | 0 0 0 NA \"\n f\"{self.eci[0]:^7.3f}{feature_avg[0]:^15.3f}\"\n f\"{feature_std[0]:^15.3f}{feature_std[0] * self.eci[0]:^13.3f}|\",\n ]\n else:\n outs += [\n \" ---------------------------------------------------------\",\n \" | ID Orbit ID Degree Cluster Diameter ECI |\",\n f\" | 0 0 0 NA \"\n f\"{self.eci[0]:^7.3f} |\",\n ]\n\n for degree, orbits in self.cluster_subspace.orbits_by_size.items():\n for orbit in orbits:\n for i, bits in enumerate(orbit.bit_combos):\n line = (\n f\" |{orbit.bit_id + i:^6}{orbit.id:^12}{degree:^10}\"\n f\"{orbit.base_cluster.diameter:^20.4f}\"\n f\"{self.eci[orbit.bit_id + i]:^7.3f}\"\n )\n if self.regression_data is not None:\n line += (\n f\"{feature_avg[orbit.bit_id + i]:^15.3f}\"\n f\"{feature_std[orbit.bit_id + i]:^15.3f}\"\n f\"{feature_std[orbit.bit_id + i] * self.eci[orbit.bit_id + i]:^13.3f}\" # noqa\n )\n line += \"|\"\n outs.append(line)\n outs.append(\" \" + (len(outs[-1]) - 1) * \"-\")\n return \"\\n\".join(outs)",
"def write_output(self,content):\n text=\"\"\"# typ eta phi pt jmass ntrk btag had/em dummy dummy\\n\"\"\"\n self.output.writelines(text)\n text=\"0 \"+str(self.nb_data)+\" \"+str(len(content))+\"\\n\"\n self.output.writelines(text)\n\n i=1\n for particle in content:\n text=str(i)+' '+particle.lhco_line()+'\\n'\n self.output.writelines(text)\n i+=1",
"def extrct_out(cluster, fileout, projected=False):\n units0, origin0 = save_cluster(cluster)\n\n if cluster.ntot == 0:\n nb = 0\n cluster.mtot = 0.0\n trh = 0.0\n rn = np.zeros(10)\n else:\n cluster.to_pckms()\n cluster.to_centre(do_order=True, do_key_params=True)\n\n if cluster.nb > 0:\n nb = len(cluster.m2)\n else:\n nb = 0\n\n trh = half_mass_relaxation_time(cluster, multimass=True, projected=projected)\n\n if cluster.ntot > 10:\n if cluster.rn == None or (\n origin0 != cluster.origin or units0 != cluster.units\n ):\n rn = rlagrange(cluster, nlagrange=10, projected=projected)\n else:\n rn = np.zeros(10)\n\n fileout.write(\n \"%i %i %f %f %f \" % (cluster.ntot, nb, cluster.tphys, trh, cluster.mtot)\n )\n\n for i in range(0, len(rn)):\n fileout.write(\"%f \" % rn[i])\n\n fileout.write(\"%f \" % cluster.rmpro)\n\n if len(cluster.logl) > 0:\n fileout.write(\"%f \" % cluster.rhpro)\n else:\n fileout.write(\"-1. \")\n\n # Write additional parameters if they have been calculated:\n if cluster.rv is not None:\n fileout.write(\"%f \" % cluster.rv)\n if cluster.rl is not None:\n fileout.write(\"%f \" % cluster.rl)\n if cluster.rt is not None:\n fileout.write(\"%f \" % cluster.rt)\n\n try:\n fileout.write(\"%f %f \" % (cluster.rvmax, cluster.vmax))\n except:\n pass\n\n fileout.write(\"\\n\")\n\n return_cluster(cluster, units0, origin0)",
"def print_output(data,alignments,file):\n print(\"######################################################################\")\n print(\"Task 1 : IBM model 1 and EM algorithm implementation ,with corpus @\",file)\n print(\"######################################################################\")\n\n for i in range(len(data)):\n print(\"English Sentence : \",data[i][\"en\"])\n print(\"Foreign Sentence : \",data[i][\"fr\"])\n print(\"Alignment : \",alignments[i])\n print(\"----------------------------------------------------------------------\")",
"def _writeVariablesHeaderSection(self):\n self.header.write(wrapLine(\"NV\", self.annotation, self.delimiter, \"%d\\n\" % self.NV))\n self.header.write(wrapLine(\"VSCAL\", self.annotation, self.delimiter, ((\"%s\" + self.delimiter) * (self.NV - 1) + \"%s\\n\") % tuple(self.VSCAL)))\n self.header.write(wrapLine(\"VMISS\", self.annotation, self.delimiter, ((\"%s\" + self.delimiter) * (self.NV - 1) + \"%s\\n\") % tuple(self.VMISS)))\n self.header.write(wrapLines(\"VNAME\", self.annotation, self.delimiter, \"%s\\n\" * self.NV % tuple(self.VNAME)))",
"def write_raw_text(self, path='.'):\n cells = self.get_cells()\n arrays = []\n for cell in cells:\n arrays.append(cell.data)\n array = np.concatenate(arrays)\n fn = os.path.join(path, self.label + '.txt')\n fmt = []\n p = re.compile('(\\w)(\\d+)')\n for key, value in self.datatype:\n m = p.search(value)\n if m:\n kind, size = m.groups()\n # strings\n if kind == 'S':\n add = '%{}c'.format(size)\n # integers\n elif kind in ['u', 'i']:\n add = '%d'\n else:\n add = '%.8e'\n else:\n add = '%.8e'\n fmt.append(add)\n np.savetxt(fn, array, fmt=fmt, delimiter='\\t')\n return",
"def to_text_file(self, filename):\n translate.write_psf_text(self, filename)\n return",
"def write_minisat(self):\n num_variables = len(self.label_encodings)\n num_clauses = self.num_clauses\n clauses = self.clauses\n outfile = MinisatRunner.temp_in\n out = open(outfile,\"w\")\n try:\n out.write(\"p cnf %3d %3d\\n\" % (num_variables,num_clauses))\n for clause in clauses:\n for clause_variable in clause:\n out.write(\" %3d\" % self.minisat_encode_label(clause_variable));\n out.write(\" 0\\n\")\n finally:\n out.close()",
"def write(self, outfile, style='xyz', xyzlabel=True):\n out = ''\n if style == 'xyz':\n if xyzlabel:\n out += f'{len(self)}\\n\\n'\n out += str(self)\n elif style == 'latex':\n header = f'{len(self)}\\\\\\\\\\n'\n line_form = '{:<2}' + ' {:> 13.6f}' * 3 + ' {:>7.4f}'\n atoms = [line_form.format(atom, *pos, charge) for atom, xyz, charge in self]\n atoms = '\\n'.join(atoms)\n out = '\\\\begin{verbatim}\\n' + atoms + '\\n\\\\end{verbatim}'\n else:\n raise SyntaxError('Invalid style')\n with open(outfile, 'w') as f:\n f.write(out)",
"def write_file_simple(self,filename):\n\n output = open(filename,\"w\")\n # write header\n output.write(\"# %1s %3s %22s %6s %22s\\n\"%(\"l\",\"n\",\"nu_theo (muHz)\",\"unused\",\"Inertia\"))\n for i in range(self.modes.shape[0]):\n output.write(\" %1d %3d %22.15e 0.0 %22.15e\\n\"%( \\\n self.modes[\"l\"][i], \\\n self.modes[\"n\"][i], \\\n self.modes[\"freq\"][i]*self.glb[ifreq_ref], \\\n self.modes[\"inertia\"][i]))\n output.close()",
"def writedata_matlab(cibpercentdictm):\n \n expt = os.path.basename(os.path.abspath('.'))\n with open(expt + '_cibdata_m.txt', 'w') as f:\n #f.write('Condition \\t cibopen \\t total \\n')\n for condition, vals in cibpercentdictm.iteritems():\n condition = condition.replace(' - ', '')\n condition = condition.replace('-', '')\n sum, n = map(str, vals)\n #f.write('g' + condition + '_cibopen=' + sum + '\\n')\n #f.write('g' + condition + '_total=' + n + '\\n')\n #f.write(condition + '\\t' + sum + '\\t' + n + '\\n')\n f.write('g' + condition + '\\t' + sum + '\\t' + n + '\\n')",
"def write_to_latex(codelist, unwanted_courses):\n # TODO: investigate a way to add large amounts of text outside of the\n # function\n abstract01 = \"I created this document to practice parsing html and using\\\n tools like Beautiful Soup which I've previously had little experience\\\n in. As a result, it's not perfect.\\\\newline\\\n It is also a slightly condensed all-in-one-place look at a selection\\\n of courses that are available for fourth year computer science\\\n students at the University of Glasgow. For the purposes of clarity I\\\n have removed several courses from this selection. The following\\\n courses have been omitted:\"\n abstract02 = \"For more insight into the project, to report issues or to\\\n inspect the code, have a look at the GitHub:\\\n \\\\url{https://github.com/IS0metric/course-ripper}\"\n unincluded = create_not_included_list(unwanted_courses)\n with open('courses.tex', 'w') as f:\n # TODO Try and move all this to a separate function?\n # TODO: Check if it's more efficient to write a single, massive string\n # to file\n f.write('\\\\documentclass{hitec}\\n')\n f.write('\\\\usepackage[document]{ragged2e}\\n')\n f.write('\\\\usepackage{url}\\n')\n f.write('\\\\usepackage{hyperref}\\n')\n f.write('\\\\setcounter{tocdepth}{4}\\n')\n f.write('\\\\begin{document}\\n')\n f.write('\\\\title{Fourth Year (2016-17) Courses}\\n')\n f.write('\\\\author{Jack Parkinson}\\n')\n f.write('\\\\date{August 2016}\\n')\n f.write('\\\\maketitle\\n')\n f.write('\\\\abstract{' + abstract01 + unincluded + abstract02 + '}\\n')\n f.write('\\\\newpage\\n\\n')\n f.write('\\\\tableofcontents\\n')\n f.write('\\\\newpage\\n\\n')\n # TODO: Look into alternatives to the three lists\n all_courses = []\n sem1_courses = []\n sem2_courses = []\n for code in codelist:\n course = bsoup(get_coursepage(code))\n if course['offered']['value'] == 'Runs Throughout Semesters 1 and 2':\n all_courses.append(course)\n elif \"1\" in course['offered']['value']:\n sem1_courses.append(course)\n elif \"2\" in course['offered']['value']:\n sem2_courses.append(course)\n f.write('\\\\section{Semester 1 and 2 Courses}\\n\\n')\n for course in all_courses:\n f.write(latex_course(course))\n f.write('\\\\section{Semester 1 Only Courses}\\n\\n')\n for course in sem1_courses:\n f.write(latex_course(course))\n f.write('\\\\section{Semester 2 Only Courses}\\n\\n')\n for course in sem2_courses:\n f.write(latex_course(course))\n f.write('\\\\end{document}')\n return None",
"def write_output(series, filename):\n\n logging.info('Writing output')\n\n df = series.reset_index()\n\n df.columns = ['subject_id', 'classification']\n\n df.to_csv(filename, index=False)",
"def export(self, outpath):\n fout = open(outpath, \"w\")\n\n # Header takes the guesswork out of loading by recording how many lines, vector dims\n fout.write(str(self.n_words) + \" \" + str(self.n_dim) + \"\\n\")\n for token in self.id2word:\n vector_components = [\"%.6f\" % number for number in self[token]]\n vector_as_string = \" \".join(vector_components)\n\n out_line = token + \" \" + vector_as_string + \"\\n\"\n fout.write(out_line)\n\n fout.close()"
] | [
"0.58711535",
"0.5794212",
"0.577263",
"0.576856",
"0.5729977",
"0.5719904",
"0.5699117",
"0.5625929",
"0.55732876",
"0.5559315",
"0.5549494",
"0.55457133",
"0.5542708",
"0.5537047",
"0.5492166",
"0.54771644",
"0.5458965",
"0.545175",
"0.5443052",
"0.5383917",
"0.5383866",
"0.5355408",
"0.53506655",
"0.5328806",
"0.5321662",
"0.53145105",
"0.5280036",
"0.52787024",
"0.52765614",
"0.52689976"
] | 0.6494704 | 0 |
Estimate radii for stars on the main sequence from their ``B-V`` color, using a simple relation calibrated on interferometry by Boyajian et al. 2012 | def bv_to_radius(b_minus_v):
# Boyajian 2012
X = b_minus_v
a0 = 0.3830
a1 = 0.9907
a2 = -0.6038
Y = 0
# Ignore metallicity
a3 = 0
a4 = 0
a5 = 0
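    # Polynomial in X = B-V; the metallicity terms drop out since Y = 0, and the result is scaled by the solar radius R_sun.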
return (a0 + a1 * X + a2 * X ** 2 + a3 * X * Y +
a4 * Y + a5 * Y ** 2) * R_sun | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def calc_rsi(image):\n\n # roll axes to conventional row,col,depth\n img = np.rollaxis(image, 0, 3)\n\n # bands: Coastal(0), Blue(1), Green(2), Yellow(3), Red(4), Red-edge(5), NIR1(6), NIR2(7)) Multispectral\n COAST = img[:, :, 0]\n B = img[:, :, 1]\n G = img[:, :, 2]\n Y = img[:, :, 3]\n R = img[:, :, 4]\n RE = img[:, :, 5]\n NIR1 = img[:, :, 6]\n NIR2 = img[:, :, 7]\n\n arvi = old_div((NIR1 - (R - (B - R))), (NIR1 + (R - (B - R))))\n dd = (2 * NIR1 - R) - (G - B)\n gi2 = (B * -0.2848 + G * -0.2434 + R * -0.5436 + NIR1 * 0.7243 + NIR2 * 0.0840) * 5\n gndvi = old_div((NIR1 - G), (NIR1 + G))\n ndre = old_div((NIR1 - RE), (NIR1 + RE))\n ndvi = old_div((NIR1 - R), (NIR1 + R))\n ndvi35 = old_div((G - R), (G + R))\n ndvi84 = old_div((NIR2 - Y), (NIR2 + Y))\n nirry = old_div((NIR1), (R + Y))\n normnir = old_div(NIR1, (NIR1 + R + G))\n psri = old_div((R - B), RE)\n rey = old_div((RE - Y), (RE + Y))\n rvi = old_div(NIR1, R)\n sa = old_div(((Y + R) * 0.35), 2) + old_div((0.7 * (NIR1 + NIR2)), 2) - 0.69\n vi1 = old_div((10000 * NIR1), (RE) ** 2)\n vire = old_div(NIR1, RE)\n br = (old_div(R, B)) * (old_div(G, B)) * (old_div(RE, B)) * (old_div(NIR1, B))\n gr = old_div(G, R)\n rr = (old_div(NIR1, R)) * (old_div(G, R)) * (old_div(NIR1, RE))\n\n ###Built-Up indices\n wvbi = old_div((COAST - RE), (COAST + RE))\n wvnhfd = old_div((RE - COAST), (RE + COAST))\n\n ###SIs\n evi = old_div((2.5 * (NIR2 - R)), (NIR2 + 6 * R - 7.5 * B + 1))\n L = 0.5 # some coefficient for Soil Adjusted Vegetation Index (SAVI) DO NOT INCLUDE IN FEATURES\n savi = old_div(((1 + L) * (NIR2 - R)), (NIR2 + R + L))\n msavi = old_div((2 * NIR2 + 1 - ((2 * NIR2 + 1) ** 2 - 8 * (NIR2 - R)) ** 0.5), 2)\n bai = old_div(1.0, ((0.1 + R) ** 2 + 0.06 + NIR2))\n rgi = old_div(R, G)\n bri = old_div(B, R)\n\n rsi = np.stack(\n [arvi, dd, gi2, gndvi, ndre, ndvi, ndvi35, ndvi84, nirry, normnir, psri, rey, rvi, sa, vi1, vire, br, gr, rr,\n wvbi, wvnhfd, evi, savi, msavi, bai, rgi, bri],\n axis=2)\n\n return rsi",
"def R(self):\n\t\treturn (arange(self.rbins) + 0.5) * (self.cbins - 0.5) / self.rbins",
"def fRCrim(Swe,Vc1,Vc2,Vc3,Vk,PHIe,Rc1,Rc2,Rc3,Rk,Rw,Rh,Cwv,Ckv,Alpha,Tout):\n#\n# 1. Compute and normalise volumetric components:\n#\t-----------------------------------------------\n\tVw=PHIe*Swe\n\tVh=PHIe*(1-Swe)\n\tVwe=(Vw-Cwv)/(1-Cwv)\n\tVwe=ImposeLimits(Vwe,0,1)\n\tVke=(Vk-Ckv)/(1-Ckv)\n\tVke=ImposeLimits(Vke,0,1)\n\tSum=abs(Vc1)+abs(Vc2)+abs(Vc3)+abs(Vke)+abs(Vwe)+abs(Vh)\n\tVc1=abs(Vc1)/Sum\n\tVc2=abs(Vc2)/Sum\n\tVc3=abs(Vc3)/Sum\n\tVk=abs(Vk)/Sum\n\tVw=abs(Vw)/Sum\n\tVh=abs(Vh)/Sum\n#\n#\t2. Determine conductivity of components:\n#\t----------------------------------------\n\tSigc1=1/Rc1\n\tSigc2=1/Rc2\n\tSigc3=1/Rc3\n\tSigk=1/Rk\n\tSigw=1/Rw\n\tSigh=1/Rh\n#\n#\t3. Compute Conductivity:\n#\t========================\n\tTrm1=Vc1*(Sigc1**(1/Alpha))\n\tTrm2=Vc2*(Sigc2**(1/Alpha))\n\tTrm3=Vc3*(Sigc3**(1/Alpha))\n\tTrm4=(Vk**2.2)*(Sigk**(1/Alpha)) # Factor of 2.2 included to get data to fit to Yang et al\n\tTrm5=Vw*(Sigw**(1/Alpha))\n\tTrm6=Vh*(Sigh**(1/Alpha))\n\tCrf=(Trm1+Trm2+Trm3+Trm4+Trm5+Trm6)**Alpha\n#\n#\n# 4. Output result:\n#\t-----------------\n\tif(Tout==0):\n\t\tFr=Crf\n\telse:\n\t\tFr=1/Crf\n\treturn Fr",
"def radarScat(sp, wl, K2=0.93):\n#TODO check if K2 is for ice or liquid!\n prefactor = 2*np.pi*wl**4/(np.pi**5*K2)\n \n \n reflect_hh = prefactor*(sp.Z11+sp.Z22+sp.Z12+sp.Z21)\n reflect_vv = prefactor*(sp.Z11+sp.Z22-sp.Z12-sp.Z21)\n kdp = 1e-3*(180.0/np.pi)*wl*sp.S22r_S11r\n\n reflect_hv = prefactor*(sp.Z11 - sp.Z12 + sp.Z21 - sp.Z22)\n #reflect_vh = prefactor*(sp.Z11 + sp.Z12 - sp.Z21 - sp.Z22).values\n ldr_h = reflect_hh/reflect_hv\n \n # delta_hv np.arctan2(Z[2,3] - Z[3,2], -Z[2,2] - Z[3,3])\n #a = (Z[2,2] + Z[3,3])**2 + (Z[3,2] - Z[2,3])**2\n #b = (Z[0,0] - Z[0,1] - Z[1,0] + Z[1,1])\n #c = (Z[0,0] + Z[0,1] + Z[1,0] + Z[1,1])\n #rho_hv np.sqrt(a / (b*c))\n rho_hv = np.nan*np.ones_like(reflect_hh) # disable rho_hv for now\n #Ah = 4.343e-3 * 2 * scatterer.wavelength * sp.S22i.values # attenuation horizontal polarization\n #Av = 4.343e-3 * 2 * scatterer.wavelength * sp.S11i.values # attenuation vertical polarization\n\n #- test: calculate extinction: TODO: test Cextx that is given in DDA with this calculation.\n k = 2 * np.pi / (wl)\n cext_hh = sp.S22i*4.0*np.pi/k\n cext_vv = sp.S11i*4.0*np.pi/k\n \n return reflect_hh, reflect_vv, reflect_hv, kdp, rho_hv, cext_hh, cext_vv",
"def sivina(self):\n return (self.r + self.g + self.b) / 3",
"def get_scene_radiance(self, img,*,size=15,omega=0.95,trans_lb=0.1,percent=0.1,radius=40,epsilon=0.001):\n\n L=356\n #Applying atmosheric scattering model on the image\n atmosphere = self.get_atmospheric_light(img, size=size, percent=percent)\n trans = self.get_transmission(img, atmosphere, size=size, omega=omega, radius=radius, epsilon=epsilon)\n clamped = np.clip(trans, trans_lb, omega)[:, :, None]\n img = np.float64(img)\n return np.uint8(((img - atmosphere) / clamped + atmosphere).clip(0, L - 1))",
"def radial_velocity(wv_obj, fx_obj, sig_obj, wv_std, fx_std, sig_std, obj_name, std_name, rv_std, rv_std_err, order,\n xcorr_width, cut, cutstart, cutend):\n\n # The more random iterations, the better... but it takes longer\n n_iter = 1000\n\n # Step 1: Fix the spectra:\n # * Select only the region in which they overlap\n # * Make a new stretched wavelength array (for sub-pixel precision work)\n # * Interpolate the data onto the new wavelength array\n # * Remove large scale slopes so we only compare line and band features\n\n # Find where standard and object overlap ---------------\n wv_min = max([min(wv_std), min(wv_obj)])\n wv_max = min([max(wv_std), max(wv_obj)])\n\n n_pix_std = len(wv_std)\n\n # Creates ln standard wavelength array ---------------------------------\n # AR 2013.0423 The wavelength array only covers the overlap region. Also, I'm folding the rebinning by 10 into this statement.\n acoef_std = (n_pix_std * 10 - 1) / (math.log(wv_max) - math.log(wv_min))\n bcoef_std = (n_pix_std * 10) - (acoef_std * math.log(wv_max))\n\n arr = np.arange(n_pix_std * 10) + 1\n wv_ln_std = np.exp((arr - bcoef_std) / acoef_std)\n\n # AR 2012.1018: Find the conversion between pixels and velocity. This will vary from instrument\n # to instrument and spectral order to spectral order, so we should preferentially calculate this\n # based on the actual input spectrum.\n # AR 2013.0422: Change the calculation to happen AFTER the corrected wavelength scale has been made\n # Find the average pixel/spectrum offset\n # Note: even though it's called micron_per_pix, it will still work if the wavelengths are\n # angstroms instead (it really converts <wavelength unit> to km/s)\n\n # Interpolate data onto same ln wavelength scale -------------------------------\n\n fx_interp_std = np.interp(wv_ln_std, wv_std, fx_std)\n fx_interp_obj = np.interp(wv_ln_std, wv_obj, fx_obj)\n sig_interp_std = np.interp(wv_ln_std, wv_std, sig_std) # AR 2012.1018 Also need to rebin sig\n sig_interp_obj = np.interp(wv_ln_std, wv_obj, sig_obj) # AR 2012.1018 Also need to rebin sig\n\n # Rebin Data ----------------------------\n\n wv_arr_std = np.asarray(wv_ln_std, dtype=float)\n fx_arr_obj = np.asarray(fx_interp_obj, dtype=float)\n fx_arr_std = np.asarray(fx_interp_std, dtype=float)\n sig_arr_obj = np.asarray(sig_interp_obj, dtype=float)\n sig_arr_std = np.asarray(sig_interp_std, dtype=float)\n\n datalen = len(fx_arr_obj)\n\n # Step 2: Measure vsini:\n # Note that as of 2015.0605, this doesn't actually work.\n\n # AR 2014.0922: For vsini:\n # In a loop:\n # Take the standard spectrum\n # broaden it to width X\n # autocorrelate,\n # measure width of gaussian Y (this is supposed to give you a means of translating between width-of-cross-correlation and vsini)\n # Fit function solving Y for X.\n # For each cross correlation of object and standard:\n # Determine vsini\n\n pix_scale = (2.99792458 * 10 ** 5) / acoef_std\n\n # vsinirange = [1,2,5,10,20,30,40,50,60,80,100,100]\n # widthrange = []\n # for v in vsinirange:\n # # Make convolution kernel for v km/s\n # kernel = lsf_rotate(pix_scale,v)\n # # Broaden the standard spectrum\n # fx_obj_wide = np.correlate(fx_arr_obj, kernel, mode='same')\n # # Rectify the spectrum\n # fx_obj_orig = (fx_arr_obj - np.mean(fx_arr_obj))/np.std(fx_arr_obj,ddof=1)\n # fx_obj_wide = (fx_obj_wide - np.mean(fx_obj_wide))/np.std(fx_obj_wide,ddof=1)\n #\n # # Remove a cubic (flatten the spectrum)\n # coeff,pcov = op.curve_fit(cubic,wv_arr_std,fx_obj_wide)\n # fx_obj_wide = fx_obj_wide - (coeff[0] + 
coeff[1]*wv_arr_std + coeff[2]*wv_arr_std**2 + coeff[3]*wv_arr_std**3)\n # coeff,pcov = op.curve_fit(cubic,wv_arr_std,fx_obj_orig)\n # fx_obj_orig = fx_obj_orig - (coeff[0] + coeff[1]*wv_arr_std + coeff[2]*wv_arr_std**2 + coeff[3]*wv_arr_std**3)\n #\n # # Cross-correlate the spectrum with its broadened self\n # ycorr = np.correlate(fx_obj_orig, fx_obj_wide, mode='full')\n # # Now determine where the peak is (should be near 0)\n # length = len(ycorr)\n # xcorr = np.arange(length) - length//2\n # xmid = np.argmax(ycorr)\n # ymax = np.max(ycorr)\n # # Chop out just the portion of the array near the peak\n # xcorr_min=xmid-xcorr_width\n # xcorr_max=xmid+xcorr_width\n # ycorr1=ycorr[xcorr_min:xcorr_max]\t#isolate section of array with gaussian\n # xcorr1=xcorr[xcorr_min:xcorr_max] #isolate the same section of the pixel range\n #\n # # set up initial values for gaussian fitting via chi2\n # sig = 10\n # sky = np.min(ycorr1)/1.2\n # # print ycorr1[-1],ycorr1[0],xcorr1[-1],xcorr1[0]\n # sky2 = (ycorr1[-1]-ycorr1[0])/(xcorr1[-1]-xcorr1[0])\n # lnamp = np.log(ymax/1.2-sky)\t# guess some values\n # mean = xcorr[xmid]\n #\n # amp = np.exp(lnamp)\n # sig2 = sig**2\n # # suggestion from D. Hogg 12/15/12: Add extra linear feature to fit.\n # # suggestion from D. Hogg 12/15/12: operate on ln(amp) so that the amplitude CANNOT be negative.\n # def chi2(p):\t#define gaussian function for fitting\n # sig2=p[2] ** 2\n # m = (np.exp(p[0]) * np.exp(-0.5 * (xcorr1 - p[1]) ** 2 / sig2)) + p[3] + p[4]*xcorr1\n # return (ycorr1 - m)\n #\n # # Fit the gaussian.\n # popt, ier = op.leastsq(chi2, [lnamp, mean, sig, sky, sky2])\n # lnamp, mean, sig, sky, sky2 = popt\n #\n # amp = np.exp(lnamp)\n # # record the width\n # widthrange.append(sig)\n #\n # # Plot all the widths to get a width-vsini curve\n # vsinicoeff,popt = op.curve_fit(quartic,np.asarray(widthrange),np.asarray(vsinirange))\n #\n # relationx = np.arange(50,200,1)\n # relationy = vsinicoeff[0]+vsinicoeff[1]*relationx+vsinicoeff[2]*relationx**2+vsinicoeff[3]*relationx**3+vsinicoeff[4]*relationx**4\n # figv = plt.figure(1)\n # axv = figv.add_subplot(211)\n # axv.scatter(widthrange,vsinirange)\n # axv.plot(relationx,relationy)\n # #ax.text(70,100,\"{0:} {1:} {2:} {3:} {4:}\".format(vsinicoeff))\n\n # 3. Cross-correlate the data, using n_iter trials:\n # * Generate two random gaussian noises scaled to the uncertainty on the fluxes\n # * Apply those gaussian noises to the standard and target stars\n # * Cross-correlate the standard and target stars\n # * Find and then cut out just the part of the cross-correlation curve near the maximum\n # * Set up gaussian\n # * Fit gaussian to that center part\n # * Save fitted parameters (pixel shift aka mean of gaussian, width aka stddev of gaussian)\n # * Repeat n_iter times\n\n # Cross correlation loop --------------------------------\n pix_shift = np.array([]) # initialize array for pixel shift values\n pix_width = np.zeros(n_iter) # initialize array for pixel width values\n l = 0\n\n # using the xrange generator rather than making a full list saves memory\n while len(pix_shift) < n_iter:\n # prepare the randomized data\n # GETTING ARRAYS READY FOR CROSS CORRELATION\n\n\n # Randomize noise:\n # create gaussian distribution of random numbers b/t 1 and -1, multiply err by numbers, add numbers to flux\n # I have drastically simplified the arrays here AR 2013.0319\n # AR 2013.0318: There was a problem, previously: noise was a fixed value, not linked to the known error values\n\n # AR 2013.0321: Speed fix. 
Rather than step through the array and generate one\n # normally-distributed error value scaled to the SNR at that point, I will generate an\n # array of normally-distributed error values scaled to 1, and then multiply by the SNR:\n # One array generation, one array multiplication.\n\n rand_dist = np.random.normal(loc=0.0, scale=1.0, size=datalen)\n rand_dist2 = np.random.normal(loc=0.0, scale=1.0, size=datalen)\n\n fx_temp_obj = np.asarray(fx_arr_obj + rand_dist * sig_arr_obj)\n fx_temp_std = np.asarray(fx_arr_std + rand_dist2 * sig_arr_std)\n mean_obj = np.mean(fx_temp_obj)\n mean_std = np.mean(fx_temp_std)\n stddev_obj = np.std(fx_temp_obj, ddof=1)\n stddev_std = np.std(fx_temp_std, ddof=1)\n\n # Regularize data (subtract mean, divide by std dev) (Should definitely be done AFTER noise was added)\n fx_reg_temp_obj = fx_temp_obj - mean_obj\n fx_reg_temp_obj = fx_reg_temp_obj / stddev_obj\n fx_reg_temp_std = fx_temp_std - mean_std\n fx_reg_temp_std = fx_reg_temp_std / stddev_std\n\n # curve fit - remove a cubic AR 2012.1113\n coeff, pcov = op.curve_fit(cubic, wv_arr_std, fx_reg_temp_obj)\n fx_reg_temp_obj = fx_reg_temp_obj - (\n coeff[0] + coeff[1] * wv_arr_std + coeff[2] * wv_arr_std ** 2 + coeff[3] * wv_arr_std ** 3)\n coeff, pcov = op.curve_fit(cubic, wv_arr_std, fx_reg_temp_std)\n fx_reg_temp_std = fx_reg_temp_std - (\n coeff[0] + coeff[1] * wv_arr_std + coeff[2] * wv_arr_std ** 2 + coeff[3] * wv_arr_std ** 3)\n\n # CROSS CORRELATION\n\n # compute the cross-correlation between the two spectra\n\n ycorr = np.correlate(fx_reg_temp_obj, fx_reg_temp_std, mode='full')\n # time required: 0.045 seconds average\n\n # http://stackoverflow.com/questions/12323959/fast-cross-correlation-method-in-python\n # conv1 = np.zeros(datalen * 2)\n # conv1[datalen/2:datalen/2+datalen] = fx_reg_temp_obj\n # conv2 = fx_reg_temp_std[::-1]\n # ycorr = signal.fftconvolve(conv1,conv2, mode='valid')\n # time required: 0.006 seconds average, but it segfaults by the third try.\n\n ## slight smoothing AR 2013.0315\n # ycorr = scipy.ndimage.filters.gaussian_filter1d(ycorr,11)\n\n # create the x offset axis (same length as ycorr, with 0 in the MIDDLE)\n length = len(ycorr)\n xcorr = np.arange(length) - length // 2\n # AR 2012.1126 Select a tiny piece around the maximum to fit with a gaussian.\n xmid = np.argmax(ycorr)\n ymax = np.max(ycorr)\n # now take just the portion of the array that matters\n xcorr_min = int(xmid - xcorr_width)\n xcorr_max = int(xmid + xcorr_width)\n ycorr1 = ycorr[xcorr_min:xcorr_max] # isolate section of array with gaussian\n xcorr1 = xcorr[xcorr_min:xcorr_max] # isolate the same section of the pixel range\n ycorr2 = ycorr[xcorr_min - 50:xcorr_max + 50]\n xcorr2 = xcorr[xcorr_min - 50:xcorr_max + 50]\n\n # suggestion from D. Hogg 12/15/12: Add extra linear feature to fit.\n # suggestion from D. 
Hogg 12/15/12: operate on ln(amp) so that the amplitude CANNOT be negative.\n def chi2(p): # define gaussian function for fitting\n sig2 = p[2] ** 2\n m = (np.exp(p[0]) * np.exp(-0.5 * (xcorr1 - p[1]) ** 2 / sig2)) + p[3] + p[4] * xcorr1\n return (ycorr1 - m)\n\n # set up initial values for chi2\n sig = 10\n sky = np.min(ycorr1) / 1.2\n # print ycorr1[-1],ycorr1[0],xcorr1[-1],xcorr1[0]\n sky2 = (ycorr1[-1] - ycorr1[0]) / (xcorr1[-1] - xcorr1[0])\n lnamp = np.log(ymax / 1.2 - sky) # guess some values\n mean = xcorr[xmid]\n\n amp = np.exp(lnamp)\n sig2 = sig ** 2\n\n popt, ier = op.leastsq(chi2, [lnamp, mean, sig, sky, sky2])\n lnamp, mean, sig, sky, sky2 = popt\n\n amp = np.exp(lnamp)\n\n # print_num=len(pix_shift)%100\n print_num = l % 100\n if print_num == 0:\n ## Uncomment the following to make a plot every 500 fits.\n # fig = plt.figure(l)\n # ax = fig.add_subplot(111)\n # my_gauss = (amp * (np.exp(-0.5 * ((xcorr1 - mean) ** 2) / sig**2))) + sky + sky2 * xcorr1\n # ax.plot(xcorr1,my_gauss,'r--')\n # ax.plot(xcorr2,ycorr2,'#000000')\n # ax.plot(xcorr1,ycorr1-my_gauss,'#00CC00')\n ##if abs(mean - xcorr[xmid]) > 5:\n ## print \"Mean is off\",mean,xcorr[xmid]\n # figname='rv_{0:}_{1:}_{2:}_{3:}.png'.format(std_name,obj_name,order,l)\n # ax.set_xlim(xcorr[xcorr_min-50],xcorr[xcorr_max+50])\n # fig.savefig(figname)\n # fig.clf()\n # plt.close()\n print\n \"amp={0: 12.4f} mu={1: 10.4f} sig={2: 9.4f} sky={3: 11.4f} sky2={4: 8.4f} n_entries={5:}\".format(amp,\n mean,\n sig,\n sky,\n sky2,\n len(\n pix_shift))\n\n l += 1\n if (cut == 0) | (mean > np.float(cutstart)) & (mean < np.float(cutend)):\n pix_shift = np.append(pix_shift, mean)\n # if ier < 5:\n # I'm calculating the vsini now because I need errors, and the vsini calculation is not linear.\n # pix_width[l] = vsinicoeff[0] + vsinicoeff[1] * sig + vsinicoeff[2] * sig**2 + vsinicoeff[3] * sig**3 + vsinicoeff[4] * sig**4\n\n # End cross correlation loop ---------------------------------\n\n # 4. Find the RV\n # All 5000 rv fits have been calculated and stored in arrays\n # 4a. Cut out outlier RVs. Useful if the cross-correlation produces occasional bad results. Use cutstart and cutend to force the code to only fit a gaussian to a certain region. Don't over-use this to force the result you want, though.\n # 4b. Compute the mean pixel shift and pixel shift uncertainty.\n # 4c. Convert pixel shift into RV\n # 4d. Shift the wavelength array appropriately - all lines should now line up.\n\n ## Uncomment this to print out an example cross-correlation diagram\n # fig = plt.figure(2)\n # ax = fig.add_subplot(111)\n # ax.plot(xcorr,ycorr,'k')\n # figname='rv_{0:}_{1:}_{2:}_xcorr.png'.format(std_name,obj_name,order)\n # fig.savefig(figname)\n # fig.clf()\n # plt.close()\n\n # Turn the list of pixel shifts into a numpy array\n pix_shift = np.asarray(pix_shift)\n\n # 4a. Cut out outliers from the pixel shift\n if cut == 1:\n pix_shift = pix_shift[np.where((pix_shift > np.float(cutstart)) & (pix_shift < np.float(cutend)))]\n\n # 4b. Compute the mean pixel shift (rv value) and pixel shift uncertainty (RV uncertainty).\n\n print\n l, len(pix_shift), np.float(len(pix_shift)) / np.float(n_iter) * 100.0\n\n mu = np.mean(pix_shift)\n sigma = np.std(pix_shift, ddof=1)\n\n # vsini = np.mean(pix_width)\n # vsini_err = np.std(pix_width,ddof=1)\n\n # axh = figv.add_subplot(212)\n # n, bins, patches=axh.hist(pix_width,bins=30,normed=1.0,facecolor='green',align='mid')\n # figv.savefig('vsiniplot.png')\n # plt.clf()\n # plt.close()\n\n # 4c. 
Transform pixel shift to shift in radial velocity\n\n # AR 2013.0423: The actually appropriate method requires a speed-of-light correction. This works for both angstroms and microns.\n rv_meas = (2.99792458 * 10 ** 5 * mu) / acoef_std\n rv_meas_err = (2.99792458 * 10 ** 5 * sigma) / acoef_std\n\n # 4d. Apply shift to arrays\n wv_rvcorr_obj = wv_arr_std * (1 - rv_meas / (2.99792458 * 10 ** 5))\n\n ## 5. Create plots ---------------------------------\n # The plots are the only reason find_rv.py needs to know the names of either star, or the RV of the standard.\n\n # Plot object and standard so you can clearly see that shift exists --------------------------------\n fig = plt.figure(1)\n\n # AR 2013.0703 Regularize the spectra for display purposes in the final graph\n # I'm using the mean and stddev of the last random-added attempt so it won't be perfect...\n fx_reg_obj = fx_arr_obj - mean_obj\n fx_reg_obj = fx_reg_obj / stddev_obj\n fx_reg_std = fx_arr_std - mean_std\n fx_reg_std = fx_arr_std / stddev_std\n\n # Plots target and standard with shift applied\n ax1 = fig.add_subplot(311)\n ax1.plot(wv_rvcorr_obj, fx_reg_obj, 'red')\n ax1.plot(wv_arr_std, fx_reg_std, 'blue')\n ax1.set_xlabel('wavelength (microns)')\n ax1.set_ylabel('normalized flux')\n target = 'Target: %s' % (obj_name)\n standard = 'Standard: %s' % (std_name)\n ax1.annotate(target, xy=(.7, .9), xycoords='axes fraction', xytext=(.6, .9), textcoords='axes fraction',\n color='red')\n ax1.annotate(standard, xy=(.7, .8), xycoords='axes fraction', xytext=(.6, .8), textcoords='axes fraction',\n color='blue')\n\n sig2 = sig ** 2\n my_gauss = (amp * (np.exp(-0.5 * ((xcorr1 - mu) ** 2) / sig2))) + sky + sky2 * xcorr1\n\n # Plots example of gaussian fit to cross correlation function\n ax2 = fig.add_subplot(312)\n ax2.plot(xcorr1, ycorr1, 'k.')\n ax2.plot(xcorr1, my_gauss, 'r--', linewidth=2)\n ax2.plot(xcorr1, ycorr1 - my_gauss, '#00CC00')\n ax2.set_xlabel('example of fit to cross correlation function')\n ax2.set_xlim(xcorr[xcorr_min - 50], xcorr[xcorr_max + 50])\n # print pix_shift\n\n\n ## Plot histogram of pixel shift values --------------------------------\n ax3 = fig.add_subplot(313)\n n, bins, patches = plt.hist(pix_shift, bins=30, normed=1.0, facecolor='green', align='mid')\n # Plot best fit gaussian over histogram\n y = mlab.normpdf(bins, mu, sigma)\n ax3.plot(bins, y, 'r--', linewidth=2)\n ax3.set_xlabel('radial velocity of target (pixels)')\n ax3.set_ylabel('frequency (normalized)')\n rad = 'RV = %.3f +/- %.3f' % (rv_meas, rv_meas_err)\n corr = 'RV (corr) = %.3f +/- %.3f' % (rv_std + rv_meas, (rv_std_err ** 2 + rv_meas_err ** 2) ** (0.5))\n # vsinistr = 'VsinI = %.3f +/- %.3f' % (vsini,vsini_err)\n ax3.annotate(rad, xy=(.66, .9), xycoords='axes fraction', xytext=(.66, .9), textcoords='axes fraction',\n color='black')\n ax3.annotate(corr, xy=(.6, .8), xycoords='axes fraction', xytext=(.60, .8), textcoords='axes fraction',\n color='black')\n # ax3.annotate(vsinistr,xy=(.6,.6),xycoords='axes fraction',xytext=(.60,.6),textcoords='axes fraction',color='black')\n ax3.annotate('{0:+5.2f} {1: 5.2f}'.format(mu, sigma), xy=(.05, .9), xycoords='axes fraction', xytext=(.05, .9),\n textcoords='axes fraction', color='black')\n ax3.annotate('{0:5.3f} km/s/pix'.format((2.99792458 * 10 ** 5) / acoef_std), xy=(.05, .8), xycoords='axes fraction',\n xytext=(.05, .8), textcoords='axes fraction', color='black')\n fig.subplots_adjust(hspace=.3)\n\n figname = 'rv_%s_%s_%d.png' % (std_name, obj_name, order)\n fig.savefig(figname)\n fig.clf()\n 
plt.close()\n\n # plt.figure(l+1)\n # plt.hist(pix_shift)\n\n # END RADIAL VELOCITY FUNCTION -----------------------------------------\n return rv_meas, rv_meas_err",
"def color_correct_panstarrs(self):\n PS1_r = self.pan['rmag']\n PS1_g = self.pan['gmag']\n self.pan_gr_color = self.pan['gmag'] - self.pan['rmag'] \n if self.filter == 'R' and ((self.instrument == 'h') | (self.instrument == 'm')): # this should be the only observations through an R filter\n print(\"correcting color for R filter at KPNO\") \n ###################################\n # Calculate Johnson R\n # from http://www.sdss3.org/dr8/algorithms/sdssUBVRITransform.php\n ###################################\n #self.R = self.pan['rmag'] + (-0.153)*(self.pan['rmag']-self.pan['imag']) - 0.117\n ###################################\n # Other transformations from \n # https://arxiv.org/pdf/1706.06147.pdf\n # R - r = C0 + C1 x (r-i) (-0.166, -0.275)\n # R - r = C0 + C1 x (g-r) (-0.142, -0.166)\n ###################################\n #\n #if self.useri:\n # self.R = self.pan['rmag'] + (-0.166)*(self.pan['rmag']-self.pan['imag']) - 0.275\n #else:\n # self.R = self.pan['rmag'] + (-0.142)*(self.pan['gmag']-self.pan['rmag']) - 0.142\n\n # from Matteo Fossati\n #Best fit quadratic KPHr - PS1_r = 0.0170*(PS1_g-PS1_r)^2 + -0.1864*(PS1_g-PS1_r) + 0.0213\n self.R = PS1_r + 0.0170*(PS1_g-PS1_r)**2 + -0.1864*(PS1_g-PS1_r) + 0.0213\n\n elif self.filter == 'r' and self.instrument == 'i':\n print(\"correcting color for r filter at INT\") \n #self.R = self.pan['rmag']\n #Best fit quadratic INTSr - PS1_r = 0.0023*(PS1_g-PS1_r)^2 + -0.0122*(PS1_g-PS1_r) + 0.0003\n self.R = PS1_r + 0.0023*(PS1_g-PS1_r)**2 + -0.0122*(PS1_g-PS1_r) + 0.0003 \n # which filter is the bok telescope using?\n elif self.filter == 'r' and self.instrument == 'b':\n print(\"correcting color for r filter at BOK\") \n #self.R = self.pan['rmag']\n #Best fit quadratic KPSr - PS1_r = 0.0084*(PS1_g-PS1_r)^2 + -0.0420*(PS1_g-PS1_r) + 0.0036\n self.R = PS1_r + 0.0084*(PS1_g-PS1_r)**2 + -0.0420*(PS1_g-PS1_r) + 0.0036 \n # this is the kpno r filter\n elif self.filter == 'r' and self.instrument == 'h':\n print(\"correcting color for r filter at KPNO\") \n #Best fit quadratic KPSr - PS1_r = 0.0084*(PS1_g-PS1_r)^2 + -0.0420*(PS1_g-PS1_r) + 0.0036\n self.R = self.pan['rmag']\n self.R = PS1_r + 0.0084*(PS1_g-PS1_r)**2 + -0.0420*(PS1_g-PS1_r) + 0.0036 \n\n # halpha filters\n elif self.filter == 'ha' and self.instrument == 'i':\n print(\"correcting color for halpha filter at INT\")\n #Best fit quadratic Intha - PS1_r = 0.0182*(PS1_g-PS1_r)^2 + -0.2662*(PS1_g-PS1_r) + 0.0774\n self.R = PS1_r + 0.0182*(PS1_g-PS1_r)**2 + -0.2662*(PS1_g-PS1_r) + 0.0774\n\n\n #self.R = self.pan['rmag']\n # bok is using the kpno halpha+4nm filter, so use the same correction for these\n elif self.filter == 'ha' and ((self.instrument == 'b') | (self.instrument == 'h') | (self.instrument == 'm')) :\n print(\"correcting color for ha filter at KPNO\") \n #Best fit quadratic Ha4 - PS1_r = 0.0016*(PS1_g-PS1_r)^2 + -0.2134*(PS1_g-PS1_r) + 0.0168\n #self.R = self.pan['rmag']\n self.R = PS1_r + 0.0016*(PS1_g-PS1_r)**2 + -0.2134*(PS1_g-PS1_r) + 0.0168\n else:\n print(\"ruh - roh! did not find the panstarrs color transformation!!!\")\n print(\"setting instrumental r mag to panstarrs r mag\")\n print()\n self.R = self.pan['rmag']",
"def computeNormalAndCurvature():\n radius = 50\n for i,j in pts:\n nb_pts = ti.cast(0, ti.f32)\n accu_0 = ti.cast(0, ti.f32)\n accu_1 = ti.cast(0, ti.f32)\n accu_2 = ti.cast(0, ti.f32)\n accu_3 = ti.cast(0, ti.f32)\n accu_4 = ti.cast(0, ti.f32)\n accu_5 = ti.cast(0, ti.f32)\n accu_6 = ti.cast(0, ti.f32)\n accu_7 = ti.cast(0, ti.f32)\n accu_8 = ti.cast(0, ti.f32)\n z = 0\n for x in range(i-radius, i+radius):\n for y in range(j-radius, j+radius):\n if ti.is_active(block1, [x,y]):\n accu_0 += x * x\n accu_1 += x * y\n accu_2 += x * z\n accu_3 += y * y\n accu_4 += y * z\n accu_5 += z * z\n accu_6 += x\n accu_7 += y\n accu_8 += z\n nb_pts += 1\n accu_0 /= nb_pts\n accu_1 /= nb_pts\n accu_2 /= nb_pts\n accu_3 /= nb_pts\n accu_4 /= nb_pts\n accu_5 /= nb_pts\n accu_6 /= nb_pts\n accu_7 /= nb_pts\n accu_8 /= nb_pts\n cov_mat_0 = accu_0 - accu_6 * accu_6\n cov_mat_1 = accu_1 - accu_6 * accu_7\n cov_mat_2 = accu_2 - accu_6 * accu_8\n cov_mat_4 = accu_3 - accu_7 * accu_7\n cov_mat_5 = accu_4 - accu_7 * accu_8\n cov_mat_8 = accu_5 - accu_8 * accu_8\n cov_mat_3 = cov_mat_1\n cov_mat_6 = cov_mat_2\n cov_mat_7 = cov_mat_5\n\n # Compute eigen value and eigen vector\n # Make sure in [-1, 1]\n scale = ti.max(1.0, ti.abs(cov_mat_0))\n scale = ti.max(scale, ti.abs(cov_mat_1))\n scale = ti.max(scale, ti.abs(cov_mat_2))\n scale = ti.max(scale, ti.abs(cov_mat_3))\n scale = ti.max(scale, ti.abs(cov_mat_4))\n scale = ti.max(scale, ti.abs(cov_mat_5))\n scale = ti.max(scale, ti.abs(cov_mat_6))\n scale = ti.max(scale, ti.abs(cov_mat_7))\n scale = ti.max(scale, ti.abs(cov_mat_8))\n if scale > 1.0:\n cov_mat_0 /= scale\n cov_mat_1 /= scale\n cov_mat_2 /= scale\n cov_mat_3 /= scale\n cov_mat_4 /= scale\n cov_mat_5 /= scale\n cov_mat_6 /= scale\n cov_mat_7 /= scale\n cov_mat_8 /= scale\n \n # Compute roots\n eigen_val_0 = ti.cast(0, ti.f32)\n eigen_val_1 = ti.cast(0, ti.f32)\n eigen_val_2 = ti.cast(0, ti.f32)\n \n c0 = cov_mat_0 * cov_mat_4 * cov_mat_8 \\\n + 2 * cov_mat_3 * cov_mat_6 * cov_mat_7 \\\n - cov_mat_0 * cov_mat_7 * cov_mat_7 \\\n - cov_mat_4 * cov_mat_6 * cov_mat_6 \\\n - cov_mat_8 * cov_mat_3 * cov_mat_3\n c1 = cov_mat_0 * cov_mat_4 \\\n - cov_mat_3 * cov_mat_3 \\\n + cov_mat_0 * cov_mat_8 \\\n - cov_mat_6 * cov_mat_6 \\\n + cov_mat_4 * cov_mat_8 \\\n - cov_mat_7 * cov_mat_7\n c2 = cov_mat_0 + cov_mat_4 + cov_mat_8\n \n if ti.abs(c0) < 0.00001:\n eigen_val_0 = 0\n d = c2 * c2 - 4.0 * c1\n if d < 0.0: # no real roots ! 
THIS SHOULD NOT HAPPEN!\n d = 0.0\n sd = ti.sqrt(d)\n eigen_val_2 = 0.5 * (c2 + sd)\n eigen_val_1 = 0.5 * (c2 - sd)\n else:\n s_inv3 = ti.cast(1.0 / 3.0, ti.f32)\n s_sqrt3 = ti.sqrt(3.0)\n c2_over_3 = c2 * s_inv3\n a_over_3 = (c1 - c2 * c2_over_3) * s_inv3\n if a_over_3 > 0:\n a_over_3 = 0\n \n half_b = 0.5 * (c0 + c2_over_3 * (2 * c2_over_3 * c2_over_3 - c1))\n q = half_b * half_b + a_over_3 * a_over_3 * a_over_3\n if q > 0:\n q = 0\n \n rho = ti.sqrt(-a_over_3)\n theta = ti.atan2(ti.sqrt(-q), half_b) * s_inv3\n cos_theta = ti.cos(theta)\n sin_theta = ti.sin(theta)\n eigen_val_0 = c2_over_3 + 2 * rho * cos_theta\n eigen_val_1 = c2_over_3 - rho * (cos_theta + s_sqrt3 * sin_theta)\n eigen_val_2 = c2_over_3 - rho * (cos_theta - s_sqrt3 * sin_theta)\n temp_swap = ti.cast(0, ti.f32)\n \n # Sort in increasing order.\n if eigen_val_0 >= eigen_val_1:\n temp_swap = eigen_val_1\n eigen_val_1 = eigen_val_0\n eigen_val_0 = temp_swap\n if eigen_val_1 >= eigen_val_2:\n temp_swap = eigen_val_2\n eigen_val_2 = eigen_val_1\n eigen_val_1 = temp_swap\n if eigen_val_0 >= eigen_val_1:\n temp_swap = eigen_val_1\n eigen_val_1 = eigen_val_0\n eigen_val_0 = temp_swap\n \n if eigen_val_0 <= 0:\n eigen_val_0 = 0\n d = c2 * c2 - 4.0 * c1\n if d < 0.0: # no real roots ! THIS SHOULD NOT HAPPEN!\n d = 0.0\n sd = ti.sqrt(d)\n eigen_val_2 = 0.5 * (c2 + sd)\n eigen_val_1 = 0.5 * (c2 - sd)\n # end of compute roots\n\n eigen_value = eigen_val_1 * scale # eigen value for 2D SDF\n # eigen value for 3D SDF\n #eigen_value = eigen_val_0 * scale\n\n #print(\"eigen_val_0 \", eigen_val_0)\n #print(\"eigen_val_1 \", eigen_val_1)\n #print(\"eigen_val_2 \", eigen_val_2)\n \n # TODO\n #scaledMat.diagonal ().array () -= eigenvalues (0)\n #eigenvector = detail::getLargest3x3Eigenvector<Vector> (scaledMat).vector;\n\n # Compute normal vector (TODO)\n #visual_norm[i,j][0] = eigen_val_0 #eigen_vector[0]\n #visual_norm[i,j][1] = eigen_val_1 #eigen_vector[1]\n #visual_norm[i,j][2] = eigen_val_2 #eigen_vector[2]\n\n # Compute the curvature surface change\n eig_sum = cov_mat_0 + cov_mat_1 + cov_mat_2\n visual_curv[i,j][0] = 0\n if eig_sum != 0:\n visual_curv[i,j][0] = eigen_val_1 # true curvature is: ti.abs(eigen_value / eig_sum)",
"def condensate_belowdew(Rs, Rv, Rsi, Rvi, Bo, Bg, Np, Gp):\n Btg = ((Bg * (1 - (Rs * Rvi))) + (Bo * (Rvi - Rv))) / (1 - (Rv * Rs)) # in RB/STB\n Bto = ((Bo * (1 - (Rv * Rsi))) + (Bg * (Rsi - Rs))) / (1 - (Rv * Rs)) # in RB/scf\n\n Gi = 0\n F = (Np * ((Bo - (Rs * Bg)) / (1 - (Rv * Rs)))) + ((Gp - Gi) * ((Bg - (Rv * Bo)) / (1 - (Rv * Rs))))\n Eg = Btg - Bg[0]\n return(F, Eg)",
"def evi_func(blue, red, nir):\n return (2.5 * (nir - red)) / (nir + 6 * red - 7.5 * blue + 1)",
"def savi(self,\n img):\n return (img.select(['NIR']).subtract(img.select(['RED'])).multiply(1 + self.const))\\\n .divide(img.select(['NIR']).add(img.select(['RED'])).add(self.const))\\\n .select([0], ['SAVI']).multiply(self.scale_factor).toInt16()",
"def test_circle_winding(setup):\n I, a, r0, r_c = setup\n Bz_analytic = mu_0*I/(2*a)\n \n B_calc = generic_curve.biot_savart_integral(r0, r_c, integration_dim='phi',\n spatial_dim='s', I=I)\n np.testing.assert_allclose(B_calc.sel(s=['x', 'y']), [0,0])\n np.testing.assert_allclose(B_calc.sel(s='z'), Bz_analytic)",
"def recf_legendre_rational(basetensor, ind, x):\n n = ind.getN()\n xv = x[:, ind.current_dim]\n Rnmin1 = basetensor[ind.all+ind.getPreceding(1)]\n Rnmin2 = basetensor[ind.all+ind.getPreceding(2)]\n return (2.*n-1.)/n*(xv-1.) / (xv+1.) * Rnmin1 - (n-1.) / n * Rnmin2",
"def F_calcDMradius(i, t, st, dm, t1, tth):\n mr = st.mn*dm.mxkg_v[i]/(st.mn+dm.mxkg_v[i]) # reduced mass, kg\n # before thermalization (cooling), rx changes with time:\n rxco = np.array([ F_rxco2(tim,t1,mr,(st.nb*1.e+6),dm.sigx_m,st.Rs,dm.mxkg_v[i],pF) for tim in t.time ]) # cm\n print \"-- Radius: rxco at t1 = \",F_rxco2(t1+0.1,t1,mr,(st.nb*1.e+6),dm.sigx_m,st.Rs,dm.mxkg_v[i],pF)\n # after thermalization:\n rxth1 = F_rxth(dm.mx_v[i],st.rhoc,st.Temp) # cm (formula)\n rxth2 = np.interp(tth,t.time,rxco) \t# cm (rxco(tth))\n rxth = rxth1\n print \" rxth=%.2e , rxth1=%.2e , rxth2=%.2e\" % (rxth,rxth1,rxth2)\n for k in xrange(len(t.time)):\n if t.time[k]<t1:\n t.rxtag[k] = 'Rs '\n t.rx[k] = st.Rs*1.e+2\n elif t.time[k]<tth:\n t.rxtag[k] = 'rxco'\n t.rx[k] = rxco[k]\n elif t.time[k]>=tth:\n t.rxtag[k] = 'rxth'\n t.rx[k] = rxth\n return rxco, rxth",
"def surf_bright(im, coord, minrad=3.):\n r = minrad\n slist = []\n while r < 80:\n aperture = CircularAperture(coord, r=r)\n phot_tab = aperture_photometry(im, aperture)\n s = phot_tab['aperture_sum']\n sb = s/(np.pi * r**2)\n print(sb)\n r += 1",
"def tophat_compensated(rad_obj, obj_posx, obj_posy, mapp, alpha):\n rad_filter = alpha * rad_obj\n extend = np.sqrt(2)\n rad_filter_sqrt2 = np.ceil(extend * rad_filter).astype(int)\n # annulus thickness normalised against ith void radius\n delta_eta = extend / args[\"Nbins\"]\n\n # distance of every pixel to centre\n pix_x = pix_y = np.arange(-rad_filter_sqrt2, rad_filter_sqrt2)\n pix_xx, pix_yy = np.meshgrid(pix_x, pix_y)\n pix_dist = np.sqrt(pix_xx ** 2 + pix_yy ** 2) / rad_filter\n\n # eta gives the annulus to which the pixel belongs\n eta = (pix_dist / delta_eta).astype(int)\n pix_xx = pix_xx[eta < args[\"Nbins\"]]\n pix_yy = pix_yy[eta < args[\"Nbins\"]]\n pix_dist = pix_dist[eta < args[\"Nbins\"]]\n eta = eta[eta < args[\"Nbins\"]]\n\n annulus_count = [list(eta).count(ee) for ee in np.unique(eta)]\n annulus_buffer = list(np.zeros(args[\"Nbins\"] - len(np.unique(eta))))\n annulus_count = np.asarray(annulus_count + annulus_buffer)\n annulus_value = np.zeros(args[\"Nbins\"])\n for pp in range(len(eta)):\n annulus_value[eta[pp]] += mapp[\n obj_posy + pix_xx[pp], obj_posx + pix_yy[pp]\n ]\n\n # Mean value in 0 -> rad_filter\n white_hat = np.mean(annulus_value[: np.ceil(1 / delta_eta).astype(int)])\n\n # Mean value in rad_filter -> sqrt(2)*rad_filter\n black_hat = np.mean(annulus_value[np.ceil(1 / delta_eta).astype(int) :])\n\n return white_hat - black_hat",
"def calc_source_blend_ang_radii(source, blend, log):\n\n log.info('\\n')\n log.info('Calculating the angular radius of the source star:')\n source.calc_stellar_ang_radius(log)\n log.info('Source angular radius (from SDSS (g-i), Boyajian+ 2014 relations) = '+str(round(source.ang_radius,4))+' +/- '+str(round(source.sig_ang_radius,4)))\n\n log.info('\\n')\n log.info('Calculating the angular radius of the blend:')\n blend.calc_stellar_ang_radius(log)\n log.info('Blend angular radius (from SDSS (g-i), Boyajian+ 2014 relations) = '+str(round(blend.ang_radius,4))+' +/- '+str(round(blend.sig_ang_radius,4)))\n\n return source, blend",
"def update():\n\n scan = np.clip(\n rc.lidar.get_samples() * LIDAR_OFFSET, 0, None\n ) # smooth(rc.lidar.get_samples())\n\n scan_xy = None\n\n color_image = rc.camera.get_color_image()\n depth_image = cv.bilateralFilter(rc.camera.get_depth_image(), 9, 75, 75)\n vis_image = np.zeros((2 * VIS_RADIUS, 2 * VIS_RADIUS, 3), np.uint8, \"C\")\n hsv_image = cv.cvtColor(color_image, cv.COLOR_BGR2HSV)\n\n # FSM\n\n speed = 0\n angle = 0\n global currentChallenge\n global oldState\n global colorPriority\n\n if currentChallenge == Challenge.ManualControl:\n speed, angle = manualControl()\n if rc.controller.was_pressed(rc.controller.Button.A):\n currentChallenge = oldState\n else:\n if rc.controller.was_pressed(rc.controller.Button.A):\n oldState = currentChallenge\n currentChallenge = Challenge.ManualControl\n\n curve = None\n path = None\n if currentChallenge == Challenge.Line:\n if colorPriority == None:\n # scan AR tags\n colorPriority = [\n Color.Red,\n Color.Green,\n Color.Blue,\n ] # {Color.Red: 1, Color.Blue: 2, Color.Green: 3}\n\n hsv_image[0 : height // 2, :] = [0, 0, 0] # crop out top half\n\n if colorPriority[2] == Color.Blue:\n blue_r = np.array([])\n blue_t = np.array([])\n else:\n mask = cv.inRange(hsv_image, BLUE[0], BLUE[1])\n # rc.display.show_color_image(mask)\n points = np.argwhere(mask != 0)\n depths = depth_image[points[:, 0], points[:, 1]]\n blue_r, blue_t = camera2Polar(points, depths)\n\n if colorPriority[2] == Color.Red:\n red_r = np.array([])\n red_t = np.array([])\n else:\n mask = cv.bitwise_or(\n cv.inRange(hsv_image, RED1[0], RED1[1]),\n cv.inRange(hsv_image, RED2[0], RED2[1]),\n )\n # rc.display.show_color_image(mask)\n points = np.argwhere(mask != 0)\n depths = depth_image[points[:, 0], points[:, 1]]\n red_r, red_t = camera2Polar(points, depths)\n\n if colorPriority[2] == Color.Green:\n green_r = np.array([])\n green_t = np.array([])\n else:\n mask = cv.inRange(hsv_image, GREEN[0], GREEN[1])\n # rc.display.show_color_image(mask)\n points = np.argwhere(mask != 0)\n depths = depth_image[points[:, 0], points[:, 1]]\n green_r, green_t = camera2Polar(points, depths)\n\n depths = np.concatenate([blue_r, red_r, green_r])\n sort = np.argsort(depths)\n\n points = np.array(\n [\n depths[sort],\n np.concatenate([blue_t, red_t, green_t])[sort],\n np.concatenate(\n [\n np.full_like(blue_r, Color.Blue),\n np.full_like(red_r, Color.Red),\n np.full_like(green_r, Color.Green),\n ]\n )[sort],\n ]\n )\n\n path = None\n\n if len(depths) > 5:\n final_r = np.array([])\n final_t = np.array([])\n\n oldt = -1\n\n for i in range(LINE_RADIUS // 5): # increments of 10 units\n p = points[\n :,\n np.logical_and(points[0, :] >= i * 5, points[0, :] < (i + 1) * 5),\n ]\n l = p.shape[1]\n if l > 0:\n unique = np.unique(p[2])\n # d = dict(zip(unique, counts))\n for c in colorPriority:\n if c in unique:\n args = np.argwhere(p[2] == c)\n tlist = p[1, args]\n c_t = np.mean(tlist)\n if oldt == -1 or abs(c_t - oldt) < 0.4: # radians\n final_r = np.append(final_r, p[0, args])\n final_t = np.append(final_t, tlist)\n oldt = c_t\n break\n # else:\n # final_r = np.append(final_r, p[0])\n # final_t = np.append(final_t, p[1])\n\n path = polar2TopDown(final_r, final_t)\n path[:, 1] -= 25\n curve = fitCurveToPath(path, vis_image)\n if currentChallenge == Challenge.Lane:\n mask = cv.bitwise_or(\n cv.inRange(hsv_image, PURPLE[0], PURPLE[1]),\n cv.inRange(hsv_image, ORANGE[0], ORANGE[1]),\n )\n # rc.display.show_color_image(mask)\n points = np.argwhere(mask != 0)\n depths = depth_image[points[:, 0], points[:, 1]]\n r, t = 
camera2Polar(points, depths)\n path = polar2TopDown(r, t)\n\n if len(path) > 0:\n final_x = np.array([])\n final_y = np.array([])\n\n # path = path[np.absolute(path[:, 1]) < LANE_HORIZ_RADIUS]\n\n for i in range(VIS_RADIUS // 5):\n p = path[np.logical_and(path[:, 1] >= i * 5, path[:, 1] < (i + 1) * 5)]\n if len(p) > 0:\n y = p[:, 1]\n p = p[:, 0]\n m = np.mean(p)\n left_mean = np.mean(p[p < m])\n right_mean = np.mean(p[p > m])\n if (\n abs(left_mean - right_mean) < 5\n or abs(left_mean - right_mean) > 100\n ):\n continue # throw out this data, one side is not visible\n m = (left_mean + right_mean) / 2\n p[p > m] += m - right_mean\n p[p < m] += m - left_mean\n final_x = np.append(final_x, p)\n final_y = np.append(final_y, y)\n\n path = np.transpose([final_x, final_y])\n curve = fitCurveToPath(path, vis_image)\n\n if curve is not None: # line or lane gave valid result\n # speed = 0.5\n slope = npp.polyval(5, np.polyder(curve))\n error = slope / 50\n # angleError = TARGET_ANGLE - (rightWallAngle + leftWallAngle) / 2\n # distError = npp.polyval(0, curve)\n\n # if abs(angleError) < ANGLE_THRESHHOLD:\n # error = distError\n # else:\n # error = distError / 10 + np.sin(np.radians(angleError)) * 2 # angleError / 30\n\n # angle = rc_utils.clamp(Angle_PID.update(error, rc.get_delta_time()), -1, 1)\n\n if True: # currentChallenge == Challenge.Cones:\n blue_image = np.zeros((VIS_RADIUS * 2, VIS_RADIUS * 2), dtype=np.uint8)\n red_image = np.zeros((VIS_RADIUS * 2, VIS_RADIUS * 2), dtype=np.uint8)\n\n visualizeColor(blue_image, hsv_image, depth_image, BLUE, 255)\n visualizeColor(red_image, hsv_image, depth_image, RED1, 255)\n visualizeColor(red_image, hsv_image, depth_image, RED2, 255)\n\n cones = []\n\n keypoints = conedetector.detect(blue_image)\n # vis_image = cv.drawKeypoints(\n # vis_image,\n # keypoints,\n # np.array([]),\n # (0, 255, 255),\n # cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,\n # )\n\n cones.append([[k.pt[0], k.pt[1], WaypointType.Blue] for k in keypoints])\n\n keypoints = conedetector.detect(red_image)\n # vis_image = cv.drawKeypoints(\n # vis_image,\n # keypoints,\n # np.array([]),\n # (0, 255, 255),\n # cv.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS,\n # )\n\n cones.append([[k.pt[0], k.pt[1], WaypointType.Red] for k in keypoints])\n\n scan_xy = lidar2TopDown(scan) # , 30, 330)\n scan_xy[:, 1] -= 15 # lidar offset\n scan_xy = scan_xy[\n (np.absolute(scan_xy) < 60).all(axis=1)\n ] # scipy.cluster.vq.whiten(scan_xy)\n\n centroids = None\n if scan_xy.size > 5:\n try:\n centroids, distortion = scipy.cluster.vq.kmeans(scan_xy, 3)\n except:\n print(\"K means error\")\n\n # print(c)\n\n if centroids is not None:\n v = topDown2Vis(centroids)\n for i in range(len(v[0])):\n cones.append([v[1][i], v[0][i], WaypointType.Unknown])\n # x = v[1][i]\n # y = v[0][i]\n # if x < VIS_RADIUS: # and y > VIS_RADIUS:\n # red_cones.append([x, y])\n # else:\n # blue_cones.append([x, y])\n # if c is not None:\n # dist = min(np.minimum(c[:, 0] - x), np.minimum(c[:, 1] - y))\n # if dist > 10:\n # rc_utils.draw_circle(vis_image, (x, y), (0, 255, 255), 5)\n # else:\n # rc_utils.draw_circle(vis_image, (x, y), (0, 255, 255), 5)\n\n if len(cones) > 0:\n for i in range(len(waypoints)):\n\n x = int(rc_utils.clamp(waypoints[c][0], 0, VIS_RADIUS * 2))\n y = int(rc_utils.clamp(waypoints[c][1], 0, VIS_RADIUS * 2))\n for i in waypoints[c + 1 :]:\n d = (i[0] - x) ** 2 + (i[1] - y) ** 2\n if d < 100:\n break\n\n waypoints = []\n gate_forming_cones = []\n\n for i in red_cones:\n for j in blue_cones:\n if abs(j[1] - i[1]) < 40 and 20 < 
abs(j[0] - i[0]) < 100: # found gate\n x = abs(j[0] - i[0]) / 2\n # y = (j[1] + i[1]) / 2\n # if y >= 20:\n waypoints.append([j[0] - x, j[1], WaypointType.Gate])\n waypoints.append([i[0] + x, i[1], WaypointType.Gate])\n gate_forming_cones.append(i)\n gate_forming_cones.append(j)\n # break\n\n # print(gate_forming_cones)\n\n # print(waypoints)\n firstrun = True\n for a in red_cones:\n x = int(a[1])\n y = int(a[0])\n rc_utils.draw_circle(vis_image, (x, y), (0, 127, 255), 5)\n for b in blue_cones:\n xb = int(b[1])\n yb = int(b[0])\n if firstrun:\n rc_utils.draw_circle(vis_image, (xb, yb), (255, 255, 0), 5)\n d = (xb - x) ** 2 + (yb - y) ** 2\n if d < 100:\n gate_forming_cones.append(a)\n firstrun = False\n\n for i in red_cones:\n if i not in gate_forming_cones:\n waypoints.append([i[0] + 30, i[1], WaypointType.Red])\n waypoints.append([i[0] + 20, i[1] - 20, WaypointType.Red])\n waypoints.append([i[0] + 20, i[1] + 20, WaypointType.Red])\n\n for i in blue_cones:\n if i not in gate_forming_cones:\n waypoints.append([i[0] - 30, i[1], WaypointType.Blue])\n waypoints.append([i[0] - 20, i[1] - 20, WaypointType.Blue])\n waypoints.append([i[0] - 20, i[1] + 20, WaypointType.Blue])\n\n if len(waypoints) > 0:\n w = []\n\n for c in range(len(waypoints)):\n x = int(rc_utils.clamp(waypoints[c][0], 0, VIS_RADIUS * 2))\n y = int(rc_utils.clamp(waypoints[c][1], 0, VIS_RADIUS * 2))\n for i in waypoints[c + 1 :]:\n d = (i[0] - x) ** 2 + (i[1] - y) ** 2\n if d < 100:\n break\n else:\n w.append(waypoints[c])\n rc_utils.draw_circle(vis_image, (y, x), (0, 255, 0), 5)\n\n # waypoints.append([VIS_RADIUS, VIS_RADIUS, WaypointType.Self])\n waypoints = np.array(w)\n\n waypoints[:, 0:2] -= VIS_RADIUS\n waypoints[:, 1] = np.negative(waypoints[:, 1])\n # print(waypoints)\n\n # fit curve to path\n try:\n curve = scipy.interpolate.interp1d(\n waypoints[:, 1],\n waypoints[:, 0],\n # type=\"cubic\",\n fill_value=\"extrapolate\",\n )\n except:\n print(\"Spline curve error\")\n curve = None\n\n if curve is not None:\n i = topDown2Vis(\n np.transpose(\n [\n curve(np.arange(-VIS_RADIUS, VIS_RADIUS)),\n np.arange(-VIS_RADIUS, VIS_RADIUS),\n ]\n )\n )\n if i is not None:\n vis_image[i] = [255, 255, 0] # add pixels in color image\n\n speed = 0.2\n slope = (curve(0.1) - curve(0)) / 0.1\n error = curve(0) / 10 # + slope * 5 # angleError / 30\n if np.isfinite(error):\n angle = rc_utils.clamp(Angle_PID.update(error, rc.get_delta_time()), -1, 1)\n global last_waypoint_type\n last_waypoint_type = waypoints[np.argmin(np.absolute(waypoints[:, 1])), 2]\n else:\n if last_waypoint_type == WaypointType.Blue:\n speed = 0.2\n angle = 1\n elif last_waypoint_type == WaypointType.Red:\n speed = 0.2\n angle = -1\n\n # print(\"centroids : \", centroids)\n # print(\"distortion :\", distortion)\n\n # pass\n # if currentChallenge == Challenge.Gate:\n # pass\n if currentChallenge == Challenge.Wall:\n pass\n\n # green dot in middle for car\n rc_utils.draw_circle(vis_image, (VIS_RADIUS, VIS_RADIUS), (0, 255, 255), 2)\n\n if scan_xy is not None:\n i = topDown2Vis(scan_xy)\n if i is not None:\n vis_image[i] = [255, 255, 255]\n\n visualizeColor(vis_image, hsv_image, depth_image, BLUE, (255, 127, 0))\n visualizeColor(vis_image, hsv_image, depth_image, RED1, (0, 0, 255))\n visualizeColor(vis_image, hsv_image, depth_image, RED2, (0, 0, 255))\n # visualizeColor(vis_image, hsv_image, depth_image, GREEN, (0, 255, 0))\n\n if path is not None:\n i = topDown2Vis(path)\n if i is not None:\n vis_image[i] = [0, 255, 255] # add pixels in color image\n\n # mask[points[:, 0], 
points[:, 1]] = depths\n # rc.display.show_depth_image(mask)\n\n # red = (255, 0, 0)\n # blue = (255, 127, 0)\n # green = (0, 255, 0)\n # orange = (0, 127, 255)\n # purple = (255, 0, 127)\n # visualizeColor(vis_image, hsv_image, depth_image, PURPLE, (255, 0, 127))\n # visualizeColor(vis_image, hsv_image, depth_image, ORANGE, (0, 127, 255))\n\n rc.display.show_color_image(vis_image)\n # rc.display.show_depth_image(depth_image)\n\n rc.drive.set_speed_angle(speed, angle)",
"def IR():\n s = np.array(\n [2.40774137,2.287696084,2.203613927,2.048710132,1.899829585,1.591776247,\n 2.021218754,2.572949552,3.298381484,3.635993426,3.788266224,3.8307278,3.834208811]\n )\n\n TI = np.array([50, 75, 100, 150, 200, 300, 400, 500, 750, 1000, 1500, 2000, 3000])\n\n comp1 = s * np.array([-159.1,-134.2,-109.1,-64.7,25.0,40.1,88.6,126.8,187.6,219.4,245.4,253.6,256.1])\n comp2 = s * np.array([-368.3,-356.9,-343.8,-318.1,-292.0,-242.5,-199.3,-158.4,-68.8,14.2,131.9,219.5,333.5])\n comp3 = s * np.array([-77.5,-51.9,-29.8,9.9,40.2,85.7,115.4,135.1,160.1,167.6,172.3,171.7,171.8])\n comp4 = s * np.array([-265.0,-240.6,-216.7,-170.5,-128.2,-53.5,9.6,62.3,159.7,223.8,296.5,328.3,346.7])\n comp5 = s * np.array([-346.5,-328.9,-312.1,-278.5,-244.4,-182.3,-128.0,-80.0,30.8,109.3,225.1,299.5,372.2])\n\n comp = [comp1, comp2, comp3, comp4, comp5]\n MSE = []\n colors = [\"#1f77b4\", \"#ff7f0e\", \"#2ca02c\", \"#d62728\", \"#9467bd\"]\n x_new = np.linspace(0, 3000, 10000)\n for i, j, k in zip(comp, colors, np.arange(1, 6)):\n plt.scatter(TI, i, c=j)\n # popt, _ = curve_fit(MZ, TI, i, p0=np.array([200, 220, 300]))\n popt, _ = curve_fit(MZ, TI, i, p0=np.array([300, 220]))\n # M_z0, T1, M0 = popt\n M0, T1 = popt\n y_new = MZ(x_new, *popt)\n plt.plot(x_new, y_new, \"--\", c=j, label=f\"Fit Comp. {k:d} : $T_1$={T1:3.2f}\")\n MSE.append(mean_squared_error(i,y_new[TI]))\n print(MSE)\n print(np.mean(MSE))\n plt.grid()\n plt.legend(loc=\"best\")\n plt.xlabel(\"TI\")\n plt.ylabel(r\"Singal Intensity $M_z$\")\n plt.show()",
"def model_prem(r):\n\n\t#- normalised radius\n\tx = r / 6371000.0\n\n\t#- march through the various depth levels -----------------------------------------------------\n\n\t#- upper crust\n\tif (r >= 6356000.0):\n\t\trho = 2.6\n\t\tvpv = 5.8\n\t\tvph = vpv\n\t\tvsv = 3.2\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- lower crust\n\telif (r >= 6346000.6) & (r < 6356000.0):\n\t\trho = 2.9\n\t\tvpv = 6.8\n\t\tvph = vpv\n\t\tvsv = 3.9\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- LID\n\telif (r >= 6291000.0) & (r < 6346000.6):\n\t\trho = 2.6910 + 0.6924 * x\n\t\tvpv = 0.8317 + 7.2180 * x\n\t\tvph = 3.5908 + 4.6172 * x\n\t\tvsv = 5.8582 - 1.4678 * x\n\t\tvsh = -1.0839 + 5.7176 * x\n\t\teta = 3.3687 - 2.4778 * x\n\n\t#- LVZ\n\telif (r >= 6151000.0) & (r < 6291000.0):\n\t\trho = 2.6910 + 0.6924 * x\n\t\tvpv = 0.8317 + 7.2180 * x\n\t\tvph = 3.5908 + 4.6172 * x\n\t\tvsv = 5.8582 - 1.4678 * x\n\t\tvsh = -1.0839 + 5.7176 * x\n\t\teta = 3.3687 - 2.4778 * x\n\n\t#- Transition zone 1\n\telif (r >= 5971000.0) & (r < 6151000.0):\n\t\trho = 7.1089 - 3.8045 * x\n\t\tvpv = 20.3926 - 12.2569 * x\n\t\tvph = vpv\n\t\tvsv = 8.9496 - 4.4597 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Transition zone 2\n\telif (r >= 5771000.0) & (r < 5971000.0):\n\t\trho = 11.2494 - 8.0298 * x\n\t\tvpv = 39.7027 - 32.6166 * x\n\t\tvph = vpv\n\t\tvsv = 22.3512 - 18.5856 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Transition zone 3\n\telif (r >= 5701000.0) & (r < 5771000.0):\n\t\trho = 5.3197 - 1.4836 * x\n\t\tvpv = 19.0957 - 9.8672 * x\n\t\tvph = vpv\n\t\tvsv = 9.9839 - 4.9324 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Lower mantle 1\n\telif (r >= 5600000.0) & (r < 5701000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 29.2766 - 23.6027 * x + 5.5242 * x**2 - 2.5514 * x**3\n\t\tvph = vpv\n\t\tvsv = 22.3459 - 17.2473 * x - 2.0834 * x**2 + 0.9783 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0 \n\n\t#- Lower mantle 2\n\telif (r >= 3630000.0) & (r < 5600000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 24.9520 - 40.4673 * x + 51.4832 * x**2 - 26.6419 * x**3\n\t\tvph = vpv\n\t\tvsv = 11.1671 - 13.7818 * x + 17.4575 * x**2 - 9.2777 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Lower mantle 3\n\telif (r >= 3480000.0) & (r < 3630000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 15.3891 - 5.3181 * x + 5.5242 * x**2 - 2.5514 * x**3\n\t\tvph = vpv\n\t\tvsv = 6.9254 + 1.4672 * x - 2.0834 * x**2 + 0.9783 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Outer core\n\telif (r >= 1221000.5) & (r < 3480000.0):\n\t\trho = 12.5815 - 1.2638 * x - 3.6426 * x**2 - 5.5281 * x**3\n\t\tvpv = 11.0487 - 4.0362 * x + 4.8023 * x**2 - 13.5732 * x**3\n\t\tvph = vpv\n\t\tvsv = 0.0\n\t\tvsh = 0.0\n\t\teta = 1.0\n\n\t#- Inner Core\n\telif (r >= 0.0) & (r < 1221000.5):\n\t\trho = 13.0885 - 8.8381 * x**2\n\t\tvpv = 11.2622 - 6.3640 * x**2\n\t\tvph = vpv\n\t\tvsv = 3.6678 - 4.4475 * x**2\n\t\tvsh = vsv\n\t\teta = 1.0 \n\n\t#- convert to elastic parameters --------------------------------------------------------------\n\n\trho = 1000.0 * rho\n\tvpv = 1000.0 * vpv\n\tvph = 1000.0 * vph\n\tvsv = 1000.0 * vsv\n\tvsh = 1000.0 * vsh\n\n\tA = rho * vph**2\n\tC = rho * vpv**2\n\tN = rho * vsh**2\n\tL = rho * vsv**2\n\tF = eta * (A - 2 * L)\n\n\treturn rho, A, C, F, L, N",
"def SNR(self, flux_sky, n_pix_star, flux_star, gain, ron):\n SNR = (gain*flux_star/sqrt(gain*flux_star + n_pix_star*gain*flux_sky + n_pix_star*ron**2)) \n return SNR",
"def ls5_sr_corr(img):\n return img.select(['B1'], ['BLUE']).float().multiply(0.91996).add(37).int16()\\\n .addBands(img.select(['B2'], ['GREEN']).float().multiply(0.92764).add(84).int16())\\\n .addBands(img.select(['B3'], ['RED']).float().multiply(0.8881).add(98).int16())\\\n .addBands(img.select(['B4'], ['NIR']).float().multiply(0.95057).add(38).int16())\\\n .addBands(img.select(['B5'], ['SWIR1']).float().multiply(0.96525).add(29).int16())\\\n .addBands(img.select(['B7'], ['SWIR2']).float().multiply(0.99601).add(20).int16())\\\n .addBands(img.select(['pixel_qa'], ['PIXEL_QA']).int16())\\\n .addBands(img.select(['radsat_qa'], ['RADSAT_QA']).int16())\\\n .copyProperties(img)\\\n .copyProperties(img, ['system:time_start', 'system:time_end', 'system:index', 'system:footprint'])",
"def model_prem_iso(r):\n\n\t#- normalised radius\n\tx = r / 6371000.0\n\n\t#- march through the various depth levels -----------------------------------------------------\n\n\t#- upper crust\n\tif (r >= 6356000.0):\n\t\trho = 2.6\n\t\tvpv = 5.8\n\t\tvph = vpv\n\t\tvsv = 3.2\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- lower crust\n\telif (r >= 6346000.6) & (r < 6356000.0):\n\t\trho = 2.9\n\t\tvpv = 6.8\n\t\tvph = vpv\n\t\tvsv = 3.9\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- LID\n\telif (r >= 6291000.0) & (r < 6346000.6):\n\t\trho = 2.6910 + 0.6924 * x\n\t\tvpv = 4.1875 + 3.9382 * x\n\t\tvph = vpv\n\t\tvsv = 2.1519 + 2.3481 * x\n\t\tvsh = vsv\n\t\teta = 3.3687 - 2.4778 * x\n\n\t#- LVZ\n\telif (r >= 6151000.0) & (r < 6291000.0):\n\t\trho = 2.6910 + 0.6924 * x\n\t\tvpv = 4.1875 + 3.9382 * x\n\t\tvph = vpv\n\t\tvsv = 2.1519 + 2.3481 * x\n\t\tvsh = vsv\n\t\teta = 3.3687 - 2.4778 * x\n\n\t#- Transition zone 1\n\telif (r >= 5971000.0) & (r < 6151000.0):\n\t\trho = 7.1089 - 3.8045 * x\n\t\tvpv = 20.3926 - 12.2569 * x\n\t\tvph = vpv\n\t\tvsv = 8.9496 - 4.4597 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Transition zone 2\n\telif (r >= 5771000.0) & (r < 5971000.0):\n\t\trho = 11.2494 - 8.0298 * x\n\t\tvpv = 39.7027 - 32.6166 * x\n\t\tvph = vpv\n\t\tvsv = 22.3512 - 18.5856 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Transition zone 3\n\telif (r >= 5701000.0) & (r < 5771000.0):\n\t\trho = 5.3197 - 1.4836 * x\n\t\tvpv = 19.0957 - 9.8672 * x\n\t\tvph = vpv\n\t\tvsv = 9.9839 - 4.9324 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Lower mantle 1\n\telif (r >= 5600000.0) & (r < 5701000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 29.2766 - 23.6027 * x + 5.5242 * x**2 - 2.5514 * x**3\n\t\tvph = vpv\n\t\tvsv = 22.3459 - 17.2473 * x - 2.0834 * x**2 + 0.9783 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0 \n\n\t#- Lower mantle 2\n\telif (r >= 3630000.0) & (r < 5600000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 24.9520 - 40.4673 * x + 51.4832 * x**2 - 26.6419 * x**3\n\t\tvph = vpv\n\t\tvsv = 11.1671 - 13.7818 * x + 17.4575 * x**2 - 9.2777 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Lower mantle 3\n\telif (r >= 3480000.0) & (r < 3630000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 15.3891 - 5.3181 * x + 5.5242 * x**2 - 2.5514 * x**3\n\t\tvph = vpv\n\t\tvsv = 6.9254 + 1.4672 * x - 2.0834 * x**2 + 0.9783 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Outer core\n\telif (r >= 1221000.5) & (r < 3480000.0):\n\t\trho = 12.5815 - 1.2638 * x - 3.6426 * x**2 - 5.5281 * x**3\n\t\tvpv = 11.0487 - 4.0362 * x + 4.8023 * x**2 - 13.5732 * x**3\n\t\tvph = vpv\n\t\tvsv = 0.0\n\t\tvsh = 0.0\n\t\teta = 1.0\n\n\t#- Inner Core\n\telif (r >= 0.0) & (r < 1221000.5):\n\t\trho = 13.0885 - 8.8381 * x**2\n\t\tvpv = 11.2622 - 6.3640 * x**2\n\t\tvph = vpv\n\t\tvsv = 3.6678 - 4.4475 * x**2\n\t\tvsh = vsv\n\t\teta = 1.0 \n\n\t#- convert to elastic parameters --------------------------------------------------------------\n\n\trho = 1000.0 * rho\n\tvpv = 1000.0 * vpv\n\tvph = 1000.0 * vph\n\tvsv = 1000.0 * vsv\n\tvsh = 1000.0 * vsh\n\n\tA = rho * vph**2\n\tC = rho * vpv**2\n\tN = rho * vsh**2\n\tL = rho * vsv**2\n\tF = eta * (A - 2 * L)\n\n\treturn rho, A, C, F, L, N",
"def radialApproxEffect(hubdist1,hubdist2,width,length):\n #Grating coordinates\n x,y = np.meshgrid(np.linspace(-width,width,1000),\\\n np.linspace(-length,length,1000))\n y1 = y + hubdist1\n y2 = y + hubdist2\n\n #Convert to period and yaw angle\n period1 = np.sqrt(x**2+y1**2)/hubdist1*160. #nm\n period2 = np.sqrt(x**2+y2**2)/hubdist2*160. #nm\n yaw = blazeYaw(1.5*np.pi/180,2.4,3,160.)\n yaw1 = np.pi/2 - np.arctan(x/y1) + yaw\n yaw2 = np.pi/2 - np.arctan(x/y2) + yaw\n\n #Determine alpha and beta\n beta0,alpha0 = litBetaAlpha(1.5*np.pi/180,2.4,3,160.)\n alpha1 = alpha0 + 3*2.4/period1*np.sin(yaw1)\n alpha2 = alpha0 + 3*2.4/period2*np.sin(yaw2)\n beta1 = beta0 + (3*2.4/period1)*np.cos(yaw1)\n beta2 = beta0 + (3*2.4/period2)*np.cos(yaw2)\n\n #Determine spot shifts\n x1 = hubdist2*(alpha1/beta1)\n x2 = hubdist2*(alpha2/beta2)\n \n\n pdb.set_trace()\n \n return x1,x2",
"def R_term(\n enst, # enstrophy field\n omega1, # vorticity-1 component\n omega2, # vorticity-2 component\n omega3, # vorticity-3 component\n s11, # strain rate-11 component\n s12, # strain rate-12 component\n s13, # strain rate-13 component\n s22, # strain rate-22 component\n s23, # strain rate-23 component\n s33, # strain rate-33 component\n diff = False): # differentiation flag\n #---------------------------------------------------------------------#\n # Defining domain variables #\n #---------------------------------------------------------------------#\n pi = np.pi # pi\n dx = (2.0*pi)/64.0 # spatial step\n nu = 0.000185 # default viscosity\n #---------------------------------------------------------------------#\n # Spectral differentiation variables #\n #---------------------------------------------------------------------#\n dim = 64\n kspec = np.fft.fftfreq(dim) * dim\n Kfield = np.array(np.meshgrid(kspec, kspec, kspec, indexing='ij'))\n #---------------------------------------------------------------------#\n # Spectral differentiation variables #\n #---------------------------------------------------------------------#\n term1 = np.zeros((dim, dim, dim))\n term2 = np.zeros((dim, dim, dim))\n term3 = np.zeros((dim, dim, dim))\n #---------------------------------------------------------------------#\n # Numerator (numpy gradient tool) #\n #---------------------------------------------------------------------#\n if diff is not False:\n term1 = np.gradient(enst,dx, edge_order=2)[0]\n term2 = np.gradient(enst,dx, edge_order=2)[1]\n term3 = np.gradient(enst,dx, edge_order=2)[2]\n #---------------------------------------------------------------------#\n # Numerator (spectral differentiation) #\n #---------------------------------------------------------------------#\n else:\n term1 = 0.5*np.fft.ifftn(1j*Kfield[0]*np.fft.fftn(enst) +\\\n 1j*Kfield[0]*np.fft.fftn(enst)).real\n term2 = 0.5*np.fft.ifftn(1j*Kfield[1]*np.fft.fftn(enst) +\\\n 1j*Kfield[1]*np.fft.fftn(enst)).real\n term3 = 0.5*np.fft.ifftn(1j*Kfield[2]*np.fft.fftn(enst) +\\\n 1j*Kfield[2]*np.fft.fftn(enst)).real\n #---------------------------------------------------------------------#\n # Numerator #\n #---------------------------------------------------------------------#\n num = nu*(term1**2.0+ term2**2.0 + term3**2.0)\n #---------------------------------------------------------------------#\n # Denominator #\n #---------------------------------------------------------------------#\n den = omega1*s11*omega1 + omega1*s12*omega2 + omega1*s13*omega3 +\\\n omega2*s12*omega1 + omega2*s22*omega2 + omega2*s23*omega3+\\\n omega3*s13*omega1 + omega3*s23*omega2 + omega3*s33*omega3\n #---------------------------------------------------------------------#\n # R calculation #\n #---------------------------------------------------------------------#\n R = num/den\n\n return R",
"def rad(tx,K,w,e,T0,Vo,P):\r\n\r\n M=2*np.pi*(tx-T0)/P #Mean anomaly\r\n E=np.pi\r\n for j in range(0,25):\r\n E=(M-e*(E*np.cos(E)-np.sin(E)))/(1-e*np.cos(E))\r\n th=2*np.arctan(((1+e)/(1-e))**0.5*np.tan(E/2))\r\n return K*(np.cos(th+w)+e*np.cos(w))+Vo",
"def rad(x) :#en mm!\r\n return topdia(x)/2.0",
"def _c_numeric(self, rij):\n radial_fun = np.zeros((self.lmax+1, self.nmax))\n radial_fun[0,1] = 1.0\n\n #Get local references to these variables so that we don't need `self`\n #all over in the overbasis calculation below.\n alpha = self.alpha\n rb = self.rb \n for n in range(1, self.nmax+1):\n argbess = 2*alpha*rb[n-1]*rij\n ep = np.exp(-alpha*(rij + rb[n-1])**2)\n em = np.exp(-alpha*(rij - rb[n-1])**2)\n #In the loops below, msb prefix refers to modified spherical bessel.\n for l in range(self.lmax+1):\n if l == 0:\n if argbess == 0.0:\n msb_fi_ki_l = np.exp(-alpha*(rb[n-1]**2 + rij**2))\n else:\n #msb_fi_ki_lm = cosh(arg_bess)/arg_bess\n #msb_fi_ki_l = sinh(arg_bess)/arg_bess\n msb_fi_ki_lm = 0.5 * (em + ep) / argbess\n msb_fi_ki_l = 0.5 * (em - ep) / argbess\n else:\n if argbess == 0.0:\n msb_fi_ki_l = 0.0\n else:\n msb_fi_ki_lmm = msb_fi_ki_lm\n msb_fi_ki_lm = msb_fi_ki_l\n msb_fi_ki_l = msb_fi_ki_lmm-(2*l-1)*msb_fi_ki_lm/argbess\n\n radial_fun[l,n-1] = msb_fi_ki_l #* rb[n-1]\n fc = fcut(rij, self.rcut, self.trans_width)\n return np.dot(radial_fun, self.transformbasis)*fc",
"def radiation_measurement_analysis():\n import pint\n ureg = pint.UnitRegistry()\n\n mrem_h = ureg.parse_units('mrem') / ureg.hour\n m = ureg.parse_units('meters')\n s = ureg.parse_units('seconds')\n\n # Measurements of background radiation\n bg_dist = ureg.parse_expression('10 m') # estimate of how far away we are wrt background\n background_rows = [\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=0.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.022 * mrem_h, capture_time=0.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=4.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.021 * mrem_h, capture_time=5.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=11.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=16.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.024 * mrem_h, capture_time=20.0 * s),\n ]\n\n # Measurements of sample radiation\n esp_dist = ureg.parse_expression('1 inch').to(m) / 2 # estimate of how far we are from the sample when very close\n dist0_rows = [\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=0.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.061 * mrem_h, capture_time=3.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=5.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=9.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=10.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=11.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.057 * mrem_h, capture_time=12.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.058 * mrem_h, capture_time=13.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=14.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=15.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.061 * mrem_h, capture_time=16.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.062 * mrem_h, capture_time=18.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.062 * mrem_h, capture_time=18.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=20.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=22.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.066 * mrem_h, capture_time=23.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=24.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.063 * mrem_h, capture_time=25.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=26.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=27.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=27.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=28.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.063 * mrem_h, capture_time=30.0 * s),\n ]\n\n dist0_v2_rows = [\n dict(vid=3, distance=esp_dist, rad=0.012 * mrem_h, capture_time=0.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.011 * mrem_h, capture_time=1.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.013 * mrem_h, capture_time=8.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.013 * mrem_h, capture_time=9.0 * s),\n ]\n\n close_rows = [\n dict(vid=4, distance=0.5 * m, rad=0.013 * mrem_h, capture_time=0.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.014 * mrem_h, capture_time=5.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.012 * mrem_h, capture_time=7.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.011 * mrem_h, capture_time=15.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.012 * mrem_h, capture_time=16.0 * 
s),\n ]\n\n mid_rows = [\n dict(vid=5, distance=1.0 * m, rad=0.014 * mrem_h, capture_time=0.0 * s),\n dict(vid=5, distance=1.0 * m, rad=0.015 * mrem_h, capture_time=5.0 * s),\n dict(vid=5, distance=1.0 * m, rad=0.013 * mrem_h, capture_time=10.0 * s),\n ]\n\n far_rows = [\n dict(vid=6, distance=2.0 * m, rad=0.023 * mrem_h, capture_time=0.0 * s),\n dict(vid=6, distance=2.0 * m, rad=0.025 * mrem_h, capture_time=0.1 * s),\n ]\n\n # guess_dist = ureg.parse_expression('0.3 m') # estimate of how far away we are wrt background\n # guess_rows = [\n # dict(vid=9, distance=guess_dist, rad=0.030 * mrem_h, capture_time=0.0 * s),\n # dict(vid=9, distance=guess_dist, rad=0.041 * mrem_h, capture_time=2.0 * s),\n # dict(vid=9, distance=guess_dist, rad=0.051 * mrem_h, capture_time=3.0 * s),\n # ]\n\n rows = dist0_rows + background_rows + dist0_v2_rows + close_rows + mid_rows + far_rows\n # rows += guess_rows\n\n import pandas as pd\n import numpy as np\n table = pd.DataFrame(rows)\n\n # Ensure comparable units\n units = {\n 'rad': mrem_h,\n 'distance': m,\n 'capture_time': s,\n }\n for key, unit in units.items():\n table[key] = table[key].apply(lambda c: c.to(unit).m)\n table['rad'] = table['rad'].astype(float)\n table['distance'] = table['distance'].astype(float)\n\n # Weight each measurement based on the amount of time the measurement was\n # sustained in the video.\n average_rad_rows = []\n for vid, group in table.groupby('vid'):\n from statsmodels.stats.weightstats import DescrStatsW\n weights = (-1 * group['capture_time'].diff(periods=-1).fillna(0)) / group['capture_time'].iloc[-1]\n table.loc[group.index, 'weight'] = weights\n values = group['rad']\n weighted_stats = DescrStatsW(values, weights=weights, ddof=0)\n dists = group['distance'].unique()\n assert len(dists) == 1\n average_rad_rows.append({\n 'vid': vid,\n 'distance': dists[0],\n 'rad_mean': weighted_stats.mean,\n 'rad_std': weighted_stats.std,\n })\n stats_table = pd.DataFrame(average_rad_rows)\n\n bg_row = stats_table.loc[stats_table['distance'].argmax()]\n fg_row = stats_table.loc[stats_table['distance'].argmin()]\n\n # -------------------\n ADD_DUMMY_VALUES = 0\n if ADD_DUMMY_VALUES:\n # Hack: because we don't have enough samples we can fudge the value\n # knowning that the value should be the background radiation in the\n # limit.\n\n dummy_measurements = []\n extra_support = 1\n for idx in range(3, 3 + extra_support):\n dummy_row = {\n 'vid': -idx,\n 'distance': bg_row['distance'] + idx,\n 'rad_mean': bg_row['rad_mean'],\n 'rad_std': 0.01,\n }\n dummy_measurements.append(dummy_row)\n\n # also add an extra value close to the sample\n rad_bg = bg_row['rad_mean']\n rad_above_bg = fg_row['rad_mean'] - rad_bg\n dummy_row = {\n 'vid': -1,\n 'distance': fg_row['distance'] / 2,\n 'rad_mean': rad_bg + (rad_above_bg * 4),\n 'rad_std': 0.5,\n }\n dummy_measurements.append(dummy_row)\n\n # dummy_row = {\n # 'vid': -2,\n # 'distance': fg_row['distance'] / 4,\n # 'rad_mean': rad_bg + (rad_above_bg * 16),\n # }\n # dummy_measurements.append(dummy_row)\n\n dummy_stats = pd.DataFrame(dummy_measurements)\n dummy_stats['weight'] = 0.5\n stats_table['weight'] = 1.0\n stats_table2 = pd.concat([stats_table, dummy_stats]).reset_index(drop=True).sort_values('distance')\n else:\n stats_table2 = stats_table\n # -------------------\n\n import scipy\n scipy.optimize.curve_fit\n\n # Because we know the radiation should follow an inverse square law wrt to\n # distance, we can fit a polynomial of degree 2 (parabola) to interpolate /\n # extrapolate the **inverse** 
values.\n x = stats_table2['distance'].values\n y = stats_table2['rad_mean'].values\n s = stats_table2['rad_std'].values\n\n # Model the squared falloff directly\n def invsquare(x, a, b):\n return a * (1 / (0.01 + x ** 2)) + b\n # bg_row['rad_mean']\n # Use curve_fit to constrain the first coefficient to be zero\n try:\n coef = scipy.optimize.curve_fit(invsquare, x, y, sigma=s, method='trf')[0]\n except Exception as ex:\n coef = None\n print(f'ex={ex}')\n\n # Also fit one to the raw weighted points as a sanity check\n # inv_poly2 = Polynomial.fit(table['distance'], 1 / table['rad'], w=table['weight'], deg=2)\n\n import kwplot\n sns = kwplot.autosns()\n plt = kwplot.autoplt()\n # ax = sns.boxplot(data=table, x='distance', y='rad', width=0.1)\n\n # Add in points to show each observation\n ax = sns.relplot(x=\"distance\", y=\"rad\", data=table, size=4, color=\".3\",\n linewidth=0, alpha=0.5, palette='deep')\n\n ax = plt.gca()\n ax.set_xlabel('distance from sample ({})'.format(str(units['distance'])))\n ax.set_ylabel('radiation dosage ({})'.format(str(units['rad'])))\n\n max_meters = 10\n\n extrap_x = np.linspace(0, max_meters, 1000)\n if coef is not None:\n extrap_y1 = invsquare(extrap_x, *coef)\n # extrap_y2 = 1 / inv_poly2(extrap_x)\n ax.plot(stats_table2['distance'].values, stats_table2['rad_mean'].values, 'rx')\n ax.plot(stats_table['distance'].values, stats_table['rad_mean'].values, 'bo')\n ax.plot(extrap_x, extrap_y1, '--')\n ax.set_ylim(0.001, 0.1)\n ax.set_yscale('log')\n # ax.plot(extrap_x, extrap_y2, '--')"
] | [
"0.59634537",
"0.59169656",
"0.5846073",
"0.5812182",
"0.5738116",
"0.57252634",
"0.5717098",
"0.56898355",
"0.56139106",
"0.5604561",
"0.5550189",
"0.5483282",
"0.54414994",
"0.5432839",
"0.54192543",
"0.5417974",
"0.5405411",
"0.5389052",
"0.53736633",
"0.5373638",
"0.53550863",
"0.5321604",
"0.5313399",
"0.53079355",
"0.52627397",
"0.5258244",
"0.5247617",
"0.5229825",
"0.52209216",
"0.5211677"
] | 0.6301096 | 0 |
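The commented steps 3–4 in the record above describe a Monte-Carlo cross-correlation recipe: perturb both spectra by their per-pixel uncertainties, regularize them, cross-correlate, fit a Gaussian plus linear background to the correlation peak, and turn the mean pixel shift into a radial velocity. Below is a minimal sketch of that loop, not taken from the dataset; the array names (fx_obj, fx_std, sig_obj, sig_std) and the km/s-per-pixel scale acoef_std are assumptions standing in for the variables used in the snippet.

# Minimal sketch (assumed inputs, not the dataset's code) of the Monte-Carlo pixel-shift loop.
# fx_obj/fx_std: fluxes on a common wavelength grid; sig_obj/sig_std: their uncertainties.
import numpy as np
from scipy import optimize as op

def mc_pixel_shift(fx_obj, fx_std, sig_obj, sig_std, n_iter=1000, half_width=200):
    n = len(fx_obj)
    shifts = []
    for _ in range(n_iter):
        # 1. perturb each spectrum by its own per-pixel uncertainty
        obj = fx_obj + np.random.normal(size=n) * sig_obj
        std = fx_std + np.random.normal(size=n) * sig_std
        # 2. regularize (zero mean, unit variance)
        obj = (obj - obj.mean()) / obj.std(ddof=1)
        std = (std - std.mean()) / std.std(ddof=1)
        # 3. cross-correlate and isolate the region around the peak
        ycorr = np.correlate(obj, std, mode='full')
        xcorr = np.arange(len(ycorr)) - len(ycorr) // 2
        imax = int(np.argmax(ycorr))
        lo = max(imax - half_width, 0)
        hi = min(imax + half_width, len(ycorr))
        x1, y1 = xcorr[lo:hi], ycorr[lo:hi]
        # 4. Gaussian + linear background; fit ln(amp) so the amplitude cannot go negative
        def resid(p):
            lnamp, mu, sig, sky, slope = p
            model = np.exp(lnamp) * np.exp(-0.5 * (x1 - mu) ** 2 / sig ** 2) + sky + slope * x1
            return y1 - model
        p0 = [np.log(max(y1.max(), 1e-6)), float(xcorr[imax]), 10.0, float(y1.min()), 0.0]
        p, _ = op.leastsq(resid, p0)
        shifts.append(p[1])
    shifts = np.asarray(shifts)
    return shifts.mean(), shifts.std(ddof=1)

# The RV then follows as in the snippet: rv = 2.99792458e5 * mean_shift / acoef_std  (km/s)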
Create a class to import and parse the excel spreadsheet that is used as an input file for V/UQpredictivity. | def __init__(self, input_file):
self.file_name = input_file
# Import the excel file:
self.xlfile = ExcelFile(self.file_name) # to retrieve & work w/ input | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def import_data(self):\n\n self.worksheet = (\n xlrd.open_workbook(filename=self.source).sheet_by_index(0)\n )\n # Import conversion data from worksheet and store as scipy arrays\n self.T_exp = np.array(\n self.worksheet.col_values(0, start_rowx=4, end_rowx=None)\n ) + 273.15\n self.HCout_raw = np.array(\n self.worksheet.col_values(4, start_rowx=4, end_rowx=None)\n )\n self.HCin_raw = np.array(\n self.worksheet.col_values(8, start_rowx=4, end_rowx=None)\n )\n self.eta_exp = (\n (self.HCin_raw - self.HCout_raw) / self.HCin_raw\n )\n self.T_model = np.linspace(\n self.T_exp[0] - 50, self.T_exp[-1] + 50, 25\n )\n self.T_array = self.T_model",
"def __init__(self, filename=None, filetype=None, instrument=None):\n if filename:\n if instrument == 'Element':\n skipfooter = 4\n header = 1\n drop = 9\n elif instrument == 'Agilent':\n skipfooter = 4\n header = 3\n drop = 3\n else:\n skipfooter = 0\n header = 0\n drop = 0\n\n if filetype == 'xlsx':\n pwd = os.getcwd()\n os.chdir(os.path.dirname(filename))\n self.imported = pd.ExcelFile(filename)\n self.data = self.imported.parse(\n 0, index_col=0, skipfooter=skipfooter, header=header)\n self.data = self.data.drop(self.data.index[:drop], axis=0)\n os.chdir(pwd)\n # TODO xlsx doesnt work with agilent type\n elif filetype == 'csv':\n pwd = os.getcwd()\n os.chdir(os.path.dirname(filename))\n self.data = pd.read_csv(filename, sep=',', index_col=0, skipfooter=skipfooter,\n header=header, engine='python')\n os.chdir(pwd)\n elif filetype == 'asc':\n pwd = os.getcwd()\n os.chdir(os.path.dirname(filename))\n self.data = pd.read_csv(filename, sep='\\t', index_col=0, skipfooter=skipfooter,\n header=header, engine='python')\n self.data = self.data.drop(self.data.index[:drop], axis=0)\n self.data.dropna(axis=1, how='all', inplace=True)\n self.data = self.data.apply(pd.to_numeric, errors='coerce')\n os.chdir(pwd)\n else:\n warnings.warn('File type not supported.')\n\n self.data.index = self.data.index.astype('float32')\n self.time = self.data.index\n self.elements = list(map(elem_resolution, self.data.columns))\n self.data.columns = self.elements\n\n self.srms = pd.ExcelFile('./SRM.xlsx').parse(index_col=0)\n self.sum_koeficients = pd.ExcelFile(\n './default_sum_koef.xlsx').parse(0, index_col=0, header=None).to_dict()[1]\n\n self.srm = None\n self.iolite = None\n self.names = None\n self.internal_std = None\n self.ablation_time = None\n\n self.laser_off = []\n self.laser_on = []\n self.skip = {'bcg_start': 0,\n 'bcg_end': 0,\n 'sample_start': 0,\n 'sample_end': 0} # time in seconds to skip from each bcg and sample\n\n self.filter_line = None\n self.starts = None\n self.ends = None\n self.bcg = None\n self.average_peaks = None\n self.ratio = None\n self.quantified = None\n self.lod = None\n self.correction_elements = None\n self.corrected_IS = None\n self.corrected_SO = None\n\n self.dx = None\n self.dy = None\n self.maps = {}\n self.qmaps = {}\n\n self.regression_values = {}\n self.regression_equations = {}",
"def __init__(self, infile, sheet, header=True, date_format=\"%Y-%m-%d\"):\n from openpyxl import load_workbook\n \n wb = load_workbook(infile, data_only=True, use_iterators=True, keep_vba=False)\n \n try:\n sheet = wb.worksheets[int(sheet)-1]\n \n except:\n for ws in wb.worksheets:\n if ws.title == sheet:\n sheet = ws\n break\n else:\n raise Exception(\"No worksheet named {0}\".format(sheet))\n \n self.iter = sheet.iter_rows()\n self.date_format = date_format\n \n if header:\n self.fieldnames = self.format_excel_row(self.iter.next())\n self.aliases = dict(zip(\n list(\"col{0}\".format(idx+1) for idx in xrange(len(self.fieldnames))),\n self.fieldnames))\n else:\n self.fieldnames = list(\"col{0}\".format(idx+1) for idx in xrange(len(sheet.columns))),\n self.aliases = None",
"def import_building_from_excel(\n project, building_name, construction_age, path_to_excel, sheet_names\n):\n\n def warn_constructiontype(element):\n \"\"\"Generic warning function\"\"\"\n if element.construction_type is None:\n warnings.warn(\n 'In zone \"%s\" the %s construction \"%s\" could not be loaded from the TypeBuildingElements.json, '\n \"an error will occur due to missing data for calculation.\"\n \"Check for spelling and the correct combination of building age and construction type.\"\n \"Here is the list of faulty entries:\\n%s\"\n \"\\nThese entries can easily be found checking the stated index in the produced ZonedInput.xlsx\"\n % (\n group[\"zone\"].iloc[0],\n element.name,\n group[\"OuterWallConstruction\"].iloc[0],\n group,\n )\n )\n\n bldg = Building(parent=project)\n bldg.name = building_name\n bldg.year_of_construction = construction_age\n bldg.with_ahu = True # HardCodedInput\n if bldg.with_ahu is True:\n bldg.central_ahu.heat_recovery = True # HardCodedInput\n bldg.central_ahu.efficiency_recovery = 0.35 # HardCodedInput\n bldg.central_ahu.temperature_profile = 25 * [273.15 + 18] # HardCodedInput\n bldg.central_ahu.min_relative_humidity_profile = 25 * [0] # HardCodedInput\n bldg.central_ahu.max_relative_humidity_profile = 25 * [1] # HardCodedInput\n bldg.central_ahu.v_flow_profile = 25 * [1] # HardCodedInput\n\n # Parameters that need hard coding in teasers logic classes\n # 1. \"use_set_back\" needs hard coding at aixlib.py in the init; defines\n # if the in the useconditions stated\n # heating_time with the respective set_back_temp should be applied.\n # use_set_back = false -> all hours of the day\n # have same set_temp_heat actual value: use_set_back = Check your current version!\n # 2. HeaterOn, CoolerOn, hHeat, lCool, etc. 
can be hard coded in the text\n # file\n # \"teaser / data / output / modelicatemplate / AixLib /\n # AixLib_ThermalZoneRecord_TwoElement\"\n # actual changes: Check your current version!\n\n # Parameters to be set for each and every zone (#HardCodedInput)\n # -----------------------------\n out_wall_tilt = 90\n window_tilt = 90\n ground_floor_tilt = 0\n floor_tilt = 0\n ceiling_tilt = 0\n rooftop_tilt = 0\n ground_floor_orientation = -2\n floor_orientation = -2\n rooftop_orientation = -1\n ceiling_orientation = -1\n # -----------------------------\n\n # load_building_data from excel_to_pandas DataFrame:\n data = import_data(path_to_excel, sheet_names)\n\n # informative print\n usage_types = get_list_of_present_entries(data[\"UsageType\"])\n print(\"List of present usage_types in the original Data set: \\n%s\" % usage_types)\n\n # define the zoning methodology/function\n data = zoning_example(data)\n\n # informative print\n usage_types = get_list_of_present_entries(data[\"Zone\"])\n print(\"List of zones after the zoning is applied: \\n%s\" % usage_types)\n\n # aggregate all rooms of each zone and for each set general parameter,\n # boundary conditions\n # and parameter regarding the building physics\n zones = data.groupby([\"Zone\"])\n for name, zone in zones:\n\n # Block: Thermal zone (general parameter)\n tz = ThermalZone(parent=bldg)\n tz.name = str(name)\n tz.area = zone[\"NetArea[m²]\"].sum()\n # room vice calculation of volume plus summing those\n tz.volume = (\n np.array(zone[\"NetArea[m²]\"]) * np.array(zone[\"HeatedRoomHeight[m]\"])\n ).sum()\n\n # Block: Boundary Conditions\n # load UsageOperationTime, Lighting, RoomClimate and InternalGains\n # from the \"UseCondition.json\"\n tz.use_conditions = UseConditions(parent=tz)\n tz.use_conditions.load_use_conditions(zone[\"Zone\"].iloc[0], project.data)\n\n # Block: Building Physics\n # Grouping by orientation and construction type\n # aggregating and feeding to the teaser logic classes\n grouped = zone.groupby([\"OuterWallOrientation[°]\", \"OuterWallConstruction\"])\n for name, group in grouped:\n # looping through a groupby object automatically discards the\n # groups where one of the attributes is nan\n # additionally check for strings, since the value must be of type\n # int or float\n if not isinstance(group[\"OuterWallOrientation[°]\"].iloc[0], str):\n out_wall = OuterWall(parent=tz)\n out_wall.name = (\n \"outer_wall_\"\n + str(int(group[\"OuterWallOrientation[°]\"].iloc[0]))\n + \"_\"\n + str(group[\"OuterWallConstruction\"].iloc[0])\n )\n out_wall.area = group[\"OuterWallArea[m²]\"].sum()\n out_wall.tilt = out_wall_tilt\n out_wall.orientation = group[\"OuterWallOrientation[°]\"].iloc[0]\n # load wall properties from \"TypeBuildingElements.json\"\n out_wall.load_type_element(\n year=bldg.year_of_construction,\n construction=group[\"OuterWallConstruction\"].iloc[0],\n )\n warn_constructiontype(out_wall)\n else:\n warnings.warn(\n 'In zone \"%s\" the OuterWallOrientation \"%s\" is '\n \"neither float nor int, \"\n \"hence this building element is not added.\\nHere is the \"\n \"list of faulty entries:\\n%s\"\n \"\\n These entries can easily be found checking the stated \"\n \"index in the produced ZonedInput.xlsx\"\n % (\n group[\"Zone\"].iloc[0],\n group[\"OuterWallOrientation[°]\"].iloc[0],\n group,\n )\n )\n\n grouped = zone.groupby([\"WindowOrientation[°]\", \"WindowConstruction\"])\n for name, group in grouped:\n # looping through a groupby object automatically discards the\n # groups where one of the attributes is nan\n 
# additionally check for strings, since the value must be of type\n # int or float\n if not isinstance(group[\"OuterWallOrientation[°]\"].iloc[0], str):\n window = Window(parent=tz)\n window.name = (\n \"window_\"\n + str(int(group[\"WindowOrientation[°]\"].iloc[0]))\n + \"_\"\n + str(group[\"WindowConstruction\"].iloc[0])\n )\n window.area = group[\"WindowArea[m²]\"].sum()\n window.tilt = window_tilt\n window.orientation = group[\"WindowOrientation[°]\"].iloc[0]\n # load wall properties from \"TypeBuildingElements.json\"\n window.load_type_element(\n year=bldg.year_of_construction,\n construction=group[\"WindowConstruction\"].iloc[0],\n )\n warn_constructiontype(window)\n else:\n warnings.warn(\n 'In zone \"%s\" the window orientation \"%s\" is neither '\n \"float nor int, \"\n \"hence this building element is not added. Here is the \"\n \"list of faulty entries:\\n%s\"\n \"\\nThese entries can easily be found checking the stated \"\n \"index in the produced ZonedInput.xlsx\"\n % (\n group[\"Zone\"].iloc[0],\n group[\"WindowOrientation[°]\"].iloc[0],\n group,\n )\n )\n\n grouped = zone.groupby([\"IsGroundFloor\", \"FloorConstruction\"])\n for name, group in grouped:\n if group[\"NetArea[m²]\"].sum() != 0: # to avoid devision by 0\n if group[\"IsGroundFloor\"].iloc[0] == 1:\n ground_floor = GroundFloor(parent=tz)\n ground_floor.name = \"ground_floor\" + str(\n group[\"FloorConstruction\"].iloc[0]\n )\n ground_floor.area = group[\"NetArea[m²]\"].sum()\n ground_floor.tilt = ground_floor_tilt\n ground_floor.orientation = ground_floor_orientation\n # load wall properties from \"TypeBuildingElements.json\"\n ground_floor.load_type_element(\n year=bldg.year_of_construction,\n construction=group[\"FloorConstruction\"].iloc[0],\n )\n warn_constructiontype(ground_floor)\n elif group[\"IsGroundFloor\"].iloc[0] == 0:\n floor = Floor(parent=tz)\n floor.name = \"floor\" + str(group[\"FloorConstruction\"].iloc[0])\n floor.area = group[\"NetArea[m²]\"].sum() / 2 # only half of\n # the floor belongs to this story\n floor.tilt = floor_tilt\n floor.orientation = floor_orientation\n # load wall properties from \"TypeBuildingElements.json\"\n floor.load_type_element(\n year=bldg.year_of_construction,\n construction=group[\"FloorConstruction\"].iloc[0],\n )\n warn_constructiontype(floor)\n else:\n warnings.warn(\n \"Values for IsGroundFloor have to be either 0 or 1, \"\n \"for no or yes respectively\"\n )\n else:\n warnings.warn(\n 'zone \"%s\" with IsGroundFloor \"%s\" and construction '\n 'type \"%s\" '\n \"has no floor nor groundfloor, since the area equals 0.\"\n % (\n group[\"Zone\"].iloc[0],\n group[\"IsGroundFloor\"].iloc[0],\n group[\"FloorConstruction\"].iloc[0],\n )\n )\n\n grouped = zone.groupby([\"IsRooftop\", \"CeilingConstruction\"])\n for name, group in grouped:\n if group[\"NetArea[m²]\"].sum() != 0: # to avoid devision by 0\n if group[\"IsRooftop\"].iloc[0] == 1:\n rooftop = Rooftop(parent=tz)\n rooftop.name = \"rooftop\" + str(group[\"CeilingConstruction\"].iloc[0])\n rooftop.area = group[\n \"NetArea[m²]\"\n ].sum() # sum up area of respective\n # rooftop parts\n rooftop.tilt = rooftop_tilt\n rooftop.orientation = rooftop_orientation\n # load wall properties from \"TypeBuildingElements.json\"\n rooftop.load_type_element(\n year=bldg.year_of_construction,\n construction=group[\"CeilingConstruction\"].iloc[0],\n )\n warn_constructiontype(rooftop)\n elif group[\"IsRooftop\"].iloc[0] == 0:\n ceiling = Ceiling(parent=tz)\n ceiling.name = \"ceiling\" + str(group[\"CeilingConstruction\"].iloc[0])\n 
ceiling.area = group[\"NetArea[m²]\"].sum() / 2 # only half\n # of the ceiling belongs to a story,\n # the other half to the above\n ceiling.tilt = ceiling_tilt\n ceiling.orientation = ceiling_orientation\n # load wall properties from \"TypeBuildingElements.json\"\n ceiling.load_type_element(\n year=bldg.year_of_construction,\n construction=group[\"CeilingConstruction\"].iloc[0],\n )\n warn_constructiontype(ceiling)\n else:\n warnings.warn(\n \"Values for IsRooftop have to be either 0 or 1, \"\n \"for no or yes respectively\"\n )\n else:\n warnings.warn(\n 'zone \"%s\" with IsRooftop \"%s\" and construction type '\n '\"%s\" '\n \"has no ceiling nor rooftop, since the area equals 0.\"\n % (\n group[\"Zone\"].iloc[0],\n group[\"IsRooftop\"].iloc[0],\n group[\"CeilingConstruction\"].iloc[0],\n )\n )\n\n grouped = zone.groupby([\"InnerWallConstruction\"])\n for name, group in grouped:\n if group[\"InnerWallArea[m²]\"].sum() != 0: # to avoid devision by 0\n in_wall = InnerWall(parent=tz)\n in_wall.name = \"inner_wall\" + str(\n group[\"InnerWallConstruction\"].iloc[0]\n )\n in_wall.area = group[\"InnerWallArea[m²]\"].sum() / 2 # only\n # half of the wall belongs to each room,\n # the other half to the adjacent\n # load wall properties from \"TypeBuildingElements.json\"\n in_wall.load_type_element(\n year=bldg.year_of_construction,\n construction=group[\"InnerWallConstruction\"].iloc[0],\n )\n warn_constructiontype(in_wall)\n else:\n warnings.warn(\n 'zone \"%s\" with inner wall construction \"%s\" has no '\n \"inner walls, since area = 0.\"\n % (group[\"Zone\"].iloc[0], group[\"InnerWallConstructio\" \"n\"].iloc[0])\n )\n\n # Block: AHU and infiltration #Attention hard coding\n # set the supply volume flow of the AHU per zone\n ahu_dict = {\n \"Bedroom\": [15.778, 15.778],\n \"Corridorsinthegeneralcarearea\": [5.2941, 5.2941],\n \"Examinationortreatmentroom\": [15.743, 15.743],\n \"MeetingConferenceseminar\": [16.036, 16.036],\n \"Stocktechnicalequipmentarchives\": [20.484, 20.484],\n \"WCandsanitaryroomsinnonresidentialbuildings\": [27.692, 27.692],\n }\n _i = 0\n for key in ahu_dict:\n if tz.name == key:\n tz.use_conditions.min_ahu = ahu_dict[key][0]\n tz.use_conditions.max_ahu = ahu_dict[key][1]\n _i = 1\n if _i == 0:\n warnings.warn(\n \"The zone %s could not be found in your ahu_dict. Hence, \"\n \"no AHU flow is defined. The default value is \"\n \"0 (min_ahu = 0; max_ahu=0\" % tz.name\n )\n\n return project, data",
"def import_heat_data(self):\n worksheet = (\n xlrd.open_workbook(filename=self.filename_heat).sheet_by_index(0)\n ) \n self.exh.corrected_reading = np.array(worksheet.col_values(0,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) \n self.exh.datum = worksheet.cell_value(2,4) # manometer datum (in) \n self.exh.pressure_drop = ( (self.exh.corrected_reading -\n self.exh.datum) * 2. * self.H2O_kPa ) \n # pressure drop across heat exchanger (kPa)\n self.cummins.torque = np.array(worksheet.col_values(1,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx))\n self.exh.T_inlet_array = np.array(worksheet.col_values(2,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) \n self.exh.T_outlet_array = np.array(worksheet.col_values(3,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) \n self.cool.T_inlet_array = np.array(worksheet.col_values(5,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx)) \n self.cool.T_outlet_array = np.array(worksheet.col_values(4,\n start_rowx=self.start_rowx, end_rowx=self.end_rowx))",
"def Excel_Load_Data( self, ExcelFilename ):\n pass",
"def __init__(self):\n self.project_dir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n self.excel_file = os.path.join(self.project_dir, \"data\", \"Literature_Data.xlsx\")\n self.spreadsheet_name = \"Individualized Data\"\n self.filled_output_file = os.path.join(self.project_dir, \"data\", \"filled_data.csv\")\n self.output_file = os.path.join(self.project_dir, \"data\", \"final.csv\")\n self.use_fake_data = False # For testing\n # This instance value \"self.df\" is the pandas DataFrame that contains all of the data\n # from the literature case studies. Manipulating this field is the purpose of this class.\n \n self.num_negative = 500\n self.df = None",
"def load_from_excel(self, excel_fp: str):\n # TODO:\n pass",
"def __init__(self, filepath):\n self._column_names = []\n self._row_list = []\n self._workbook = None\n\n try:\n self._workbook = xlrd.open_workbook(filepath)\n except:\n raise Exception('Nie można otworzyć pliku lub nieprawidłowy rodzaj pliku.')",
"def from_spreadsheet(spreadsheet, framework):\n\n # Basically the strategy is going to be\n # 1. Read in all of the stuff - pops, transfers, interpops can be directly added to Data\n # 2. Read in all the other TDVE content, and then store it in the data specs according to the variable type defined in the Framework\n # e.g. the fact that 'Alive' is a Characteristic is stored in the Framework and Data but not in the Databook. So for example, we read in\n # a TDVE table called 'Alive', but it needs to be stored in data.specs['charac']['ch_alive'] and the 'charac' and 'ch_alive' are only available in the Framework\n\n import openpyxl\n\n self = ProjectData(framework=framework)\n\n if not isinstance(spreadsheet, sc.Spreadsheet):\n spreadsheet = sc.Spreadsheet(spreadsheet)\n\n workbook = openpyxl.load_workbook(spreadsheet.tofile(), read_only=True, data_only=True) # Load in read-only mode for performance, since we don't parse comments etc.\n validate_category(workbook, \"atomica:databook\")\n\n # These sheets are optional - if none of these are provided in the databook\n # then they will remain empty\n self.transfers = list()\n self.interpops = list()\n\n for sheet in workbook.worksheets:\n\n if sheet.title.startswith(\"#ignore\"):\n continue\n\n if sheet.title == \"Population Definitions\":\n try:\n self._read_pops(sheet)\n except Exception as e:\n message = 'An error was detected on the \"Population Definitions\" sheet'\n raise Exception(\"%s -> %s\" % (message, e)) from e\n elif sheet.title == \"Transfers\":\n try:\n self._read_transfers(sheet)\n except Exception as e:\n message = 'An error was detected on the \"Transfers\" sheet'\n raise Exception(\"%s -> %s\" % (message, e)) from e\n elif sheet.title == \"Interactions\":\n try:\n self._read_interpops(sheet)\n except Exception as e:\n message = 'An error was detected on the \"Interactions\" sheet'\n raise Exception(\"%s -> %s\" % (message, e)) from e\n elif sheet.title == \"Metadata\":\n continue\n else:\n self.tdve_pages[sheet.title] = []\n tables, start_rows = read_tables(sheet)\n for table, start_row in zip(tables, start_rows):\n\n try:\n tdve = TimeDependentValuesEntry.from_rows(table)\n except Exception as e:\n message = 'Error on sheet \"%s\" while trying to read a TDVE table starting on row %d' % (sheet.title, start_row)\n raise Exception(\"%s -> %s\" % (message, e)) from e\n\n # If the TDVE is not in the Framework, that's a critical stop error, because the framework needs to at least declare\n # what kind of variable this is - otherwise, we don't know the allowed units and cannot write the databook back properly\n try:\n spec, item_type = framework.get_variable(tdve.name)\n except NotFoundError:\n message = 'Error on sheet \"%s\" while reading TDVE table \"%s\" (row %d). 
The variable was not found in the Framework' % (sheet.title, tdve.name, start_row)\n raise Exception(message)\n\n code_name = spec.name\n tdve.allowed_units = [framework.get_databook_units(code_name)]\n tdve.pop_type = spec[\"population type\"]\n\n # Migrate the units (20181114)\n # All TimeSeries instances in databook TDVE tables should have the same units as the allowed units\n # However, if the user entered something that is wrong, we need to keep it and alert them during validation\n # Therefore, we can migrate as long as the _old_ units made sense\n for ts in tdve.ts.values():\n if ts.units != tdve.allowed_units[0]:\n if not ts.units or ts.units.strip().lower() == tdve.allowed_units[0].strip().split()[0].strip().lower():\n ts.units = tdve.allowed_units[0]\n\n if not spec[\"databook page\"]:\n logger.warning('A TDVE table for \"%s\" (%s) was read in and will be used, but the Framework did not mark this quantity as appearing in the databook', tdve.name, code_name)\n tdve.comment = spec[\"guidance\"]\n\n if code_name in self.tdve:\n raise Exception('A TDVE table for \"%s\" (%s) appears more than once in the databook. The first table was on sheet \"%s\" and the first duplicate table is on sheet \"%s\" starting on row %d' % (tdve.name, code_name, [k for k, v in self.tdve_pages.items() if code_name in v][0], sheet.title, start_row))\n\n self.tdve[code_name] = tdve\n # Store the TDVE on the page it was actually on, rather than the one in the framework. Then, if users move anything around, the change will persist\n self.tdve_pages[sheet.title].append(code_name)\n\n tvals = set()\n for tdve in self.tdve.values():\n tvals.update(tdve.tvec)\n for tdc in self.transfers + self.interpops:\n tvals.update(tdc.tvec)\n self.tvec = np.array(sorted(tvals))\n\n return self",
"def __init__(self, filename=None):\n self.name = filename\n self.wb = None\n if os.path.exists(filename):\n try:\n self.wb = xlrd.open_workbook(filename)\n except:\n print(\"not an excel file\")\n else:\n self.set_amiSheetNames()\n self.filename = os.path.splitext(os.path.abspath(filename))[0]\n else:\n print(\"not a file\")",
"def test_parse_sample_sheet(self):\n pass",
"def read_xlsx(self, filename):\n xlsx = pd.ExcelFile(filename)\n for sheet in xlsx.sheet_names:\n table_index_header = cfg.get_list(\"table_index_header\", sheet)\n self.input_data[sheet] = xlsx.parse(\n sheet,\n index_col=list(range(int(table_index_header[0]))),\n header=list(range(int(table_index_header[1]))),\n squeeze=(\"series\" not in sheet),\n )\n self.check_input_data(warning=False)\n self.add_meta_data()\n return self",
"def read_excel_file(self):\n self.df = pd.read_excel(str(self.file_path))\n self.data_mat=np.array(self.df).astype(float).transpose()",
"def load(cls):\n \n # Loop through problems and build patient problem lists:\n probs = csv.reader(file(PROBLEMS_FILE,'U'),dialect='excel-tab')\n header = probs.next() \n for prob in probs:\n cls(dict(zip(header,prob))) # Create a problem instance ",
"def __init__(self, file_path):\n self.file_path = file_path\n self.current_row = 0\n self.workbook = \"\"\n self.sheet = \"\"\n self.load_workbook()",
"def read_excel_file(self):\n self.df = pd.read_excel(str(self.file_path))\n self.data_mat=np.array(self.df).astype(float)",
"def nodes_data_excel_parser(excel_path,**kwargs):\n excel_parser_engine = kwargs.get(\"engine\",\"xlrd\")\n\n # Check if excel file exists\n if not excel_path or not os.path.isfile(excel_path):\n raise FileNotFoundError(\n \"Excel data file {} not found.\".format(excel_path)\n )\n\n xls = pd.ExcelFile(excel_path,engine=excel_parser_engine)\n\n try:\n # TODO for sheet in xls.sheet_names:\n # nodes_data[sheet] = xls.parse(sheet)\n nodes_data = {\n \"buses\": xls.parse(\"buses\").replace({np.nan:None}),\n \"commodity_sources\": xls.parse(\"commodity_sources\").replace({np.nan:None}),\n \"transformers\": xls.parse(\"transformers\").replace({np.nan:None}),\n \"transformers_chp\": xls.parse(\"transformers_chp\").replace({np.nan:None}),\n \"renewables\": xls.parse(\"renewables\").replace({np.nan:None}),\n \"demand\": xls.parse(\"demand\").replace({np.nan:None}),\n \"storages\": xls.parse(\"storages\").replace({np.nan:None}),\n \"powerlines\": xls.parse(\"powerlines\").replace({np.nan:None}),\n \"timeseries\": xls.parse(\"time_series\").replace({np.nan:None}),\n \"financial\":xls.parse(\"financial\").replace({np.nan:None})\n }\n except KeyError:\n err_msg = \"Excel file must contains: [buses, commodity_sources, transformers, renewables, demand, storages, powerlines, financial and timeseries].\\n\\\n The following sheets are found: {}\".format(xls.sheet_names)\n raise Exception(err_msg)\n\n # set datetime index\n nodes_data[\"timeseries\"].set_index(\"timestamp\", inplace=True)\n nodes_data[\"timeseries\"].index = pd.to_datetime(\n nodes_data[\"timeseries\"].index\n )\n\n logger.info(\"Data from Excel file {} imported in as nodes data.\".format(excel_path))\n\n return nodes_data",
"def import_excel(self):\n self.ensure_one()\n if self.file_import:\n filecontent = base64.b64decode(self.file_import)\n try:\n # Todo: import excel\n input = cStringIO.StringIO()\n input.write(filecontent)\n wb = open_workbook(file_contents=input.getvalue())\n problem_emails = {\"inserted_names\": [],\n \"inserted_emails\": [],\n \"invalid_emails\": [],\n \"duplicate_names\": [],\n \"duplicate_emails\": []}\n for sheet in wb.sheets():\n try:\n self.insert_db(sheet, wb, problem_emails)\n except Exception as e:\n raise (str(e))\n\n except:\n # todo: import csv\n wb = filecontent.split('\\r\\n')\n for line in range(1, len(wb) - 1):\n line_data = wb[line].split(',')\n self.crete_line(line_data[0], line_data[1])\n\n if problem_emails['invalid_emails']:\n raise except_orm(_('Invalid Email Format Found!'),\n _( '\\n'.join(map(str, list(item for item in problem_emails['invalid_emails']))) + '\\n\\n Please check and try again.'))\n if problem_emails['duplicate_names']:\n raise except_orm(_('Duplicate Name Found!'),\n _( '\\n'.join(map(str, list(item for item in problem_emails['duplicate_names']))) + '\\n\\n Please check and try again.'))\n if problem_emails['duplicate_emails']:\n raise except_orm(_('Duplicate Email Found!'),\n _( '\\n'.join(map(str, list(item for item in problem_emails['duplicate_emails']))) + '\\n\\n Please check and try again.'))\n\n return {\n 'view_type': 'form',\n 'view_mode': 'form',\n 'res_model': 'shipmaster.invitation',\n 'res_id': self.id,\n 'view_id': False,\n 'type': 'ir.actions.act_window',\n 'target': 'new',\n }",
"def main(xls, seqtype):\n data_extraction = {}\n # 1 - Load data\n logger.info(f'Load {xls}')\n manifest, metadata = load_xls(xls)\n # 2 - Check file and data\n logger.info(f'Start to validate XLS')\n check_samples(manifest, metadata)\n check_seqtype(manifest, seqtype)\n check_metadata(metadata, seqtype)\n logger.success(f'Successfully validate XLS')\n # 3 - Export XLS to TSV for Qiime2\n logger.info(f'Start to export XLS to TSV')\n data_extraction = extract_manifest(manifest, seqtype, data_extraction)\n data_extraction, metadata_vars = extract_metadata(metadata, seqtype, data_extraction)\n export_to_tsv_for_qiime(data_extraction, metadata_vars, seqtype)\n logger.success(f'Done')",
"def test_from_file_xls(self):\n with TemporaryDirectory() as tmp:\n fp, df_test = save_simple_dataframe(tmp, 'test.xls')\n df_read = BaseDataClass.from_file(fp).df\n self.assertEqual(\n pd.testing.assert_frame_equal(df_test, df_read),\n None,\n )",
"def driver():\n\n directory = r\"C:/Users/Aftab Alam/Documents/GitHub\"\n directory = directory + r\"/SRM-placement-analyser/data/\"\n fileList = [directory+\"InfosysResult.xlsx\",directory+\"TCSResult.xlsx\",directory+\"CognizantResult.xlsx\",directory+\"WiproResult.xlsx\"]\n \n listOfPlaced = extractCommonData.extractCommonData(fileList)\n createNewExcelSheet(directory,listOfPlaced)",
"def __init__(self):\n self.rulesDF = pd.read_csv(\n 'https://docs.google.com/spreadsheets/d/1gKBEgk9HPkg-ZXrHzQtXHubBNW9g_0JdAYQVhoBpxvQ/export?format=csv&gid=1639723702')\n self.prospectsDF = pd.read_csv(\n 'https://docs.google.com/spreadsheets/d/1gKBEgk9HPkg-ZXrHzQtXHubBNW9g_0JdAYQVhoBpxvQ/export?format=csv&gid=0')",
"def import_excel(self, filepath_excel,database_type):\n if database_type == \"render\":\n try:\n connection = sqlite3.connect(self.filepath_render_database)\n pointer = connection.cursor()\n\n sql_anweisung = \"\"\"\n INSERT INTO render_information (\n object_type,\n name,\n radius,\n polar_angle_min,\n polar_anglel_max,\n polar_angle_segments,\n polar_angle_random_rad,\n azimuth_angle_min,\n azimuth_angle_max,\n azimuth_angle_segments,\n azimuth_angle_random_rad,\n tracking_obj,\n segmentation\n )\n VALUES (\n :object_type,\n :name,\n :radius,\n :polar_angle_min,\n :polar_anglel_max,\n :polar_angle_segments,\n :polar_angle_random_rad,\n :azimuth_angle_min,\n :azimuth_angle_max,\n :azimuth_angle_segments,\n :azimuth_angle_random_rad,\n :tracking_obj,\n :segmentation\n )\n \"\"\"\n with open(filepath_excel) as csvdatei:\n csv_reader_object = csv.reader(csvdatei, delimiter=';')\n next(csv_reader_object)\n pointer.executemany(sql_anweisung, csv_reader_object)\n connection.commit()\n connection.close()\n print(\"render data addet from excel file\")\n except :\n print(\"adding render data from excel file failed\")\n\n elif database_type == \"object\":\n try:\n connection = sqlite3.connect(self.filepath_object_database)\n pointer = connection.cursor()\n\n sql_anweisung = \"\"\"\n INSERT INTO object_information (\n obj_filepath,\n obj_name,\n obj_type,\n obj_scale_factor,\n obj_type,\n obj_location_x,\n obj_location_y,\n obj_location_z,\n obj_rotation_x,\n obj_rotation_y,\n obj_rotation_z,\n obj_amount_percent,\n obj_material_path,\n obj_point_in_time,\n maximum_random_rotation_degree_z,\n maximum_random_translation,\n random_amount\n )\n VALUES (\n :obj_filepath,\n :obj_name,\n :obj_type,\n :obj_scale_factor,\n :obj_type,\n :obj_location_x,\n :obj_location_y,\n :obj_location_z,\n :obj_rotation_x,\n :obj_rotation_y,\n :obj_rotation_z,\n :obj_amount_percent,\n :obj_material_path,\n :obj_point_in_time,\n :maximum_random_rotation_degree_z,\n :maximum_random_translation,\n :random_amount\n )\n \"\"\"\n with open(filepath_excel) as csvdatei:\n csv_reader_object = csv.reader(csvdatei, delimiter=';')\n print(csv_reader_object)\n next(csv_reader_object)\n pointer.executemany(sql_anweisung, csv_reader_object)\n connection.commit()\n connection.close()\n print(\"object data added from excel file\")\n except :\n print(\"adding object data from excel file failed\")\n\n else:\n print(\"no Database found, maybe check spelling in method call??\")\n return",
"def __init__(self):\n super(FlowData, self).__init__()\n self.filename_flow = 'trash can flow meter.xls'\n self.start_rowx = 2\n self.end_rowx = 17\n self.poly_order = 2\n self.trash_volume = 77.6e-3 # trash can volume (m^3)\n self.P = 100.",
"def get_xls(xls_name, sheet_name):\n cls = []\n # get xls file's path\n xlsPath = os.path.join(proDir, \"testFile\", 'case', xls_name)\n # open xls file\n file = open_workbook(xlsPath)\n # get sheet by name\n sheet = file.sheet_by_name(sheet_name)\n # get one sheet's rows\n nrows = sheet.nrows\n for i in range(nrows):\n if sheet.row_values(i)[0] != u'case_name':\n cls.append(sheet.row_values(i))\n return cls",
"def test_from_file_xlsx(self):\n with TemporaryDirectory() as tmp:\n fp, df_test = save_simple_dataframe(tmp, 'test.xlsx')\n df_read = BaseDataClass.from_file(fp).df\n self.assertEqual(\n pd.testing.assert_frame_equal(df_test, df_read),\n None,\n )",
"def __init__(\n self,\n source,\n name='Survey',\n coordinate_system=CoordinateSystem.CARTESIAN):\n\n # Validate if extension is supported.\n (_, extension) = os.path.splitext(source)\n accepted_extensions = set(['.gpx','.csv', '.txt'])\n extension = extension.lower()\n if extension not in accepted_extensions:\n raise TypeError(\n \"The file extension provided is not currently supported.\")\n\n # initialize class\n self.name = name\n self.source = source\n self.coordinate_system = coordinate_system\n\n if extension == '.gpx':\n self.data = self._read_gpx()\n elif (extension == '.csv') or (extension == '.txt'):\n expected_col_names = {\n CoordinateSystem.GEOGRAPHIC: ['latitude',\n 'longitude',\n 'elevation',],\n CoordinateSystem.UTM: ['northing',\n 'easting',\n 'elevation',],\n CoordinateSystem.CARTESIAN: ['x',\n 'y',\n 'z',]\n }\n self.data = self._read_txt(\n expected_col_names[self.coordinate_system])\n else:\n raise ValueError(\"Error while parsing supported file extension.\")",
"def main():\n\n era = dt.datetime.now()\n\n parser = xlslisp_compile_argdoc()\n args = parser.parse_args()\n\n space = os.path.splitext(args.file)[0]\n\n # Import the Values of Sheets of one Xlsx File\n\n sheet_by_name = openpyxl.load_workbook(args.file, data_only=True)\n sheet_by_name_keys_list = sheet_by_name.sheetnames\n\n stderr_print(\n \"xlslisp: reading {} sheets from: {}\".format(\n len(sheet_by_name_keys_list), args.file\n )\n )\n\n # Option to quit early\n\n if not args.force:\n stderr_print(\n \"xlslisp.py: Xlsx imported, run again with --force to replace Csv's\"\n )\n\n sys.exit(1)\n\n # Visit each Sheet\n\n for (index, sheetname) in enumerate(sheet_by_name_keys_list):\n sheet = sheet_by_name[sheetname]\n\n csv_name = \"{space}-{dashed_sheet}.csv\".format(\n space=space, dashed_sheet=sheetname.replace(\" \", \"-\")\n ).lower()\n\n # Collect Rows of String Values\n\n csv_ragged_rows = list()\n for row_index in range(sheet.max_row):\n row_mark = 1 + row_index\n\n csv_cells = list()\n\n for col_index in range(sheet.max_column):\n cell = sheet.cell(1 + row_index, 1 + col_index)\n col_mark = cell.column_letter\n assert col_mark == excel_az_mark(col_index)\n\n if False:\n if (col_mark, row_mark) == (\"C\", 89):\n pdb.set_trace()\n\n csv_cells.append(cell.value)\n\n # Warn of trailing spaces\n\n if str(csv_cells[-1]).endswith(\" \"):\n stderr_print(\n \"xlslisp: Warning: \"\n \"could rstrip cell at: {!r}!{}{} {}\".format(\n sheetname, col_mark, row_mark, csv_cells[-1]\n )\n )\n\n csv_ragged_rows.append(csv_cells)\n\n # Format as rectangular Csv to please GitHub\n #\n # per GitHub > Rendering CSV and TSV data\n # flagging ragged as \"we can make this file beautiful and searchable\"\n #\n\n csv_rows = rows_complete(csv_ragged_rows, cell=None)\n\n charstream = io.StringIO()\n csv_writer = csv.writer(charstream)\n for csv_cells in csv_rows:\n csv_writer.writerow(csv_cells)\n\n charstream.seek(0)\n csv_chars = charstream.read()\n\n # Write the lines with local \"os.linesep\" line-ending's\n # specifically Not the mix of \"\\r\\n\" and \"\\n\" from multi-line Excel cells\n # but without rstrip'ping the lines # TODO: poor choice to skip rstrip?\n\n csv_lines = csv_chars.splitlines()\n csv_joined = \"\\n\".join(csv_lines) + \"\\n\"\n\n stderr_print(\n \"xlslisp: writing {} chars of {} rows to: {}\".format(\n len(csv_joined), sheet.max_row, csv_name\n )\n )\n\n with open(csv_name, \"w\") as csv_writing:\n csv_writing.write(csv_joined)\n\n now = dt.datetime.now()\n stderr_print(\"xlslisp: elapsed time of\", (now - era), \"since\", era)\n\n sys.exit(0)",
"def read_xls_csv(self):\n filename = str(self.filename)\n location_stock_id = self.location\n vals = []\n inventory_create = self.env['stock.inventory']\n\n if (filename.endswith('xls') or filename.endswith('xlsx')):\n wb = xlrd.open_workbook(\n file_contents=base64.decodestring(self.xls_file))\n sheet = wb.sheet_by_index(0)\n\n for i in range(1, sheet.nrows):\n row = sheet.row_values(i)\n firstrow = sheet.row_values(0)\n firstrow = [str(item).lower() for item in firstrow]\n pid = row[firstrow.index('id')]\n quantity = row[firstrow.index('quantity')]\n product_obj = self.env['product.product'].search(\n [('id', '=', pid)])\n vals.append({\n 'product_code': product_obj.default_code,\n 'product_qty': quantity,\n 'location_id': location_stock_id.id,\n 'product_id': product_obj.id\n })\n inv = inventory_create.create({'name': self.inventory_name,\n 'location_id': location_stock_id.id,\n 'filter': 'partial'})\n stock_inventory_line = self.env['stock.inventory.line']\n # inv.prepare_inventory()\n for record in vals:\n record.update({'inventory_id': inv.id})\n stock_inventory_line.create(record)\n inv.action_done()\n\n else:\n xls_file = base64.b64decode(self.xls_file)\n file_input = cStringIO.StringIO(xls_file)\n file_input.seek(0)\n rows = []\n delimeter = ','\n reader = csv.reader(file_input, delimiter=delimeter,\n lineterminator='\\r\\n')\n for row in reader:\n rows.append(row)\n for row in rows[1:]:\n rows[0] = [str(item).lower() for item in rows[0]]\n product_obj = self.env['product.product'].search(\n [('id', '=', row[rows[0].index('id')])])\n vals.append({\n 'product_code': row[rows[0].index('id')],\n 'product_qty': row[rows[0].index('quantity')],\n 'location_id': location_stock_id.id,\n 'product_id': product_obj.id\n })\n inv = inventory_create.create({'name': self.inventory_name,\n 'location_id': location_stock_id.id,\n 'filter': 'partial'})\n stock_inventory_line = self.env['stock.inventory.line']\n # inv.prepare_inventory()\n for record in vals:\n record.update({'inventory_id': inv.id})\n stock_inventory_line.create(record)\n inv.action_done()\n return {\n 'name': 'Stock import',\n 'type': 'ir.actions.act_window',\n 'view_type': 'form',\n 'res_id': self.id,\n 'view_mode': 'tree,form',\n 'res_model': 'stock.inventory',\n 'target': 'current',\n }"
] | [
"0.7165949",
"0.6775792",
"0.6591296",
"0.65136635",
"0.6475",
"0.6339719",
"0.63090634",
"0.63020563",
"0.627618",
"0.6165925",
"0.61505485",
"0.60875565",
"0.60049736",
"0.5965323",
"0.59106916",
"0.58891374",
"0.58874655",
"0.587729",
"0.58750814",
"0.58100104",
"0.57986385",
"0.57843095",
"0.56940746",
"0.56548893",
"0.5624102",
"0.5599419",
"0.5574373",
"0.55706656",
"0.55636764",
"0.5558354"
] | 0.74365103 | 0 |
Like tf.train.range_input_producer, but randomizes every index instead of taking a range and shuffling every epoch (random reordering). | def random_index_input_producer(limit, num_epochs=None, seed=None,
capacity=32, shared_name=None, name=None):
with ops.op_scope([limit], name, "input_producer") as name:
index_tensor = tf.random_uniform([limit], minval=0, maxval=limit, dtype=tf.int64, seed=seed)
return tf.train.input_producer(
index_tensor, [], num_epochs, False, None, capacity,
shared_name, name, "fraction_of_%d_full" % capacity) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def random_slice_input_producer(tensor_list, num_epochs=None, seed=None,\n capacity=32, shared_name=None, name=None):\n with ops.op_scope(tensor_list, name, \"input_producer\"):\n tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)\n if not tensor_list:\n raise ValueError(\n \"Expected at least one tensor in slice_input_producer().\")\n range_size = tf.to_int64(array_ops.shape(tensor_list[0])[0])\n queue = random_index_input_producer(\n range_size, num_epochs=num_epochs,\n seed=seed, capacity=capacity,\n shared_name=shared_name)\n index = queue.dequeue()\n output = [array_ops.gather(t, index) for t in tensor_list]\n return output",
"def _producer(raw_data, batch_size, num_steps, name=None):\n with tf.name_scope(name, \"Producer\", [raw_data, batch_size, num_steps]):\n raw_data = tf.convert_to_tensor(raw_data, name=\"raw_data\", dtype=tf.int32)\n\n data_len = tf.size(raw_data)\n batch_len = data_len // batch_size\n data = tf.reshape(raw_data[0 : batch_size * batch_len],\n [batch_size, batch_len])\n\n epoch_size = (batch_len - 1) // num_steps\n assertion = tf.assert_positive(\n epoch_size,\n message=\"epoch_size == 0, decrease batch_size or num_steps\")\n with tf.control_dependencies([assertion]):\n epoch_size = tf.identity(epoch_size, name=\"epoch_size\")\n\n i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\n x = tf.slice(data, [0, i * num_steps], [batch_size, num_steps])\n y = tf.slice(data, [0, i * num_steps + 1], [batch_size, num_steps])\n return x, y",
"def ptb_producer(raw_data, unigrams, batch_size, num_steps, num_true, num_sampled, vocab_size, name=None):\n with tf.name_scope(name, \"PTBProducer\", [raw_data, batch_size, num_steps]):\n raw_data = tf.convert_to_tensor(raw_data, name=\"raw_data\", dtype=tf.int64)\n\n data_len = tf.size(raw_data)\n batch_len = data_len // batch_size\n data = tf.reshape(raw_data[0 : batch_size * batch_len],\n [batch_size, batch_len])\n\n epoch_size = (batch_len - 1) // num_steps\n assertion = tf.assert_positive(\n epoch_size,\n message=\"epoch_size == 0, decrease batch_size or num_steps\")\n with tf.control_dependencies([assertion]):\n epoch_size = tf.identity(epoch_size, name=\"epoch_size\")\n\n i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\n x = tf.strided_slice(data, [0, i * num_steps],\n [batch_size, (i + 1) * num_steps])\n x.set_shape([batch_size, num_steps])\n y = tf.strided_slice(data, [0, i * num_steps + 1],\n [batch_size, (i + 1) * num_steps + 1])\n y.set_shape([batch_size, num_steps])\n ns = None\n\n psw_list = get_split_weights_for_perp(tf.reshape(y, [-1]), unigrams, vocab_size, batch_size*num_steps)\n \n for i in range(len(psw_list)):\n psw_list[i] = tf.reshape(psw_list[i], [batch_size, num_steps])\n\n '''\n if num_sampled > 0:\n y_list = tf.unpack(y, axis=1)\n ns_list = []\n for i in range(num_steps):\n ns = get_neg_samples(batch_size, num_true, num_sampled, vocab_size, y_list[i], unigrams)\n ns_list.append(ns)\n else:\n ns = None\n '''\n\n return x, y, ns, psw_list",
"def sample_data_input_fn(params):\n window_size = params['window_size']\n batch_size = params['batch_size']\n\n dataset_names = sample_data.get_data_names()\n all_downsampled = [sample_data.get_downsampled_data(name) for name in dataset_names]\n np_dtype = all_downsampled[0].dtype\n _, num_columns = all_downsampled[0].shape\n assert num_columns == 3\n\n # For each data item, this computes\n time_diffs = [(x[1:, 0] - x[:-1, 0]) for x in all_downsampled]\n median_time_diff = np.median(np.concatenate(time_diffs, axis=0))\n lower, upper = median_time_diff * 0.8, median_time_diff * 1.2\n valid_start_window_indices = [\n get_window_valid_indices(d, lower, upper, window_size) for d in time_diffs\n ]\n for name, valid_indices in zip(dataset_names, valid_start_window_indices):\n if np.size(valid_indices) == 0:\n raise ValueError(\"{} has no valid window ranges\".format(name))\n\n def get_samples_py_op(idx_array):\n assert isinstance(idx_array, np.ndarray)\n assert idx_array.shape == (batch_size, )\n samp_results = np.zeros((batch_size, window_size, num_columns), dtype=np_dtype)\n for i, sample_idx in enumerate(idx_array):\n start_idx = random.choice(valid_start_window_indices[sample_idx])\n samp_results[i, :, :] = all_downsampled[sample_idx][start_idx: (\n start_idx + window_size)]\n assert samp_results.shape == (batch_size, window_size, num_columns)\n return samp_results\n\n def get_window_sample(idx_tensor):\n samples = tf.py_func(get_samples_py_op, [idx_tensor], np_dtype)\n samples.set_shape((batch_size, window_size, num_columns))\n return samples\n\n def random_negative_py_op(idx_array):\n assert isinstance(idx_array, np.ndarray)\n neg_idx_array = np.copy(idx_array)\n for i, idx in enumerate(idx_array):\n while neg_idx_array[i] == idx_array[i]:\n neg_idx_array[i] = random.randint(0, len(all_downsampled) - 1)\n return neg_idx_array\n\n def get_negative_window_sample(idx_tensor):\n neg_idx_tensor = tf.py_func(\n random_negative_py_op,\n [idx_tensor],\n idx_tensor.dtype)\n return get_window_sample(neg_idx_tensor)\n\n # Current sample method: First select sample index, then select window.\n num_samples = len(all_downsampled)\n if num_samples < 2:\n raise ValueError(\"Need at least 2 light curves for negative samples!\")\n dataset = tf.data.Dataset.range(num_samples)\n dataset = dataset.repeat().shuffle(num_samples * 2).batch(batch_size)\n\n positive = dataset.map(lambda idx_tensor: {\n 'left': get_window_sample(idx_tensor),\n 'right': get_window_sample(idx_tensor),\n 'goal': tf.constant([1.0] * batch_size, dtype=tf.float64)\n })\n negative = dataset.map(lambda idx_tensor: {\n 'left': get_window_sample(idx_tensor),\n 'right': get_negative_window_sample(idx_tensor),\n 'goal': tf.constant([0.0] * batch_size, dtype=tf.float64)\n })\n\n # TODO(gatoatigrado): Experiment with shuffling positive & negative within a batch.\n # Currently each batch is just positive or negative.\n assert positive.output_shapes == negative.output_shapes\n assert negative.output_types == positive.output_types\n dataset = tf.contrib.data.sample_from_datasets((positive, negative))\n assert dataset.output_shapes == negative.output_shapes\n return dataset",
"def scheduled_sample(ground_truth_x, generated_x, batch_size, num_ground_truth):\n idx = tf.random_shuffle(tf.range(int(batch_size)))\n ground_truth_idx = tf.gather(idx, tf.range(num_ground_truth))\n generated_idx = tf.gather(idx, tf.range(num_ground_truth, int(batch_size)))\n\n ground_truth_examps = tf.gather(ground_truth_x, ground_truth_idx)\n generated_examps = tf.gather(generated_x, generated_idx)\n return tf.dynamic_stitch([ground_truth_idx, generated_idx],\n [ground_truth_examps, generated_examps])",
"def test_get_id_range_for_partition_with_sparse_range():\n min_id = 4\n max_id = 5999\n partition_size = 2000\n id_range_item_count = max_id - min_id + 1 # this many individual IDs should be processed for continuous ID range\n record_ids = {4, 5, 7, 99, 101, 120, 1998, 1999, 2000, 2001, 2002, 4444, 5999}\n etl_config = {\"partition_size\": partition_size}\n ctrl = PostgresElasticsearchIndexerController(etl_config)\n ctrl.min_id = min_id\n ctrl.max_id = max_id\n ctrl.record_count = len(record_ids)\n ctrl.config[\"partitions\"] = ctrl.determine_partitions()\n assert ctrl.config[\"partitions\"] == ceil(id_range_item_count / partition_size)\n partition_range = range(0, ctrl.config[\"partitions\"])\n # First batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[0])\n assert lower_bound == min_id\n assert upper_bound == lower_bound + (partition_size - 1)\n # Second batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[1])\n assert lower_bound == min_id + partition_size\n assert upper_bound == lower_bound + (partition_size - 1)\n # Last batch should go all the way up to max_id\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[-1])\n assert lower_bound == (min_id + (partition_size * partition_range[-1]))\n assert upper_bound == max_id\n assert _remove_seen_ids(ctrl, record_ids) == set({})",
"def batchify(t, n, randomize=True):\n\n inds = np.arange(t)\n if randomize:\n np.random.shuffle(inds)\n\n while len(inds) > 0:\n\n yield inds[:n]\n inds = np.delete(inds, slice(n))",
"def minibatch(x_train, y_train, batch_size, train_epochs):\n epoch = 0\n start = 0\n key = random.PRNGKey(0)\n\n while epoch < train_epochs:\n end = start + batch_size\n\n if end > x_train.shape[0]:\n key, split = random.split(key)\n permutation = random.permutation(split,\n np.arange(x_train.shape[0], dtype=np.int64))\n x_train = x_train[permutation]\n y_train = y_train[permutation]\n epoch += 1\n start = 0\n continue\n\n yield x_train[start:end], y_train[start:end]\n start = start + batch_size",
"def sampler(self) -> Generator[int, None, None]:\n remaining = self.num_samples\n if self.shuffled:\n while remaining > 0:\n n = min(remaining, len(self.data_source))\n for idx in torch.randperm(len(self.data_source))[0:n]:\n yield int(idx)\n remaining -= n\n else:\n current_idx = None\n while remaining > 0:\n if current_idx is None or current_idx >= len(self.data_source):\n current_idx = 0\n yield current_idx\n current_idx += 1\n remaining -= 1",
"def sample(self, batch_size):\n indices = np.random.randint(len(self._storage), size=batch_size)\n return [self._storage[i] for i in indices]",
"def test_get_id_range_for_partition_with_one_over():\n min_id = 1\n max_id = 101\n partition_size = 20\n id_range_item_count = max_id - min_id + 1 # this many individual IDs should be processed for continuous ID range\n assert id_range_item_count % partition_size == 1 # one over the partition size\n etl_config = {\"partition_size\": partition_size}\n ctrl = PostgresElasticsearchIndexerController(etl_config)\n ctrl.min_id = min_id\n ctrl.max_id = max_id\n ctrl.record_count = id_range_item_count # assume records exist for each ID in range\n ctrl.config[\"partitions\"] = ctrl.determine_partitions()\n assert ctrl.config[\"partitions\"] == ceil(id_range_item_count / partition_size)\n partition_range = range(0, ctrl.config[\"partitions\"])\n # First batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[0])\n assert lower_bound == min_id\n assert upper_bound == lower_bound + (partition_size - 1)\n # Second batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[1])\n assert lower_bound == min_id + partition_size\n assert upper_bound == lower_bound + (partition_size - 1)\n # Last batch should go all the way up to max_id\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[-1])\n assert lower_bound == (min_id + (partition_size * partition_range[-1])) == 101\n assert upper_bound == max_id == 101\n id_set = set(range(min_id, max_id + 1))\n assert _remove_seen_ids(ctrl, id_set) == set({})",
"def ptb_producer(raw_data, batch_size, num_steps, name=None):\n with tf.name_scope(name, \"PTBProducer\", [raw_data, batch_size, num_steps]):\n raw_data = tf.convert_to_tensor(raw_data, name=\"raw_data\", dtype=tf.int32)\n\n data_len = tf.size(raw_data)\n batch_len = data_len // batch_size\n data = tf.reshape(raw_data[0 : batch_size * batch_len],\n [batch_size, batch_len])\n\n epoch_size = (batch_len - 1) // num_steps\n assertion = tf.assert_positive(\n epoch_size,\n message=\"epoch_size == 0, decrease batch_size or num_steps\")\n with tf.control_dependencies([assertion]):\n epoch_size = tf.identity(epoch_size, name=\"epoch_size\")\n\n i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\n x = tf.strided_slice(data, [0, i * num_steps],\n [batch_size, (i + 1) * num_steps])\n x.set_shape([batch_size, num_steps])\n y = tf.strided_slice(data, [0, i * num_steps + 1],\n [batch_size, (i + 1) * num_steps + 1])\n y.set_shape([batch_size, num_steps])\n return x, y",
"def data_producer(raw_data, batch_size, num_steps, name=None):\n with tf.name_scope(name, \"DataProducer\", [raw_data, batch_size, num_steps]):\n raw_data = tf.convert_to_tensor(raw_data, name=\"raw_data\", dtype=tf.int32)\n\n data_len = tf.size(raw_data)\n batch_len = data_len // batch_size\n data = tf.reshape(raw_data[0: batch_size * batch_len],\n [batch_size, batch_len])\n\n epoch_size = (batch_len - 1) // num_steps\n assertion = tf.assert_positive(\n epoch_size,\n message=\"epoch_size == 0, decrease batch_size or num_steps\")\n with tf.control_dependencies([assertion]):\n epoch_size = tf.identity(epoch_size, name=\"epoch_size\")\n\n i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\n x = tf.strided_slice(data, [0, i * num_steps],\n [batch_size, (i + 1) * num_steps])\n x.set_shape([batch_size, num_steps])\n y = tf.strided_slice(data, [0, i * num_steps + 1],\n [batch_size, (i + 1) * num_steps + 1])\n y.set_shape([batch_size, num_steps])\n return x, y",
"def sample(self, batch_size):\n # get the sum of priorities\n priority_sum = self.sum_tree.get_sum_priority()\n # sample priorities \n priorities_to_sample = np.random.uniform(0, priority_sum, batch_size)\n # get the indexes of replays\n sample_idxes = [self.sum_tree.get(x) for x in priorities_to_sample]\n # fetch the transitions and prepare the batch for training\n random_sample = [self.queue[x] for x in sample_idxes]\n # zip\n zipped = [ torch.from_numpy( np.asarray(arr).astype(np.float32) ).float() for arr in zip(*random_sample) ]\n sample = Transition( zipped[0], zipped[1].unsqueeze_(-1).long(), zipped[2].unsqueeze_(-1), zipped[3], zipped[4].unsqueeze_(-1).byte() )\n return sample, sample_idxes",
"def test_get_id_range_for_partition_with_empty_partitions():\n min_id = 1\n max_id = 100\n partition_size = 20\n id_range_item_count = max_id - min_id + 1 # this many individual IDs should be processed for continuous ID range\n record_ids = {1, 5, 7, 15, 19, 20, 41, 100}\n etl_config = {\"partition_size\": partition_size}\n ctrl = PostgresElasticsearchIndexerController(etl_config)\n ctrl.min_id = min_id\n ctrl.max_id = max_id\n ctrl.record_count = len(record_ids)\n ctrl.config[\"partitions\"] = ctrl.determine_partitions()\n assert ctrl.config[\"partitions\"] == ceil(id_range_item_count / partition_size)\n partition_range = range(0, ctrl.config[\"partitions\"])\n # First batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[0])\n assert lower_bound == min_id\n assert upper_bound == lower_bound + (partition_size - 1)\n # Second batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[1])\n assert lower_bound == min_id + partition_size\n assert upper_bound == lower_bound + (partition_size - 1)\n # Last batch should go all the way up to max_id\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[-1])\n assert lower_bound == (min_id + (partition_size * partition_range[-1]))\n assert upper_bound == max_id\n assert _remove_seen_ids(ctrl, record_ids) == set({})",
"def test_get_id_range_for_partition_with_evenly_divisible_partition_size_offset():\n min_id = 4\n max_id = 6004\n partition_size = 2000\n id_range_item_count = max_id - min_id + 1 # this many individual IDs should be processed for continuous ID range\n etl_config = {\"partition_size\": partition_size}\n ctrl = PostgresElasticsearchIndexerController(etl_config)\n ctrl.min_id = min_id\n ctrl.max_id = max_id\n ctrl.record_count = id_range_item_count # assume records exist for each ID in range\n ctrl.config[\"partitions\"] = ctrl.determine_partitions()\n assert ctrl.config[\"partitions\"] == ceil(id_range_item_count / partition_size)\n partition_range = range(0, ctrl.config[\"partitions\"])\n # First batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[0])\n assert lower_bound == min_id\n assert upper_bound == lower_bound + (partition_size - 1)\n # Second batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[1])\n assert lower_bound == min_id + partition_size\n assert upper_bound == lower_bound + (partition_size - 1)\n # Last batch should go all the way up to max_id\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[-1])\n assert lower_bound == (min_id + (partition_size * partition_range[-1]))\n assert upper_bound == max_id\n id_set = set(range(min_id, max_id + 1))\n assert _remove_seen_ids(ctrl, id_set) == set({})",
"def ptb_producer(raw_data, batch_size, num_steps, name=None):\n\twith tf.name_scope(name, \"PTBProducer\", [raw_data, batch_size, num_steps]):\n\t\traw_data = tf.convert_to_tensor(raw_data, name=\"raw_data\", dtype=tf.int32)\n\n\t\tdata_len = tf.size(raw_data)\n\t\tbatch_len = data_len // batch_size\n\t\tdata = tf.reshape(raw_data[0: batch_size * batch_len],\n\t\t [batch_size, batch_len])\n\n\t\tepoch_size = (batch_len - 1) // num_steps\n\t\tassertion = tf.assert_positive(\n\t\t\tepoch_size,\n\t\t\tmessage=\"epoch_size == 0, decrease batch_size or num_steps\")\n\t\twith tf.control_dependencies([assertion]):\n\t\t\tepoch_size = tf.identity(epoch_size, name=\"epoch_size\")\n\n\t\ti = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\n\t\tx = tf.strided_slice(data, [0, i * num_steps],\n\t\t [batch_size, (i + 1) * num_steps])\n\t\tx.set_shape([batch_size, num_steps])\n\t\ty = tf.strided_slice(data, [0, i * num_steps + 1],\n\t\t [batch_size, (i + 1) * num_steps + 1])\n\t\ty.set_shape([batch_size, num_steps])\n\t\treturn x, y",
"def _grid_nd_sample(in_tensor, indices, batch_dims=1):\n # with tf.variable_scope(\"grid_nd_sample\", reuse=False):\n interpolation_indices = indices[..., -2:]\n rounded_indices = indices[..., :-2]\n inter_floor = tf.floor(interpolation_indices)\n inter_ceil = tf.math.ceil(interpolation_indices)\n p1_indices = tf.concat([rounded_indices, inter_floor], axis=-1, name=\"p1_ind\")\n p2_indices = tf.concat([rounded_indices, inter_ceil[..., :1], inter_floor[..., 1:2]], axis=-1,\n name=\"p2_ind\")\n p3_indices = tf.concat([rounded_indices, inter_floor[..., :1], inter_ceil[..., 1:2]], axis=-1,\n name=\"p3_ind\")\n p4_indices = tf.concat([rounded_indices, inter_ceil], axis=-1, name=\"p4_ind\")\n mu = interpolation_indices - inter_floor\n\n # with tf.name_scope(\"gather_corners\"):\n p1v = tf.gather_nd(in_tensor, tf.cast(p1_indices, tf.int32), batch_dims=batch_dims, name=\"gather_p1\")\n p2v = tf.gather_nd(in_tensor, tf.cast(p2_indices, tf.int32), batch_dims=batch_dims, name=\"gather_p2\")\n p3v = tf.gather_nd(in_tensor, tf.cast(p3_indices, tf.int32), batch_dims=batch_dims, name=\"gather_p3\")\n p4v = tf.gather_nd(in_tensor, tf.cast(p4_indices, tf.int32), batch_dims=batch_dims, name=\"gather_p4\")\n mu_x, mu_y = tf.split(mu, 2, axis=-1)\n with tf.name_scope(\"interpolate_p12\"):\n p12_interp = p1v * (1 - mu_x) + p2v * mu_x\n with tf.name_scope(\"interpolate_p34\"):\n p34_interp = p3v * (1 - mu_x) + p4v * mu_x\n with tf.name_scope(\"interpolate_y\"):\n vertical_interp = p12_interp * (1 - mu_y) + p34_interp * mu_y\n return vertical_interp",
"def test_get_id_range_for_partition_with_evenly_divisible():\n min_id = 1\n max_id = 100\n partition_size = 20\n id_range_item_count = max_id - min_id + 1 # this many individual IDs should be processed for continuous ID range\n assert id_range_item_count % partition_size == 0 # evenly divisible\n etl_config = {\"partition_size\": partition_size}\n ctrl = PostgresElasticsearchIndexerController(etl_config)\n ctrl.min_id = min_id\n ctrl.max_id = max_id\n ctrl.record_count = id_range_item_count # assume records exist for each ID in range\n ctrl.config[\"partitions\"] = ctrl.determine_partitions()\n assert ctrl.config[\"partitions\"] == ceil(id_range_item_count / partition_size)\n partition_range = range(0, ctrl.config[\"partitions\"])\n # First batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[0])\n assert lower_bound == min_id\n assert upper_bound == lower_bound + (partition_size - 1)\n # Second batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[1])\n assert lower_bound == min_id + partition_size\n assert upper_bound == lower_bound + (partition_size - 1)\n # Last batch should go all the way up to max_id\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[-1])\n assert lower_bound == (max_id - partition_size + 1) == (min_id + (partition_size * partition_range[-1]))\n assert upper_bound == max_id\n id_set = set(range(min_id, max_id + 1))\n assert _remove_seen_ids(ctrl, id_set) == set({})",
"def random_stretch_squeeze(inputs,\n resample_offset,\n seed=None):\n if inputs.shape.rank != 2:\n raise ValueError('inputs.shape.rank:%d must be 2' % inputs.shape.rank)\n\n inputs_shape = inputs.shape.as_list()\n batch_size = inputs_shape[0]\n sequence_length = inputs_shape[1]\n\n image = tf.expand_dims(inputs, 2) # feature\n image = tf.expand_dims(image, 3) # channels\n\n resample = 1.0 # when it is equal to 1 - no stretching or squeezing\n time_stretch_squeeze = tf.random.uniform(\n shape=[batch_size],\n minval=resample - resample_offset,\n maxval=resample + resample_offset,\n dtype=tf.float32,\n seed=seed)\n tf.print(time_stretch_squeeze)\n print(time_stretch_squeeze)\n shape = tf.shape(inputs)\n outputs = tf.TensorArray(inputs.dtype, 0, dynamic_size=True)\n for i in tf.range(batch_size):\n image_resized = tf.image.resize(\n images=image[i],\n size=(tf.cast((tf.cast(shape[1], tf.float32) * time_stretch_squeeze[i]),\n tf.int32), 1),\n preserve_aspect_ratio=False)\n image_resized_cropped = tf.image.resize_with_crop_or_pad(\n image_resized,\n target_height=sequence_length,\n target_width=1,\n )\n\n outputs = outputs.write(i, image_resized_cropped)\n\n outputs = tf.squeeze(outputs.stack(), axis=[2, 3])\n outputs.set_shape(inputs_shape)\n return outputs",
"def _shuffle(inputs):\n\texpand_inputs = tf.stack(inputs, axis = -1)\n\tshuffled_inputs = tf.random_shuffle(expand_inputs)\n\treturn tf.unstack(shuffled_inputs)",
"def _sample_propagation_indices(\n self, batch_size: int, _rng: torch.Generator\n ) -> torch.Tensor:\n model_len = (\n len(self.elite_models) if self.elite_models is not None else len(self)\n )\n if batch_size % model_len != 0:\n raise ValueError(\n \"To use GaussianMLP's ensemble propagation, the batch size must \"\n \"be a multiple of the number of models in the ensemble.\"\n )\n # rng causes segmentation fault, see https://github.com/pytorch/pytorch/issues/44714\n return torch.randperm(batch_size, device=self.device)",
"def training_input_fn():\n\n dataset = tf.data.Dataset.range(len(np.array(cube_features)[0]))\n dataset = dataset.repeat().shuffle(1000).batch(batch_size)\n dataset = dataset.map(mapping_function)\n dataset = dataset.prefetch(16)\n return dataset",
"def _GenerateUniqueRandomInputTensor(self, shape):\n num_elements = 1\n for size in shape:\n num_elements *= size\n x = np.arange(num_elements, dtype=np.float32)\n self._PRNG.shuffle(x)\n return x.reshape(shape)",
"def generator(data, lookback, delay, min_index, max_index, shuffle=False, batch_size=128, step=6):\n if max_index is None:\n max_index = len(data) - delay - 1\n i = min_index + lookback\n while 1:\n if shuffle:\n rows = np.random.randint(min_index + lookback, max_index + 1, size=batch_size)\n else:\n if i + batch_size >= max_index:\n i = min_index + lookback\n rows = np.arange(i, min(i + batch_size, max_index + 1))\n i += len(rows)\n samples = np.zeros((len(rows), lookback // step, data.shape[-1]))\n targets = np.zeros((len(rows),data.shape[-1]))\n for j, row in enumerate(rows):\n indices = range(rows[j] - lookback, rows[j], step)\n samples[j] = data[indices]\n targets[j] = data[rows[j] + delay]\n yield samples, targets",
"def random_grid_generator(self, *input_shape):\n rnd = np.random.RandomState(1)\n rnd.rand(input_shape)",
"def gen(num_batches,\n batch_size,\n seq_width,\n min_len,\n max_len):\n for batch_num in range(num_batches):\n\n # All batches have the same sequence length\n seq_len = random.randint(min_len, max_len)\n seq = np.random.binomial(1, 0.5, (seq_len, batch_size, seq_width))\n seq = Variable(torch.from_numpy(seq))\n\n # The input includes an additional channel used for the delimiter\n inp = Variable(torch.zeros(seq_len + 1, batch_size, seq_width + 1))\n inp[:seq_len, :, :seq_width] = seq\n inp[seq_len, :, seq_width] = 1.0 # delimiter in our control channel\n outp = seq.clone()\n\n yield batch_num+1, inp.float().to(params.device), outp.float().to(params.device)",
"def input_fn(data_dir,\n subset,\n num_shards,\n batch_size,\n seq_length=4,\n use_distortion_for_training=True):\n with tf.device('/gpu:0' if num_shards >= 1 else '/cpu:0'):\n use_distortion = subset == 'train' and use_distortion_for_training\n dataset = data_parser.DataSet(data_dir, subset, use_distortion, seq_length)\n image_batch, label_batch, occlusion_batch, depth_batch = dataset.make_batch(batch_size)\n\n # Note that passing num=batch_size is safe here, even though\n # dataset.batch(batch_size) can, in some cases, return fewer than batch_size\n # examples. This is because it does so only when repeating for a limited\n # number of epochs, but our dataset repeats forever.\n image_batch = tf.unstack(image_batch, num=batch_size, axis=0)\n label_batch = tf.unstack(label_batch, num=batch_size, axis=0)\n occlusion_batch = tf.unstack(occlusion_batch, num=batch_size, axis=0)\n depth_batch = tf.unstack(depth_batch, num=batch_size, axis=0)\n feature_shards = [[] for i in range(num_shards)]\n label_shards = [[] for i in range(num_shards)]\n occlusion_shards = [[] for i in range(num_shards)]\n depth_shards = [[] for i in range(num_shards)]\n skip = batch_size/num_shards\n for idx in range(num_shards):\n feature_shards[idx].append(tf.parallel_stack(image_batch[idx*skip:(idx+1)*skip]))\n label_shards[idx].append([[tf.parallel_stack(label_batch[idx*skip:(idx+1)*skip])], [tf.parallel_stack(occlusion_batch[idx*skip:(idx+1)*skip])], [tf.parallel_stack(depth_batch[idx*skip:(idx+1)*skip])]])\n\n return feature_shards, label_shards",
"def sample_valid_seeds(mask: Tensor, num_sampled_seed: int = 1024) -> Tensor: # noqa: E501\n device = mask.device\n batch_size = mask.shape[0]\n sample_inds = mask.new_zeros((batch_size, num_sampled_seed),\n dtype=torch.int64)\n for bidx in range(batch_size):\n # return index of non zero elements\n valid_inds = torch.nonzero(mask[bidx, :]).squeeze(-1)\n if len(valid_inds) < num_sampled_seed:\n # compute set t1 - t2\n t1 = torch.arange(num_sampled_seed, device=device)\n t2 = valid_inds % num_sampled_seed\n combined = torch.cat((t1, t2))\n uniques, counts = combined.unique(return_counts=True)\n difference = uniques[counts == 1]\n\n rand_inds = torch.randperm(\n len(difference),\n device=device)[:num_sampled_seed - len(valid_inds)]\n cur_sample_inds = difference[rand_inds]\n cur_sample_inds = torch.cat((valid_inds, cur_sample_inds))\n else:\n rand_inds = torch.randperm(\n len(valid_inds), device=device)[:num_sampled_seed]\n cur_sample_inds = valid_inds[rand_inds]\n sample_inds[bidx, :] = cur_sample_inds\n return sample_inds",
"def distorted_inputs( batch_size):\n\t#read_labels_dict(r\"D:\\PythonWorksp\\TensorFlow\\furniture\\bed\\tf-labels.txt\")\n\timage_list, label_list = read_labeled_image_list(LABEL_FILE)\n\tfor f in image_list:\n\t\tif not tf.gfile.Exists(f):\n\t\t\traise ValueError('Failed to find file: ' + f)\n\n\t#print(label_list)\n\n\timages = tf.convert_to_tensor(image_list, dtype=tf.string)\n\tlabels = tf.convert_to_tensor(label_list, dtype=tf.int64)\n\t\n\tprint(labels)\n\t# Makes an input queue\n\tinput_queue = tf.train.slice_input_producer([images, labels],\n\t\t\t\t\t\t\t\t\t\t\t\t#num_epochs=num_epochs,\n\t\t\t\t\t\t\t\t\t\t\t\tshuffle=True)\n\n\timage, label = read_images_from_disk(input_queue)\n\n\tprint(label)\n\t# Create a queue that produces the filenames to read.\n\t#filename_queue = tf.train.string_input_producer(filenames)\n\n\t# Read examples from files in the filename queue.\n\t#read_input = read_image(filename_queue)\n\treshaped_image = tf.cast(image, tf.float32)\n\n\theight = IMAGE_SIZE\n\twidth = IMAGE_SIZE\n\n\t# Image processing for training the network. Note the many random\n\t# distortions applied to the image.\n\n\t# Randomly crop a [height, width] section of the image.\n\t# distorted_image = tf.random_crop(reshaped_image, [height, width, 3])\n\tdistorted_image = tf.image.resize_images(reshaped_image, [height, width])\n\n\t# Randomly flip the image horizontally.\n\tdistorted_image = tf.image.random_flip_left_right(distorted_image)\n\n\t# Because these operations are not commutative, consider randomizing\n\t# the order their operation.\n\t# NOTE: since per_image_standardization zeros the mean and makes\n\t# the stddev unit, this likely has no effect see tensorflow#1458.\n\tdistorted_image = tf.image.random_brightness(distorted_image,\n\t\t\t\t\t\t\t\t\t\t\t max_delta=63)\n\tdistorted_image = tf.image.random_contrast(distorted_image,\n\t\t\t\t\t\t\t\t\t\t\t lower=0.2, upper=1.8)\n\n\t# Subtract off the mean and divide by the variance of the pixels.\n\tfloat_image = tf.image.per_image_standardization(distorted_image)\n\n\t# Set the shapes of tensors.\n\tfloat_image.set_shape([height, width, 3])\n\t#label.set_shape([1])#todo\n\n\t# Ensure that the random shuffling has good mixing properties.\n\tmin_fraction_of_examples_in_queue = 0.4\n\tmin_queue_examples = int(NUM_EXAMPLES_PER_EPOCH_FOR_TRAIN *\n\t\t\t\t\t\t min_fraction_of_examples_in_queue)\n\tprint ('Filling queue with %d images before starting to train. '\n\t\t 'This will take a few minutes.' % min_queue_examples)\n\n\t# Generate a batch of images and labels by building up a queue of examples.\n\treturn _generate_image_and_label_batch(float_image, label,\n\t\t\t\t\t\t\t\t\t\t min_queue_examples, batch_size,\n\t\t\t\t\t\t\t\t\t\t shuffle=True)"
] | [
"0.67417186",
"0.59812593",
"0.5888701",
"0.5693134",
"0.56846994",
"0.56515205",
"0.56035835",
"0.5600005",
"0.55920666",
"0.5584618",
"0.557734",
"0.5556954",
"0.55525434",
"0.553969",
"0.55269265",
"0.5495206",
"0.5448808",
"0.5438618",
"0.5402123",
"0.5388766",
"0.53828126",
"0.53713083",
"0.5366495",
"0.53579044",
"0.5344078",
"0.534241",
"0.53260136",
"0.5320521",
"0.5316641",
"0.53137904"
] | 0.7107893 | 0 |
Like tf.train.slice_input_producer, but uses random_index_input_producer instead of range_input_producer. | def random_slice_input_producer(tensor_list, num_epochs=None, seed=None,
capacity=32, shared_name=None, name=None):
with ops.op_scope(tensor_list, name, "input_producer"):
tensor_list = ops.convert_n_to_tensor_or_indexed_slices(tensor_list)
if not tensor_list:
raise ValueError(
"Expected at least one tensor in slice_input_producer().")
range_size = tf.to_int64(array_ops.shape(tensor_list[0])[0])
queue = random_index_input_producer(
range_size, num_epochs=num_epochs,
seed=seed, capacity=capacity,
shared_name=shared_name)
index = queue.dequeue()
output = [array_ops.gather(t, index) for t in tensor_list]
return output | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def random_index_input_producer(limit, num_epochs=None, seed=None,\n capacity=32, shared_name=None, name=None):\n with ops.op_scope([limit], name, \"input_producer\") as name:\n index_tensor = tf.random_uniform([limit], minval=0, maxval=limit, dtype=tf.int64, seed=seed)\n return tf.train.input_producer(\n index_tensor, [], num_epochs, False, None, capacity,\n shared_name, name, \"fraction_of_%d_full\" % capacity)",
"def _producer(raw_data, batch_size, num_steps, name=None):\n with tf.name_scope(name, \"Producer\", [raw_data, batch_size, num_steps]):\n raw_data = tf.convert_to_tensor(raw_data, name=\"raw_data\", dtype=tf.int32)\n\n data_len = tf.size(raw_data)\n batch_len = data_len // batch_size\n data = tf.reshape(raw_data[0 : batch_size * batch_len],\n [batch_size, batch_len])\n\n epoch_size = (batch_len - 1) // num_steps\n assertion = tf.assert_positive(\n epoch_size,\n message=\"epoch_size == 0, decrease batch_size or num_steps\")\n with tf.control_dependencies([assertion]):\n epoch_size = tf.identity(epoch_size, name=\"epoch_size\")\n\n i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\n x = tf.slice(data, [0, i * num_steps], [batch_size, num_steps])\n y = tf.slice(data, [0, i * num_steps + 1], [batch_size, num_steps])\n return x, y",
"def ptb_producer(raw_data, unigrams, batch_size, num_steps, num_true, num_sampled, vocab_size, name=None):\n with tf.name_scope(name, \"PTBProducer\", [raw_data, batch_size, num_steps]):\n raw_data = tf.convert_to_tensor(raw_data, name=\"raw_data\", dtype=tf.int64)\n\n data_len = tf.size(raw_data)\n batch_len = data_len // batch_size\n data = tf.reshape(raw_data[0 : batch_size * batch_len],\n [batch_size, batch_len])\n\n epoch_size = (batch_len - 1) // num_steps\n assertion = tf.assert_positive(\n epoch_size,\n message=\"epoch_size == 0, decrease batch_size or num_steps\")\n with tf.control_dependencies([assertion]):\n epoch_size = tf.identity(epoch_size, name=\"epoch_size\")\n\n i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\n x = tf.strided_slice(data, [0, i * num_steps],\n [batch_size, (i + 1) * num_steps])\n x.set_shape([batch_size, num_steps])\n y = tf.strided_slice(data, [0, i * num_steps + 1],\n [batch_size, (i + 1) * num_steps + 1])\n y.set_shape([batch_size, num_steps])\n ns = None\n\n psw_list = get_split_weights_for_perp(tf.reshape(y, [-1]), unigrams, vocab_size, batch_size*num_steps)\n \n for i in range(len(psw_list)):\n psw_list[i] = tf.reshape(psw_list[i], [batch_size, num_steps])\n\n '''\n if num_sampled > 0:\n y_list = tf.unpack(y, axis=1)\n ns_list = []\n for i in range(num_steps):\n ns = get_neg_samples(batch_size, num_true, num_sampled, vocab_size, y_list[i], unigrams)\n ns_list.append(ns)\n else:\n ns = None\n '''\n\n return x, y, ns, psw_list",
"def ptb_producer(raw_data, batch_size, num_steps, name=None):\n with tf.name_scope(name, \"PTBProducer\", [raw_data, batch_size, num_steps]):\n raw_data = tf.convert_to_tensor(raw_data, name=\"raw_data\", dtype=tf.int32)\n\n data_len = tf.size(raw_data)\n batch_len = data_len // batch_size\n data = tf.reshape(raw_data[0 : batch_size * batch_len],\n [batch_size, batch_len])\n\n epoch_size = (batch_len - 1) // num_steps\n assertion = tf.assert_positive(\n epoch_size,\n message=\"epoch_size == 0, decrease batch_size or num_steps\")\n with tf.control_dependencies([assertion]):\n epoch_size = tf.identity(epoch_size, name=\"epoch_size\")\n\n i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\n x = tf.strided_slice(data, [0, i * num_steps],\n [batch_size, (i + 1) * num_steps])\n x.set_shape([batch_size, num_steps])\n y = tf.strided_slice(data, [0, i * num_steps + 1],\n [batch_size, (i + 1) * num_steps + 1])\n y.set_shape([batch_size, num_steps])\n return x, y",
"def ptb_producer(raw_data, raw_label, batch_size, config, name=None):\n with tf.name_scope(name, \"PTBProducer\", [raw_data, raw_label, batch_size]):\n feature_len = raw_data.shape[1]\n class_len = raw_label.shape[1]\n data_len = raw_data.shape[0]\n epoch_size = data_len // batch_size\n\n with tf.device(config.use_gpu):\n data_ph = tf.placeholder(dtype=tf.float32, shape=raw_data.shape, name='data_ph')\n label_ph = tf.placeholder(dtype=tf.float32, shape=raw_label.shape, name='label_ph')\n data_vb = tf.Variable(data_ph, trainable=False, collections=[], name='data_vb')\n label_vb = tf.Variable(label_ph, trainable=False, collections=[], name='label_vb')\n\n # epoch_size = batch_len\n # assertion = tf.assert_positive(\n # epoch_size,\n # message=\"epoch_size == 0, decrease batch_size or num_steps\")\n # with tf.control_dependencies([assertion]):\n # epoch_size = tf.identity(epoch_size, name=\"epoch_size\")\n\n # image, label = tf.train.slice_input_producer([data_vb, label_vb], name='producer')\n # x, y = tf.train.batch([image, label], batch_size=batch_size, capacity=int(batch_size * (0.4 * epoch_size + 3)), name='batch')\n\n i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\n x = tf.strided_slice(data_vb, [i * batch_size, 0], [(i + 1) * batch_size, feature_len])\n x.set_shape([batch_size, feature_len])\n y = tf.strided_slice(label_vb, [i * batch_size, 0], [(i + 1) * batch_size, class_len])\n y.set_shape([batch_size, class_len])\n return x, y, data_vb.initializer, label_vb.initializer, data_ph, label_ph, data_vb, label_vb",
"def ptb_producer(raw_data, batch_size, num_steps, name=None):\n\twith tf.name_scope(name, \"PTBProducer\", [raw_data, batch_size, num_steps]):\n\t\traw_data = tf.convert_to_tensor(raw_data, name=\"raw_data\", dtype=tf.int32)\n\n\t\tdata_len = tf.size(raw_data)\n\t\tbatch_len = data_len // batch_size\n\t\tdata = tf.reshape(raw_data[0: batch_size * batch_len],\n\t\t [batch_size, batch_len])\n\n\t\tepoch_size = (batch_len - 1) // num_steps\n\t\tassertion = tf.assert_positive(\n\t\t\tepoch_size,\n\t\t\tmessage=\"epoch_size == 0, decrease batch_size or num_steps\")\n\t\twith tf.control_dependencies([assertion]):\n\t\t\tepoch_size = tf.identity(epoch_size, name=\"epoch_size\")\n\n\t\ti = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\n\t\tx = tf.strided_slice(data, [0, i * num_steps],\n\t\t [batch_size, (i + 1) * num_steps])\n\t\tx.set_shape([batch_size, num_steps])\n\t\ty = tf.strided_slice(data, [0, i * num_steps + 1],\n\t\t [batch_size, (i + 1) * num_steps + 1])\n\t\ty.set_shape([batch_size, num_steps])\n\t\treturn x, y",
"def data_producer(raw_data, batch_size, num_steps, name=None):\n with tf.name_scope(name, \"DataProducer\", [raw_data, batch_size, num_steps]):\n raw_data = tf.convert_to_tensor(raw_data, name=\"raw_data\", dtype=tf.int32)\n\n data_len = tf.size(raw_data)\n batch_len = data_len // batch_size\n data = tf.reshape(raw_data[0: batch_size * batch_len],\n [batch_size, batch_len])\n\n epoch_size = (batch_len - 1) // num_steps\n assertion = tf.assert_positive(\n epoch_size,\n message=\"epoch_size == 0, decrease batch_size or num_steps\")\n with tf.control_dependencies([assertion]):\n epoch_size = tf.identity(epoch_size, name=\"epoch_size\")\n\n i = tf.train.range_input_producer(epoch_size, shuffle=False).dequeue()\n x = tf.strided_slice(data, [0, i * num_steps],\n [batch_size, (i + 1) * num_steps])\n x.set_shape([batch_size, num_steps])\n y = tf.strided_slice(data, [0, i * num_steps + 1],\n [batch_size, (i + 1) * num_steps + 1])\n y.set_shape([batch_size, num_steps])\n return x, y",
"def rnnlm_producer(raw_data, batch_size, num_steps, name=None):\n\twith tf.name_scope(name, \"RNNLMProducer\", [raw_data, batch_size, num_steps]):\n\t\traw_data = tf.convert_to_tensor(raw_data, name=\"raw_data\", dtype=tf.int32)\n\t\tdata_len = tf.size(raw_data)\n\t\tbatch_len = data_len // batch_size\n\t\tdata = tf.reshape(raw_data[0 : batch_size * batch_len], [batch_size, batch_len])\n\n\t\tepoch_size = (batch_len - 1) // num_steps\n\t\tassertion = tf.assert_positive(epoch_size,\n\t\t\tmessage=\"epoch_size == 0, decrease batch_size or num_steps\")\n\t\twith tf.control_dependencies([assertion]):\n\t\t\tepoch_size = tf.identity(epoch_size, name=\"epoch_size\")\n\n\t\ti = tf.train.range_input_producer(epoch_size, shuffle = False).dequeue()\n\t\tx = tf.strided_slice(data, [0, i * num_steps],\n\t\t\t[batch_size, (i + 1) * num_steps])\n\t\tx.set_shape([batch_size, num_steps])\n\t\ty = tf.strided_slice(data, [0, i * num_steps + 1],\n\t\t\t[batch_size, (i + 1) * num_steps + 1])\n\t\ty.set_shape([batch_size, num_steps])\n\t\treturn x, y",
"def batch_inputs(dataset, batch_size, train, semantic_level_settings, num_preprocess_threads=16):\n # Force all input processing onto CPU in order to reserve the GPU for the forward and backward.\n with tf.device('/cpu:0'):\n with tf.name_scope('batch_processing'):\n data_files = dataset.data_files()\n if data_files is None:\n raise ValueError('No data files found for this dataset')\n\n examples_per_shard = 1024\n # Create filename_queue\n if train:\n filename_queue = tf.train.string_input_producer(data_files, shuffle=True, capacity=16)\n input_queue_memory_factor = 16\n num_readers = 4\n else:\n filename_queue = tf.train.string_input_producer(data_files, shuffle=False, capacity=1)\n input_queue_memory_factor = 1\n num_readers = 1\n if num_preprocess_threads % 4:\n raise ValueError('Please make num_preprocess_threads a multiple '\n 'of 4 (%d % 4 != 0).', num_preprocess_threads)\n\n min_queue_examples = examples_per_shard * input_queue_memory_factor\n if train:\n examples_queue = tf.RandomShuffleQueue(\n capacity=min_queue_examples + 3 * batch_size,\n min_after_dequeue=min_queue_examples,\n dtypes=[tf.string])\n # Create multiple readers to populate the queue of examples.\n enqueue_ops = []\n for _ in range(num_readers):\n reader = dataset.reader()\n _, value = reader.read(filename_queue)\n enqueue_ops.append(examples_queue.enqueue([value]))\n\n tf.train.queue_runner.add_queue_runner(\n tf.train.queue_runner.QueueRunner(examples_queue, enqueue_ops))\n example_serialized = examples_queue.dequeue()\n else:\n examples_queue = tf.FIFOQueue(\n capacity=examples_per_shard + 3 * batch_size,\n dtypes=[tf.string])\n # Create multiple readers to populate the queue of examples.\n reader = dataset.reader()\n _, example_serialized = reader.read(filename_queue)\n\n images_and_labels = []\n for thread_id in range(num_preprocess_threads):\n # Parse a serialized Example proto to extract the image and metadata.\n image_buffer, labels, filename = parse_example_proto(example_serialized,\n semantic_level_settings)\n image = decode_jpeg(image_buffer)\n if train:\n image = distort_image(image, dataset.height, dataset.width, thread_id)\n else:\n image = eval_image(image, dataset.height, dataset.width)\n\n # Finally, rescale to [-1,1] instead of [0, 1)\n image = tf.subtract(image, 0.5)\n image = tf.multiply(image, 2.0)\n images_and_labels.append([image, filename] + labels)\n\n batch_data = tf.train.batch_join(\n images_and_labels,\n batch_size=batch_size,\n capacity=2 * num_preprocess_threads * batch_size)\n\n # Get image data, filenames, level_labels separately.\n images = batch_data[0]\n images = tf.cast(images, tf.float32)\n images = tf.reshape(images, shape=[batch_size, dataset.height, dataset.width, 3])\n\n filenames = tf.reshape(batch_data[1], [batch_size])\n level_labels = {}\n for idx, settings in enumerate(semantic_level_settings):\n level_labels[settings[0]] = tf.reshape(batch_data[2 + idx], [batch_size, -1])\n\n return (images, level_labels, filenames)",
"def slice_features(self, input, indices):\n t_input = tf.transpose(a=input)\n gather_idxs = np.array([[i] for i in indices]).astype(np.int32)\n t_actual = tf.gather_nd(t_input, gather_idxs)\n actual = tf.transpose(a=t_actual)\n return actual",
"def sample_data_input_fn(params):\n window_size = params['window_size']\n batch_size = params['batch_size']\n\n dataset_names = sample_data.get_data_names()\n all_downsampled = [sample_data.get_downsampled_data(name) for name in dataset_names]\n np_dtype = all_downsampled[0].dtype\n _, num_columns = all_downsampled[0].shape\n assert num_columns == 3\n\n # For each data item, this computes\n time_diffs = [(x[1:, 0] - x[:-1, 0]) for x in all_downsampled]\n median_time_diff = np.median(np.concatenate(time_diffs, axis=0))\n lower, upper = median_time_diff * 0.8, median_time_diff * 1.2\n valid_start_window_indices = [\n get_window_valid_indices(d, lower, upper, window_size) for d in time_diffs\n ]\n for name, valid_indices in zip(dataset_names, valid_start_window_indices):\n if np.size(valid_indices) == 0:\n raise ValueError(\"{} has no valid window ranges\".format(name))\n\n def get_samples_py_op(idx_array):\n assert isinstance(idx_array, np.ndarray)\n assert idx_array.shape == (batch_size, )\n samp_results = np.zeros((batch_size, window_size, num_columns), dtype=np_dtype)\n for i, sample_idx in enumerate(idx_array):\n start_idx = random.choice(valid_start_window_indices[sample_idx])\n samp_results[i, :, :] = all_downsampled[sample_idx][start_idx: (\n start_idx + window_size)]\n assert samp_results.shape == (batch_size, window_size, num_columns)\n return samp_results\n\n def get_window_sample(idx_tensor):\n samples = tf.py_func(get_samples_py_op, [idx_tensor], np_dtype)\n samples.set_shape((batch_size, window_size, num_columns))\n return samples\n\n def random_negative_py_op(idx_array):\n assert isinstance(idx_array, np.ndarray)\n neg_idx_array = np.copy(idx_array)\n for i, idx in enumerate(idx_array):\n while neg_idx_array[i] == idx_array[i]:\n neg_idx_array[i] = random.randint(0, len(all_downsampled) - 1)\n return neg_idx_array\n\n def get_negative_window_sample(idx_tensor):\n neg_idx_tensor = tf.py_func(\n random_negative_py_op,\n [idx_tensor],\n idx_tensor.dtype)\n return get_window_sample(neg_idx_tensor)\n\n # Current sample method: First select sample index, then select window.\n num_samples = len(all_downsampled)\n if num_samples < 2:\n raise ValueError(\"Need at least 2 light curves for negative samples!\")\n dataset = tf.data.Dataset.range(num_samples)\n dataset = dataset.repeat().shuffle(num_samples * 2).batch(batch_size)\n\n positive = dataset.map(lambda idx_tensor: {\n 'left': get_window_sample(idx_tensor),\n 'right': get_window_sample(idx_tensor),\n 'goal': tf.constant([1.0] * batch_size, dtype=tf.float64)\n })\n negative = dataset.map(lambda idx_tensor: {\n 'left': get_window_sample(idx_tensor),\n 'right': get_negative_window_sample(idx_tensor),\n 'goal': tf.constant([0.0] * batch_size, dtype=tf.float64)\n })\n\n # TODO(gatoatigrado): Experiment with shuffling positive & negative within a batch.\n # Currently each batch is just positive or negative.\n assert positive.output_shapes == negative.output_shapes\n assert negative.output_types == positive.output_types\n dataset = tf.contrib.data.sample_from_datasets((positive, negative))\n assert dataset.output_shapes == negative.output_shapes\n return dataset",
"def input_fn(data_dir,\n subset,\n num_shards,\n batch_size,\n seq_length=4,\n use_distortion_for_training=True):\n with tf.device('/gpu:0' if num_shards >= 1 else '/cpu:0'):\n use_distortion = subset == 'train' and use_distortion_for_training\n dataset = data_parser.DataSet(data_dir, subset, use_distortion, seq_length)\n image_batch, label_batch, occlusion_batch, depth_batch = dataset.make_batch(batch_size)\n\n # Note that passing num=batch_size is safe here, even though\n # dataset.batch(batch_size) can, in some cases, return fewer than batch_size\n # examples. This is because it does so only when repeating for a limited\n # number of epochs, but our dataset repeats forever.\n image_batch = tf.unstack(image_batch, num=batch_size, axis=0)\n label_batch = tf.unstack(label_batch, num=batch_size, axis=0)\n occlusion_batch = tf.unstack(occlusion_batch, num=batch_size, axis=0)\n depth_batch = tf.unstack(depth_batch, num=batch_size, axis=0)\n feature_shards = [[] for i in range(num_shards)]\n label_shards = [[] for i in range(num_shards)]\n occlusion_shards = [[] for i in range(num_shards)]\n depth_shards = [[] for i in range(num_shards)]\n skip = batch_size/num_shards\n for idx in range(num_shards):\n feature_shards[idx].append(tf.parallel_stack(image_batch[idx*skip:(idx+1)*skip]))\n label_shards[idx].append([[tf.parallel_stack(label_batch[idx*skip:(idx+1)*skip])], [tf.parallel_stack(occlusion_batch[idx*skip:(idx+1)*skip])], [tf.parallel_stack(depth_batch[idx*skip:(idx+1)*skip])]])\n\n return feature_shards, label_shards",
"def input_fn(self, params):\n with tf.variable_scope('data_provider'):\n if self.mode == enums.ModelMode.INFERENCE:\n images = tf.placeholder(tf.float32, [\n None, self.preprocessor.preprocessing_options.image_size,\n self.preprocessor.preprocessing_options.image_size, 3\n ])\n return tf_estimator.export.TensorServingInputReceiver(\n features=images, receiver_tensors=images)\n\n # Retrieves the batch size for the current shard. The # of shards is\n # computed according to the input pipeline deployment. See\n # tf.contrib.tpu.RunConfig for details.\n batch_size = params['batch_size']\n\n if 'context' in params:\n current_host = params['context'].current_input_fn_deployment()[1]\n num_hosts = params['context'].num_hosts\n num_cores = params['context'].num_replicas\n else:\n current_host = 0\n num_hosts = 1\n num_cores = 1\n\n dataset = self.make_source_dataset(current_host, num_hosts)\n\n if (self.mode == enums.ModelMode.TRAIN and self.max_samples and\n self.max_samples > 0):\n dataset = dataset.take(self.max_samples)\n\n dataset = dataset.map(self.dataset_parser, num_parallel_calls=num_cores)\n if self.label_noise_prob > 0. and self.mode == enums.ModelMode.TRAIN:\n dataset = dataset.map(\n self._label_noise_fn, num_parallel_calls=num_cores)\n\n if self.cache:\n dataset = dataset.cache()\n if self.mode == enums.ModelMode.TRAIN:\n dataset = dataset.shuffle(self.shuffle_buffer).repeat()\n\n # Use the fused map-and-batch operation.\n #\n # For XLA, we must used fixed shapes. Because we repeat the source\n # training dataset indefinitely, we can use `drop_remainder=True` to get\n # fixed-size batches without dropping any training examples.\n #\n # When evaluating, `drop_remainder=True` prevents accidentally evaluating\n # the same image twice by dropping the final batch if it is less than a\n # full batch size. As long as this validation is done with consistent\n # batch size, exactly the same images will be used.\n dataset = dataset.apply(\n tf.data.experimental.map_and_batch(\n self._preprocess_image,\n batch_size=batch_size,\n num_parallel_batches=num_cores,\n drop_remainder=True))\n\n # Assign static batch size dimension\n dataset = dataset.map(\n functools.partial(self._set_static_batch_dim, batch_size))\n\n # Prefetch overlaps in-feed with training\n dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)\n\n return dataset",
"def sample_with_policy(self, rng, params, batch_size, policy, context=None):\n logging.info('Sampling from model (quickly)...')\n chain_sharded = self.p_sample_with_policy(rng, params, batch_size, policy,\n context)\n chain = chain_sharded.reshape(\n chain_sharded.shape[0], batch_size, *chain_sharded.shape[3:])\n return chain",
"def _GetShardedBatch() -> tf.types.experimental.distributed.PerReplica:\n per_host_batches: List[py_utils.NestedMap] = []\n # Note: `available_devices` omits the executor host; just those with TPUs.\n for host_device in py_utils.Flatten(\n cluster_factory.Current().available_devices.tolist()\n ):\n with tf.device(host_device):\n batch = self.task.input.GetPreprocessedInputBatch()\n\n # Remove bucket_keys; this relates to GenericInput pipelines.\n batch = batch.FilterKeyVal(lambda k, _: not k.endswith('bucket_keys'))\n\n # Process embedding ID features according to their specified types.\n batch = batch.TransformWithKey(\n tpu_embedding_layers_v2.TPU_EMBEDDING_MANAGER.ProcessInputFeature\n )\n\n per_host_batches.extend(Split(batch, replicas_per_host))\n\n return strategy.experimental_distribute_values_from_function(\n lambda ctx: per_host_batches[ctx.replica_id_in_sync_group]\n )",
"def train_input(config, params):\n\n def _grouping(pim, pla, iid):\n # group dataset elements as required by estimator\n features = {\n # 'rawimages': rim,\n 'proimages': pim,\n 'imageids': iid,\n # 'rawimagespaths': imp,\n # 'rawlabelspaths': lap,\n }\n labels = {\n # 'rawlabels': rla,\n 'prolabels': pla,\n }\n\n # next line for distributed debugging\n # tf.string tensors is not supported for DMA read/write to GPUs (TF bug)\n if params.distribute:\n # del features['rawimagespaths']\n # del features['rawlabelspaths']\n del features['imageids']\n\n return (features, labels)\n\n with tf.name_scope('input_pipeline'):\n dataset = prebatch_dataset(config, params)\n dataset = dataset.batch(get_temp_Nb(config, params.Nb))\n dataset = postbatch_dataset(dataset, config, params)\n dataset = dataset.map(_grouping, num_parallel_calls=NUM_PARALLEL_CALLS)\n options = tf.data.Options()\n options.experimental_autotune = True\n # seems than on average gives faster results\n dataset = dataset.prefetch(None).with_options(options)\n\n return dataset",
"def slice_batch(x, n_gpus, part):\n sh = K.shape(x)\n L = sh[0] // n_gpus\n if part == n_gpus - 1:\n return x[part*L:]\n return x[part*L:(part+1)*L]",
"def training_input_fn():\n\n dataset = tf.data.Dataset.range(len(np.array(cube_features)[0]))\n dataset = dataset.repeat().shuffle(1000).batch(batch_size)\n dataset = dataset.map(mapping_function)\n dataset = dataset.prefetch(16)\n return dataset",
"def input_fn(\n data_dir, subset, num_shards, batch_size, use_distortion_for_training=True\n):\n with tf.device(\"/cpu:0\"):\n use_distortion = subset == \"train\" and use_distortion_for_training\n dataset = cifar10.Cifar10DataSet(data_dir, subset, use_distortion)\n image_batch, label_batch = dataset.make_batch(batch_size)\n if num_shards <= 1:\n # No GPU available or only 1 GPU.\n return [image_batch], [label_batch]\n\n # Note that passing num=batch_size is safe here, even though\n # dataset.batch(batch_size) can, in some cases, return fewer than batch_size\n # examples. This is because it does so only when repeating for a limited\n # number of epochs, but our dataset repeats forever.\n image_batch = tf.unstack(image_batch, num=batch_size, axis=0)\n label_batch = tf.unstack(label_batch, num=batch_size, axis=0)\n feature_shards = [[] for i in range(num_shards)]\n label_shards = [[] for i in range(num_shards)]\n for i in xrange(batch_size):\n idx = i % num_shards\n feature_shards[idx].append(image_batch[i])\n label_shards[idx].append(label_batch[i])\n feature_shards = [tf.parallel_stack(x) for x in feature_shards]\n label_shards = [tf.parallel_stack(x) for x in label_shards]\n return feature_shards, label_shards",
"def _grid_nd_sample(in_tensor, indices, batch_dims=1):\n # with tf.variable_scope(\"grid_nd_sample\", reuse=False):\n interpolation_indices = indices[..., -2:]\n rounded_indices = indices[..., :-2]\n inter_floor = tf.floor(interpolation_indices)\n inter_ceil = tf.math.ceil(interpolation_indices)\n p1_indices = tf.concat([rounded_indices, inter_floor], axis=-1, name=\"p1_ind\")\n p2_indices = tf.concat([rounded_indices, inter_ceil[..., :1], inter_floor[..., 1:2]], axis=-1,\n name=\"p2_ind\")\n p3_indices = tf.concat([rounded_indices, inter_floor[..., :1], inter_ceil[..., 1:2]], axis=-1,\n name=\"p3_ind\")\n p4_indices = tf.concat([rounded_indices, inter_ceil], axis=-1, name=\"p4_ind\")\n mu = interpolation_indices - inter_floor\n\n # with tf.name_scope(\"gather_corners\"):\n p1v = tf.gather_nd(in_tensor, tf.cast(p1_indices, tf.int32), batch_dims=batch_dims, name=\"gather_p1\")\n p2v = tf.gather_nd(in_tensor, tf.cast(p2_indices, tf.int32), batch_dims=batch_dims, name=\"gather_p2\")\n p3v = tf.gather_nd(in_tensor, tf.cast(p3_indices, tf.int32), batch_dims=batch_dims, name=\"gather_p3\")\n p4v = tf.gather_nd(in_tensor, tf.cast(p4_indices, tf.int32), batch_dims=batch_dims, name=\"gather_p4\")\n mu_x, mu_y = tf.split(mu, 2, axis=-1)\n with tf.name_scope(\"interpolate_p12\"):\n p12_interp = p1v * (1 - mu_x) + p2v * mu_x\n with tf.name_scope(\"interpolate_p34\"):\n p34_interp = p3v * (1 - mu_x) + p4v * mu_x\n with tf.name_scope(\"interpolate_y\"):\n vertical_interp = p12_interp * (1 - mu_y) + p34_interp * mu_y\n return vertical_interp",
"def BeamSource(self, batch_size: Optional[int] = None) -> beam.PTransform:",
"def test_get_id_range_for_partition_with_sparse_range():\n min_id = 4\n max_id = 5999\n partition_size = 2000\n id_range_item_count = max_id - min_id + 1 # this many individual IDs should be processed for continuous ID range\n record_ids = {4, 5, 7, 99, 101, 120, 1998, 1999, 2000, 2001, 2002, 4444, 5999}\n etl_config = {\"partition_size\": partition_size}\n ctrl = PostgresElasticsearchIndexerController(etl_config)\n ctrl.min_id = min_id\n ctrl.max_id = max_id\n ctrl.record_count = len(record_ids)\n ctrl.config[\"partitions\"] = ctrl.determine_partitions()\n assert ctrl.config[\"partitions\"] == ceil(id_range_item_count / partition_size)\n partition_range = range(0, ctrl.config[\"partitions\"])\n # First batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[0])\n assert lower_bound == min_id\n assert upper_bound == lower_bound + (partition_size - 1)\n # Second batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[1])\n assert lower_bound == min_id + partition_size\n assert upper_bound == lower_bound + (partition_size - 1)\n # Last batch should go all the way up to max_id\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[-1])\n assert lower_bound == (min_id + (partition_size * partition_range[-1]))\n assert upper_bound == max_id\n assert _remove_seen_ids(ctrl, record_ids) == set({})",
"def prefetch_input_data(reader,\n\t\t\t\t\t\t\t\t\t\t\t\tcontents,\n\t\t\t\t\t\t\t\t\t\t\t\tis_training,\n\t\t\t\t\t\t\t\t\t\t\t\tbatch_size,\n\t\t\t\t\t\t\t\t\t\t\t\tvalues_per_shard,\n\t\t\t\t\t\t\t\t\t\t\t\tinput_queue_capacity_factor=16,\n\t\t\t\t\t\t\t\t\t\t\t\tnum_reader_threads=1,\n\t\t\t\t\t\t\t\t\t\t\t\tshard_queue_name=\"filename_queue\",\n\t\t\t\t\t\t\t\t\t\t\t\tvalue_queue_name=\"input_queue\"):\n\tdata_files = []\n\tfor pattern in contents:\n\t\tdata_files.extend(tf.gfile.Glob(pattern))\n\tif not data_files:\n\t\ttf.logging.fatal(\"Found no input files matching %s\", file_pattern)\n\telse:\n\t\ttf.logging.info(\"Prefetching values from %d files matching %s\",\n\t\t\t\t\t\t\t\t\t\tlen(data_files), file_pattern)\n\n\tif is_training:\n\t\tfilename_queue = tf.train.string_input_producer(\n\t\t\t\tdata_files, shuffle=True, capacity=16, name=shard_queue_name)\n\t\tmin_queue_examples = values_per_shard * input_queue_capacity_factor\n\t\tcapacity = min_queue_examples + 100 * batch_size\n\t\tvalues_queue = tf.RandomShuffleQueue(\n\t\t\t\tcapacity=capacity,\n\t\t\t\tmin_after_dequeue=min_queue_examples,\n\t\t\t\tdtypes=[tf.string],\n\t\t\t\tname=\"random_\" + value_queue_name)\n\telse:\n\t\tfilename_queue = tf.train.string_input_producer(\n\t\t\t\tdata_files, shuffle=False, capacity=1, name=shard_queue_name)\n\t\tcapacity = values_per_shard + 3 * batch_size\n\t\tvalues_queue = tf.FIFOQueue(\n\t\t\t\tcapacity=capacity, dtypes=[tf.string], name=\"fifo_\" + value_queue_name)\n\n\tenqueue_ops = []\n\tfor _ in range(num_reader_threads):\n\t\t_, value = reader.read(filename_queue)\n\t\tenqueue_ops.append(values_queue.enqueue([value]))\n\ttf.train.queue_runner.add_queue_runner(tf.train.queue_runner.QueueRunner(\n\t\t\tvalues_queue, enqueue_ops))\n\ttf.summary.scalar(\n\t\t\t\"queue/%s/fraction_of_%d_full\" % (values_queue.name, capacity),\n\t\t\ttf.cast(values_queue.size(), tf.float32) * (1. / capacity))\n\n\treturn values_queue",
"def simple_slice():\n examples = [\n benchmark.Example(\n inputs=[\n [[12, 34, 56, 78], [-1, -2, -3, -4]],\n -1,\n ],\n output=[[34, 56], [-2, -3]],\n ),\n ]\n constants = []\n description = 'Slice a tensor'\n target_program = 'in1[:, 1:in2]'\n source = 'handwritten task'\n return benchmark.Benchmark(examples=examples,\n constants=constants,\n description=description,\n target_program=target_program,\n source=source,\n name='simple_slice')",
"def test_get_id_range_for_partition_with_empty_partitions():\n min_id = 1\n max_id = 100\n partition_size = 20\n id_range_item_count = max_id - min_id + 1 # this many individual IDs should be processed for continuous ID range\n record_ids = {1, 5, 7, 15, 19, 20, 41, 100}\n etl_config = {\"partition_size\": partition_size}\n ctrl = PostgresElasticsearchIndexerController(etl_config)\n ctrl.min_id = min_id\n ctrl.max_id = max_id\n ctrl.record_count = len(record_ids)\n ctrl.config[\"partitions\"] = ctrl.determine_partitions()\n assert ctrl.config[\"partitions\"] == ceil(id_range_item_count / partition_size)\n partition_range = range(0, ctrl.config[\"partitions\"])\n # First batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[0])\n assert lower_bound == min_id\n assert upper_bound == lower_bound + (partition_size - 1)\n # Second batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[1])\n assert lower_bound == min_id + partition_size\n assert upper_bound == lower_bound + (partition_size - 1)\n # Last batch should go all the way up to max_id\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[-1])\n assert lower_bound == (min_id + (partition_size * partition_range[-1]))\n assert upper_bound == max_id\n assert _remove_seen_ids(ctrl, record_ids) == set({})",
"def _input_fn():\n # TODO(seominjoon): There is bottleneck in data feeding, slow for N >= 128.\n filename_queue = tf.train.string_input_producer(\n filenames, shuffle=shuffle_files, num_epochs=num_epochs)\n reader = tf.TFRecordReader()\n _, se = reader.read(filename_queue)\n # TODO(seominjoon): Consider moving data filtering to here.\n features_op = tf.parse_single_example(se, features=features)\n\n names = list(features_op.keys())\n dtypes = [features_op[name].dtype for name in names]\n shapes = [features_op[name].shape for name in names]\n\n if shuffle_examples:\n # Data shuffling.\n rq = tf.RandomShuffleQueue(\n queue_capacity, min_after_dequeue, dtypes, names=names)\n else:\n rq = tf.FIFOQueue(queue_capacity, dtypes, names=names)\n enqueue_op = rq.enqueue(features_op)\n dequeue_op = rq.dequeue()\n dequeue_op = [dequeue_op[name] for name in names]\n qr = tf.train.QueueRunner(rq, [enqueue_op])\n tf.train.add_queue_runner(qr)\n\n batch = tf.train.batch(\n dequeue_op,\n batch_size,\n capacity=queue_capacity,\n dynamic_pad=True,\n shapes=shapes,\n allow_smaller_final_batch=True,\n num_threads=5)\n batch = {name: each for name, each in zip(names, batch)}\n target_keys = [\n 'word_answer_starts', 'word_answer_ends', 'answers', 'num_answers'\n ]\n # TODO(seominjoon) For cheating-safe, comment out #.\n features_batch = {\n key: val\n for key, val in batch.items() # if key not in target_keys\n }\n\n # `metadata['emb_mat`]` contains GloVe embedding, and `xv` in\n # `features_batch` index into the vectors.\n features_batch['emb_mat'] = tf.constant(emb_mat)\n targets_batch = {key: batch[key] for key in target_keys}\n\n # Postprocessing for character data.\n # Due to the limitation of the python wrapper for prototxt,\n # the characters (by index) need to be flattened when saving on prototxt.\n # The following 'unflattens' the character tensor.\n actual_batch_size = tf.shape(batch['indexed_context_chars'])[0]\n features_batch['indexed_context_chars'] = tf.reshape(\n features_batch['indexed_context_chars'],\n [actual_batch_size, -1, metadata['num_chars_per_word']])\n features_batch['indexed_question_chars'] = tf.reshape(\n features_batch['indexed_question_chars'],\n [actual_batch_size, -1, metadata['num_chars_per_word']])\n\n # Make sure answer start and end positions are less than sequence lengths.\n # TODO(seominjoon) This will need to move to a separate test.\n with tf.control_dependencies([\n tf.assert_less(\n tf.reduce_max(targets_batch['word_answer_starts'], 1),\n features_batch['context_num_words'])\n ]):\n targets_batch['word_answer_starts'] = tf.identity(\n targets_batch['word_answer_starts'])\n with tf.control_dependencies([\n tf.assert_less(\n tf.reduce_max(targets_batch['word_answer_ends'], 1),\n features_batch['context_num_words'])\n ]):\n targets_batch['word_answer_ends'] = tf.identity(\n targets_batch['word_answer_ends'])\n\n # Stress test to ensure no OOM for GPU occurs.\n if oom_test:\n features_batch['indexed_context_words'] = tf.constant(\n np.ones(\n [batch_size, exp_metadata['max_context_size']], dtype='int64'))\n features_batch['glove_indexed_context_words'] = tf.constant(\n np.ones(\n [batch_size, exp_metadata['max_context_size']], dtype='int64'))\n features_batch['indexed_context_chars'] = tf.constant(\n np.ones(\n [\n batch_size, exp_metadata['max_context_size'], exp_metadata[\n 'num_chars_per_word']\n ],\n dtype='int64'))\n features_batch['indexed_question_words'] = tf.constant(\n np.ones([batch_size, exp_metadata['max_ques_size']], dtype='int64'))\n 
features_batch['glove_indexed_question_words'] = tf.constant(\n np.ones([batch_size, exp_metadata['max_ques_size']], dtype='int64'))\n features_batch['indexed_question_chars'] = tf.constant(\n np.ones(\n [\n batch_size, exp_metadata['max_ques_size'], exp_metadata[\n 'num_chars_per_word']\n ],\n dtype='int64'))\n features_batch['question_num_words'] = tf.constant(\n np.ones([batch_size], dtype='int64') * exp_metadata['max_ques_size'])\n features_batch['context_num_words'] = tf.constant(\n np.ones([batch_size], dtype='int64') *\n exp_metadata['max_context_size'])\n\n return features_batch, targets_batch",
"def test_get_id_range_for_partition_with_one_over():\n min_id = 1\n max_id = 101\n partition_size = 20\n id_range_item_count = max_id - min_id + 1 # this many individual IDs should be processed for continuous ID range\n assert id_range_item_count % partition_size == 1 # one over the partition size\n etl_config = {\"partition_size\": partition_size}\n ctrl = PostgresElasticsearchIndexerController(etl_config)\n ctrl.min_id = min_id\n ctrl.max_id = max_id\n ctrl.record_count = id_range_item_count # assume records exist for each ID in range\n ctrl.config[\"partitions\"] = ctrl.determine_partitions()\n assert ctrl.config[\"partitions\"] == ceil(id_range_item_count / partition_size)\n partition_range = range(0, ctrl.config[\"partitions\"])\n # First batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[0])\n assert lower_bound == min_id\n assert upper_bound == lower_bound + (partition_size - 1)\n # Second batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[1])\n assert lower_bound == min_id + partition_size\n assert upper_bound == lower_bound + (partition_size - 1)\n # Last batch should go all the way up to max_id\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[-1])\n assert lower_bound == (min_id + (partition_size * partition_range[-1])) == 101\n assert upper_bound == max_id == 101\n id_set = set(range(min_id, max_id + 1))\n assert _remove_seen_ids(ctrl, id_set) == set({})",
"def test_get_id_range_for_partition_with_evenly_divisible_partition_size_offset():\n min_id = 4\n max_id = 6004\n partition_size = 2000\n id_range_item_count = max_id - min_id + 1 # this many individual IDs should be processed for continuous ID range\n etl_config = {\"partition_size\": partition_size}\n ctrl = PostgresElasticsearchIndexerController(etl_config)\n ctrl.min_id = min_id\n ctrl.max_id = max_id\n ctrl.record_count = id_range_item_count # assume records exist for each ID in range\n ctrl.config[\"partitions\"] = ctrl.determine_partitions()\n assert ctrl.config[\"partitions\"] == ceil(id_range_item_count / partition_size)\n partition_range = range(0, ctrl.config[\"partitions\"])\n # First batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[0])\n assert lower_bound == min_id\n assert upper_bound == lower_bound + (partition_size - 1)\n # Second batch\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[1])\n assert lower_bound == min_id + partition_size\n assert upper_bound == lower_bound + (partition_size - 1)\n # Last batch should go all the way up to max_id\n lower_bound, upper_bound = ctrl.get_id_range_for_partition(partition_range[-1])\n assert lower_bound == (min_id + (partition_size * partition_range[-1]))\n assert upper_bound == max_id\n id_set = set(range(min_id, max_id + 1))\n assert _remove_seen_ids(ctrl, id_set) == set({})",
"def train_input_fn(features, labels, batch_size):\n\n dataset = tf.data.Dataset.from_tensor_slices((dict(features), labels))\n dataset = dataset.shuffle(10).repeat().batch(batch_size)\n return dataset",
"def test_multi_triggered_gbk_side_input(self):\n # TODO(https://github.com/apache/beam/issues/20065): Remove use of this\n # experiment. This flag is only necessary when using the multi-output\n # TestStream b/c it relies on using the PCollection output tags as the\n # PCollection output ids.\n with TestPipeline() as p:\n\n test_stream = (\n p\n | 'Mixed TestStream' >> TestStream().advance_watermark_to(\n 3,\n tag='main').add_elements(['a1'], tag='main').advance_watermark_to(\n 8, tag='main').add_elements(['a2'], tag='main').add_elements(\n [window.TimestampedValue(('k', 100), 2)], tag='side').\n add_elements([window.TimestampedValue(\n ('k', 400), 7)], tag='side').advance_watermark_to_infinity(\n tag='main').advance_watermark_to_infinity(tag='side'))\n\n main_data = (\n test_stream['main']\n | 'Main windowInto' >> beam.WindowInto(\n window.FixedWindows(5),\n accumulation_mode=trigger.AccumulationMode.DISCARDING))\n\n side_data = (\n test_stream['side']\n | 'Side windowInto' >> beam.WindowInto(\n window.FixedWindows(5),\n trigger=trigger.AfterWatermark(early=trigger.AfterCount(1)),\n accumulation_mode=trigger.AccumulationMode.DISCARDING)\n | beam.CombinePerKey(sum)\n | 'Values' >> Map(lambda k_vs: k_vs[1]))\n\n class RecordFn(beam.DoFn):\n def process(\n self,\n elm=beam.DoFn.ElementParam,\n ts=beam.DoFn.TimestampParam,\n side=beam.DoFn.SideInputParam):\n yield (elm, ts, side)\n\n records = (\n main_data\n | beam.ParDo(RecordFn(), beam.pvalue.AsList(side_data)))\n\n expected_window_to_elements = {\n window.IntervalWindow(0, 5): [\n ('a1', Timestamp(3), [100, 0]),\n ],\n window.IntervalWindow(5, 10): [('a2', Timestamp(8), [400, 0])],\n }\n\n assert_that(\n records,\n equal_to_per_window(expected_window_to_elements),\n use_global_window=False,\n label='assert per window')"
] | [
"0.7482561",
"0.66467416",
"0.6395901",
"0.63573223",
"0.6236432",
"0.62344116",
"0.621293",
"0.56327856",
"0.5514279",
"0.5450747",
"0.54114723",
"0.5390393",
"0.5366445",
"0.53125083",
"0.53047395",
"0.5300707",
"0.5293415",
"0.5252793",
"0.5236909",
"0.52050155",
"0.520145",
"0.5193504",
"0.51655185",
"0.51637554",
"0.5130889",
"0.5130762",
"0.5119399",
"0.51090604",
"0.5107973",
"0.5105322"
] | 0.7959238 | 0 |
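Illustrative usage sketch (not part of the dataset row above): wiring the random_slice_input_producer from this record into a TF 1.x queue-runner input pipeline. The module name, array shapes, and batch size are assumptions made for the sketch; only the producer itself comes from the record.

import numpy as np
import tensorflow as tf  # TF 1.x graph-mode API assumed

from input_producers import random_slice_input_producer  # hypothetical module holding the function above

# Hypothetical in-memory dataset: 100 examples of 8 features with integer labels.
features = np.random.rand(100, 8).astype(np.float32)
labels = np.random.randint(0, 2, size=(100,)).astype(np.int64)

# Each dequeue yields one randomly chosen (feature, label) pair; tf.train.batch
# then assembles mini-batches from those single examples.
feature, label = random_slice_input_producer([features, labels], seed=42)
feature_batch, label_batch = tf.train.batch([feature, label], batch_size=16)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # only needed when num_epochs is set
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    f, l = sess.run([feature_batch, label_batch])  # one randomly sampled batch
    coord.request_stop()
    coord.join(threads)

Compared with tf.train.slice_input_producer(shuffle=True), the indices here come from random_index_input_producer's uniform draw, i.e. sampling with replacement, so a given example can appear more than once per pass.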
Set the order attr to the selected one. | def set_order(self, order):
self.order = order | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def setOrder(self, order):\n\t\tself.orderInData = order",
"def order(self, order):\n self._order = order",
"def SetOrder(self, order):\n if self.__order != order:\n self.__order = order\n self.Modified()",
"def order(self, order):\n\n self._order = order",
"def order(self, order):\n\n self._order = order",
"def order(self, order):\n\n self._order = order",
"def set_document_order(self, order):\n self.set_value_into_input_field(self.order_text_field_locator, order)",
"def setOrder(self, *args):\n return _libsbml.CompartmentGlyph_setOrder(self, *args)",
"def setOrder(self, verbose = 1):\n\n self.order = np.arange(self.atoms.shape[0])\n if verbose > 0:\n string = \"Updated the saved order\"\n ut.infoPrint(string)",
"def changeOrder(self):\n order = self.orderSpinBox.value()\n nfilter = int(str(self.filterComboBox.currentText()))\n if order > nfilter - 2:\n order = nfilter - 2\n if order < 1:\n order = 1\n self.orderSpinBox.setValue(order)\n self.order = order",
"def set_task_order(self, order):\n for task in self.tasks:\n task.order = order",
"def set_order_weight(self):\n\n for child in self.all_children():\n for rule in self.options['ordering']:\n if child.lineage_test(rule):\n child.order_weight = rule['order']",
"def set_order_weight(self):\n\n for child in self.all_children():\n for rule in self.options['ordering']:\n if child.lineage_test(rule):\n child.order_weight = rule['order']",
"def update_order():",
"def update_order():",
"def order(self, order=0):\n # type: (int) -> Entity\n self.type_def['order'] = order\n\n return self",
"def get_selected_ordering(self):\n return self.request.GET.get(self.ordering_kwarg)",
"def order(self, order_id, symbol, **kwargs):\n pass",
"def set_bond_order(molecule, bond_index, bond_order):\n return molecule.SetBondOrder(bond_index, bond_order)",
"def OnReorder( self, event ):\n column = self.columns[event.GetColumn()]\n if column.sortOn:\n # multiple sorts for the click...\n columns = [ self.columnByAttribute( attr ) for attr in column.sortOn ]\n diff = [ (a,b) for a,b in zip( self.sortOrder, columns ) if b is not a[1]]\n if not diff:\n self.sortOrder[0] = (not self.sortOrder[0][0], column)\n else:\n self.sortOrder = [\n (c.defaultOrder,c) for c in columns \n ] + [ (a,b) for (a,b) in self.sortOrder if b not in columns]\n else:\n if column is self.sortOrder[0][1]:\n # reverse current major order\n self.sortOrder[0] = (not self.sortOrder[0][0], column)\n else:\n self.sortOrder = [(column.defaultOrder,column)] + [\n (a,b) \n for (a,b) in self.sortOrder if b is not column \n ]\n # TODO: store current selection and re-select after sorting...\n self.reorder()\n self.Refresh()",
"def update_order_property_setter(self, has_custom, fieldname):\n\t\tproperty_name = f\"{fieldname}_order\"\n\t\tif has_custom:\n\t\t\t# save the order of the actions and links\n\t\t\tself.make_property_setter(\n\t\t\t\tproperty_name, json.dumps([d.name for d in self.get(fieldname)]), \"Small Text\"\n\t\t\t)\n\t\telse:\n\t\t\tfrappe.db.delete(\"Property Setter\", dict(property=property_name, doc_type=self.doc_type))",
"def sort_order(self, sort_order):\n\n self._sort_order = sort_order",
"def order_id(self, order_id):\n\n self._order_id = order_id",
"def order_id(self, order_id):\n\n self._order_id = order_id",
"def order_id(self, order_id):\n\n self._order_id = order_id",
"def _setordering_customer_50A(self, val):\n self.swift_obj.OrderingCustomer_A = val\n self.swift_obj.OrderingCustomer_A.swiftTag = '50A'",
"def reorder(self, new_order):\n #TODO doesn't work probably CRA 3/2019\n for field in [\"atoms\", \"xyz\"]:\n self.__dict__[field] = self.__dict__[field][list(new_order)]\n self.atoms = [self.atoms[i] for i in new_order]",
"def set_window_order(self, order: int = 0) -> None:\n if order == 0:\n setval = QMdiArea.CreationOrder\n if order == 1:\n setval = QMdiArea.StackingOrder\n if order == 2:\n setval = QMdiArea.ActivationHistoryOrder\n\n self.setActivationOrder(setval)",
"def set_selected(self, selected):\n self.selected = selected",
"def _setordering_customer_50F(self, val):\n self.swift_obj.OrderingCustomer_F = val\n self.swift_obj.OrderingCustomer_F.swiftTag = '50F'"
] | [
"0.7143052",
"0.70097286",
"0.69960636",
"0.6928143",
"0.6928143",
"0.6928143",
"0.6850993",
"0.6164734",
"0.61367893",
"0.6133654",
"0.6111504",
"0.59993577",
"0.59993577",
"0.5986225",
"0.5986225",
"0.5852736",
"0.5828744",
"0.57500434",
"0.5696392",
"0.56741315",
"0.5641094",
"0.56319326",
"0.5609619",
"0.5609619",
"0.5609619",
"0.5598404",
"0.5579428",
"0.55569416",
"0.55165505",
"0.55025077"
] | 0.7511348 | 0 |
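Minimal illustrative sketch (not part of the row above) of how a one-line setter like set_order is typically exercised; the surrounding class and order value are invented for the example.

class ResultList:
    """Hypothetical container whose sort order is chosen via set_order()."""

    def __init__(self):
        self.order = None

    def set_order(self, order):
        # Same one-line setter as in the record: store the selected order.
        self.order = order


results = ResultList()
results.set_order("-created_at")  # the selected ordering now lives on the instance
assert results.order == "-created_at"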
Gets the discriminator of this RecurrencePatternDto. | def discriminator(self) -> str:
return self.__class__.__name__ | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def discriminator(self) -> str:",
"def discriminator (self) -> tf.keras.Sequential:\n return self._discriminator",
"def discriminator(self) -> Any:\r\n return self._qda",
"def discriminator(self) -> Any:\r\n return self._lda",
"def discriminator(self) -> undefined.UndefinedOr[str]:",
"def remotediscriminator(self) :\n\t\ttry :\n\t\t\treturn self._remotediscriminator\n\t\texcept Exception as e:\n\t\t\traise e",
"def discriminator(self) -> Any:\r\n return None",
"def discriminator(self, discriminator: str):\n pass # setter is ignored for discriminator property",
"def discriminator(self, images): # pylint: disable=R0201\n return standard_discriminator(images)",
"def localdiscriminator(self) :\n\t\ttry :\n\t\t\treturn self._localdiscriminator\n\t\texcept Exception as e:\n\t\t\traise e",
"def get_real_child_model(self, data):\n discriminator_value = data[self.discriminator].lower()\n return self.discriminator_value_class_map.get(discriminator_value)",
"def discriminator(self):\n\n # Initializate the neural network\n discriminator = Sequential()\n\n # Convolution, bias, activate\n discriminator.add(Conv2D(filters=self.first_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform',\n input_shape=self.image_shape))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n # Convolution\n discriminator.add(Conv2D(filters=self.second_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n\n # Convolution\n discriminator.add(Conv2D(filters=self.third_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n # Convolution\n discriminator.add(Conv2D(filters=self.last_layer_size,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n\n discriminator.add(Flatten())\n discriminator.add(Dense(1))\n discriminator.add(Activation('sigmoid'))\n\n optimizer = Adam(lr=self.lr, beta_1=self.beta)\n discriminator.compile(loss=self.loss,\n optimizer=optimizer,\n metrics=None)\n\n return discriminator",
"def build_discriminator(self):\n with tf.variable_scope(\"discriminator\") as scope:\n\n # --- build the convolutional layers\n self.d_convlayers = list()\n mi = self.num_colors\n dim = self.img_dim\n count = 0\n for mo, filter_size, stride, apply_batch_norm in self.d_sizes['conv_layers']:\n name = f\"convlayer_{count}\" # name is used for get_variable later\n count += 1\n layer = ConvLayer(name, mi, mo, apply_batch_norm, filter_size, stride, lrelu)\n self.d_convlayers.append(layer)\n mi = mo\n print(f\"dim: {dim}\")\n # --- keep track of image dimensionality: need this for the first Dense layer\n dim = int(np.ceil(float(dim) / stride))\n\n # --- get the input dimensionalith for the first Dense layer\n mi = mi * dim * dim\n\n # --- build the dense layers\n self.d_denselayers = list()\n for mo, apply_batch_norm in self.d_sizes['dense_layers']:\n name = f\"denselayer_{count}\"\n count += 1\n layer = DenseLayer(name, mi, mo, apply_batch_norm, lrelu)\n mi = mo\n self.d_denselayers.append(layer)\n\n # --- final logistic regression layer (use it in the d_forward\n # function below to get the final logits)\n name = f\"denselayer_{count}\"\n self.d_finallayer = DenseLayer(name, mi, 1, False, lambda x: x)\n\n # --- get and return the logits\n logits = self.d_forward(self.X)\n return logits",
"def recurrence(self):\n if \"recurrence\" in self._prop_dict:\n if isinstance(self._prop_dict[\"recurrence\"], OneDriveObjectBase):\n return self._prop_dict[\"recurrence\"]\n else :\n self._prop_dict[\"recurrence\"] = PatternedRecurrence(self._prop_dict[\"recurrence\"])\n return self._prop_dict[\"recurrence\"]\n\n return None",
"def get_real_child_model(self, data):\n discriminator_key = self.attribute_map[self.discriminator]\n discriminator_value = data[discriminator_key]\n return self.discriminator_value_class_map.get(discriminator_value)",
"def get_real_child_model(self, data):\n discriminator_key = self.attribute_map[self.discriminator]\n discriminator_value = data[discriminator_key]\n return self.discriminator_value_class_map.get(discriminator_value)",
"def get_real_child_model(self, data):\n discriminator_key = self.attribute_map[self.discriminator]\n discriminator_value = data[discriminator_key]\n return self.discriminator_value_class_map.get(discriminator_value)",
"def get_real_child_model(self, data):\n discriminator_key = self.attribute_map[self.discriminator]\n discriminator_value = data[discriminator_key]\n return self.discriminator_value_class_map.get(discriminator_value)",
"def build_discriminator(self):\n # label input\n in_label = Input(shape=(1,))\n # embedding for categorical input\n li = Embedding(self.n_classes, 50)(in_label)\n # scale up to image dimensions with linear activation\n n_nodes = self.in_shape[0] * self.in_shape[1]\n li = Dense(n_nodes)(li)\n # reshape to additional channel\n li = Reshape((self.in_shape[0], self.in_shape[1], 1))(li)\n # image input\n in_image = Input(shape=self.in_shape)\n # concat label as a channel\n merge = Concatenate()([in_image, li])\n # downsample\n fe = Conv2D(128, (3,3), strides=(2,2), padding='same')(merge)\n fe = LeakyReLU(alpha=0.2)(fe)\n # downsample\n fe = Conv2D(128, (3,3), strides=(2,2), padding='same')(fe)\n fe = LeakyReLU(alpha=0.2)(fe)\n # flatten feature maps\n fe = Flatten()(fe)\n # dropout\n fe = Dropout(0.4)(fe)\n # output\n out_layer = Dense(1, activation='sigmoid')(fe)\n # define model\n self.d_model = Model([in_image, in_label], out_layer)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n self.d_model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])",
"def build_discriminator(self):\n\n def d_block(layer_input, filters, strides=1, bn=True):\n\n d = tf.keras.layers.Conv2D(filters, kernel_size=3, strides=strides, padding='same')(layer_input)\n if bn:\n d = tf.keras.layers.BatchNormalization(momentum=0.8)(d)\n d = tf.keras.layers.LeakyReLU(alpha=0.2)(d)\n \n return d\n\n # Input img\n d0 = tf.keras.layers.Input(shape=self.hr_shape)\n\n d1 = d_block(d0, self.df, bn=False)\n d2 = d_block(d1, self.df, strides=2)\n d3 = d_block(d2, self.df)\n d4 = d_block(d3, self.df, strides=2)\n d5 = d_block(d4, self.df * 2)\n d6 = d_block(d5, self.df * 2, strides=2)\n d7 = d_block(d6, self.df * 2)\n d8 = d_block(d7, self.df * 2, strides=2)\n\n validity = tf.keras.layers.Conv2D(1, kernel_size=1, strides=1, activation='sigmoid', padding='same')(d8)\n\n return tf.keras.models.Model(d0, validity)",
"def subtype(self):\n\n return self.__subtype",
"def patron_cls(self):\n return self.record_class_by_pid_type(PATRON_PID_TYPE)",
"def make_discriminator():\n constraint_shape = Params.environment.constraint_shape()\n solution_shape = Params.environment.solution_shape()\n joint_shape = constraint_shape[:]\n joint_shape[0] += solution_shape[0]\n\n constraint_input = placeholder_node(\"constraint_input\", constraint_shape, 1)\n solution_input = placeholder_node(\"solution_input\", solution_shape, 1)\n joint_input = tf.concat([constraint_input, solution_input], 1)\n return (\n constraint_input,\n solution_input,\n FeedforwardNetwork(\n name=\"artificial_discriminator\",\n session=Params.session,\n input_shape=joint_shape,\n layer_shapes=Params.internal_layer_shapes + [[1]],\n activations=Params.activation,\n input_node=joint_input,\n save_location=Params.save_location,\n ),\n )",
"def discriminator():\n\n # img = Input(shape=(28, 28, 1))\n # validity = ident(img)\n\n model = Model(img, validity)\n\n model.compile(loss=\"binary_crossentropy\", optimizer=op1,\n metrics=['accuracy'])\n\n # model.summary()\n\n return model",
"def discriminator(self, inpt, reuse, is_train):\n with tf.variable_scope(\"discriminator\"):\n if reuse:\n tf.get_variable_scope().reuse_variables()\n net = conv2d(x=inpt, num_kernels=self.d_init, name=\"conv1\", activation=lkrelu, padding=\"SAME\",\n alpha=0.02, is_train=is_train, stddv=self.stddv)\n net = conv2d(x=net, num_kernels=self.d_init*2, name=\"conv2\", activation=lkrelu, padding=\"SAME\",\n alpha=0.02, is_train=is_train, stddv=self.stddv)\n net = conv2d(x=net, num_kernels=self.d_init*4, name=\"conv3\", activation=lkrelu, padding=\"SAME\",\n alpha=0.02, is_train=is_train, stddv=self.stddv)\n net = conv2d(x=net, num_kernels=self.d_init*8, name=\"conv4\", activation=lkrelu, padding=\"SAME\",\n alpha=0.02, is_train=is_train, stddv=self.stddv)\n net = dense_layer(x=net, num_neurons=1, name=\"output\", activation=tf.identity, is_train=is_train,\n stddv=self.stddv)\n return net",
"def __discriminator(self, inp, reuse_variables=None):\n with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables) as scope:\n nodes_input = 1\n for i in range(len(self.arch_D)):\n nodes_output = self.arch_D[i]\n inp = fc_layer(inp, nodes_input, nodes_output, 'D_' + str(i + 1) + '_')\n nodes_input = self.arch_D[i]\n\n return fc_layer(inp, self.arch_D[-1], 2,\n 'D_end_',\n final_layer=True)",
"def build_discriminator(shape):\n input_img = Input(shape=(shape)) \n x = Conv2D(64, (3, 3), padding='same')(input_img)\n x = LeakyReLU()(x)\n x = Dropout(0.25)(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(8, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Dropout(0.25)(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(8, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Dropout(0.25)(x)\n x = BatchNormalization(momentum=0.8)(x)\n x = AveragePooling2D((2, 2), padding='same')(x)\n x = Conv2D(1, (3, 3), padding='same')(x)\n x = LeakyReLU()(x)\n x = Flatten()(x)\n o = Dense(1,activation='sigmoid')(x)\n Discriminator=Model(input_img,o,name='discriminator')\n return input_img,o,Discriminator",
"def discriminant(self):\r\n return self.__b**2 - (4 * self.__a * self.__c)",
"def _define_discriminator_loss(self):\n real_d_loss = tf.reduce_mean(self._real_discriminator_out)\n real_d_loss = tf.negative(real_d_loss, name='real_discriminator_loss')\n gen_d_loss = tf.reduce_mean(self._gen_discriminator_out,\n name='gen_discriminator_loss')\n return tf.add(real_d_loss, gen_d_loss, name='discrminator_loss')",
"def _discriminator(self, x, reuse=False):\n with tf.variable_scope(\"discriminator\", reuse=reuse) as scope:\n layer_1= tf.contrib.slim.fully_connected(inputs = x, num_outputs = 151, activation_fn = tf.nn.relu)\n layer_2 = tf.contrib.slim.fully_connected(inputs = layer_1, num_outputs = 71,activation_fn = tf.nn.relu)\n y = tf.contrib.slim.fully_connected(inputs = layer_2, num_outputs = 1,activation_fn = None)\n print('y shape', tf.shape(y))\n return y"
] | [
"0.6453223",
"0.64276195",
"0.6355683",
"0.6312236",
"0.6309491",
"0.62741375",
"0.6186774",
"0.6178921",
"0.61462843",
"0.6067946",
"0.5821017",
"0.56907356",
"0.5532988",
"0.5486713",
"0.54712415",
"0.54712415",
"0.54712415",
"0.54712415",
"0.5469352",
"0.5446628",
"0.5388884",
"0.5363971",
"0.53541994",
"0.5241632",
"0.5194335",
"0.51620954",
"0.5096498",
"0.50869566",
"0.5070975",
"0.50346506"
] | 0.71240664 | 0 |
Sets the discriminator of this RecurrencePatternDto. | def discriminator(self, discriminator: str):
pass # setter is ignored for discriminator property | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def discriminator(self, images): # pylint: disable=R0201\n return standard_discriminator(images)",
"def discriminator(self) -> str:\n return self.__class__.__name__",
"def discriminator(self) -> str:",
"def discriminator (self) -> tf.keras.Sequential:\n return self._discriminator",
"def discriminator(self) -> Any:\r\n return None",
"def discriminator(self) -> undefined.UndefinedOr[str]:",
"def discriminator(self):\n\n # Initializate the neural network\n discriminator = Sequential()\n\n # Convolution, bias, activate\n discriminator.add(Conv2D(filters=self.first_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform',\n input_shape=self.image_shape))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n # Convolution\n discriminator.add(Conv2D(filters=self.second_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n\n # Convolution\n discriminator.add(Conv2D(filters=self.third_layer_filter,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n # Convolution\n discriminator.add(Conv2D(filters=self.last_layer_size,\n kernel_size=self.kernel_size,\n strides=self.stride_size,\n padding='same',\n data_format='channels_last',\n kernel_initializer='glorot_uniform'))\n # Normalize\n discriminator.add(BatchNormalization(momentum=0.5, epsilon=1e-5))\n # Activate\n discriminator.add(LeakyReLU(0.2))\n\n discriminator.add(Flatten())\n discriminator.add(Dense(1))\n discriminator.add(Activation('sigmoid'))\n\n optimizer = Adam(lr=self.lr, beta_1=self.beta)\n discriminator.compile(loss=self.loss,\n optimizer=optimizer,\n metrics=None)\n\n return discriminator",
"def discriminator(self) -> Any:\r\n return self._qda",
"def discriminator(self) -> Any:\r\n return self._lda",
"def make_discriminator():\n constraint_shape = Params.environment.constraint_shape()\n solution_shape = Params.environment.solution_shape()\n joint_shape = constraint_shape[:]\n joint_shape[0] += solution_shape[0]\n\n constraint_input = placeholder_node(\"constraint_input\", constraint_shape, 1)\n solution_input = placeholder_node(\"solution_input\", solution_shape, 1)\n joint_input = tf.concat([constraint_input, solution_input], 1)\n return (\n constraint_input,\n solution_input,\n FeedforwardNetwork(\n name=\"artificial_discriminator\",\n session=Params.session,\n input_shape=joint_shape,\n layer_shapes=Params.internal_layer_shapes + [[1]],\n activations=Params.activation,\n input_node=joint_input,\n save_location=Params.save_location,\n ),\n )",
"def remotediscriminator(self) :\n\t\ttry :\n\t\t\treturn self._remotediscriminator\n\t\texcept Exception as e:\n\t\t\traise e",
"def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> DelegatedAdminRelationshipRequest:\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return DelegatedAdminRelationshipRequest()",
"def build_discriminator(self):\n with tf.variable_scope(\"discriminator\") as scope:\n\n # --- build the convolutional layers\n self.d_convlayers = list()\n mi = self.num_colors\n dim = self.img_dim\n count = 0\n for mo, filter_size, stride, apply_batch_norm in self.d_sizes['conv_layers']:\n name = f\"convlayer_{count}\" # name is used for get_variable later\n count += 1\n layer = ConvLayer(name, mi, mo, apply_batch_norm, filter_size, stride, lrelu)\n self.d_convlayers.append(layer)\n mi = mo\n print(f\"dim: {dim}\")\n # --- keep track of image dimensionality: need this for the first Dense layer\n dim = int(np.ceil(float(dim) / stride))\n\n # --- get the input dimensionalith for the first Dense layer\n mi = mi * dim * dim\n\n # --- build the dense layers\n self.d_denselayers = list()\n for mo, apply_batch_norm in self.d_sizes['dense_layers']:\n name = f\"denselayer_{count}\"\n count += 1\n layer = DenseLayer(name, mi, mo, apply_batch_norm, lrelu)\n mi = mo\n self.d_denselayers.append(layer)\n\n # --- final logistic regression layer (use it in the d_forward\n # function below to get the final logits)\n name = f\"denselayer_{count}\"\n self.d_finallayer = DenseLayer(name, mi, 1, False, lambda x: x)\n\n # --- get and return the logits\n logits = self.d_forward(self.X)\n return logits",
"def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> SynchronizationRule:\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return SynchronizationRule()",
"def discriminator(self, inpt, reuse, is_train):\n with tf.variable_scope(\"discriminator\"):\n if reuse:\n tf.get_variable_scope().reuse_variables()\n net = conv2d(x=inpt, num_kernels=self.d_init, name=\"conv1\", activation=lkrelu, padding=\"SAME\",\n alpha=0.02, is_train=is_train, stddv=self.stddv)\n net = conv2d(x=net, num_kernels=self.d_init*2, name=\"conv2\", activation=lkrelu, padding=\"SAME\",\n alpha=0.02, is_train=is_train, stddv=self.stddv)\n net = conv2d(x=net, num_kernels=self.d_init*4, name=\"conv3\", activation=lkrelu, padding=\"SAME\",\n alpha=0.02, is_train=is_train, stddv=self.stddv)\n net = conv2d(x=net, num_kernels=self.d_init*8, name=\"conv4\", activation=lkrelu, padding=\"SAME\",\n alpha=0.02, is_train=is_train, stddv=self.stddv)\n net = dense_layer(x=net, num_neurons=1, name=\"output\", activation=tf.identity, is_train=is_train,\n stddv=self.stddv)\n return net",
"def build_discriminator(self):\n\n def d_block(layer_input, filters, strides=1, bn=True):\n\n d = tf.keras.layers.Conv2D(filters, kernel_size=3, strides=strides, padding='same')(layer_input)\n if bn:\n d = tf.keras.layers.BatchNormalization(momentum=0.8)(d)\n d = tf.keras.layers.LeakyReLU(alpha=0.2)(d)\n \n return d\n\n # Input img\n d0 = tf.keras.layers.Input(shape=self.hr_shape)\n\n d1 = d_block(d0, self.df, bn=False)\n d2 = d_block(d1, self.df, strides=2)\n d3 = d_block(d2, self.df)\n d4 = d_block(d3, self.df, strides=2)\n d5 = d_block(d4, self.df * 2)\n d6 = d_block(d5, self.df * 2, strides=2)\n d7 = d_block(d6, self.df * 2)\n d8 = d_block(d7, self.df * 2, strides=2)\n\n validity = tf.keras.layers.Conv2D(1, kernel_size=1, strides=1, activation='sigmoid', padding='same')(d8)\n\n return tf.keras.models.Model(d0, validity)",
"def build_discriminator(self):\n # label input\n in_label = Input(shape=(1,))\n # embedding for categorical input\n li = Embedding(self.n_classes, 50)(in_label)\n # scale up to image dimensions with linear activation\n n_nodes = self.in_shape[0] * self.in_shape[1]\n li = Dense(n_nodes)(li)\n # reshape to additional channel\n li = Reshape((self.in_shape[0], self.in_shape[1], 1))(li)\n # image input\n in_image = Input(shape=self.in_shape)\n # concat label as a channel\n merge = Concatenate()([in_image, li])\n # downsample\n fe = Conv2D(128, (3,3), strides=(2,2), padding='same')(merge)\n fe = LeakyReLU(alpha=0.2)(fe)\n # downsample\n fe = Conv2D(128, (3,3), strides=(2,2), padding='same')(fe)\n fe = LeakyReLU(alpha=0.2)(fe)\n # flatten feature maps\n fe = Flatten()(fe)\n # dropout\n fe = Dropout(0.4)(fe)\n # output\n out_layer = Dense(1, activation='sigmoid')(fe)\n # define model\n self.d_model = Model([in_image, in_label], out_layer)\n # compile model\n opt = Adam(lr=0.0002, beta_1=0.5)\n self.d_model.compile(loss='binary_crossentropy', optimizer=opt, metrics=['accuracy'])",
"def train_discriminator(gan, generator, discriminator, x_train, batch_size):\n # Get a random set of input noise.\n noise = get_noise(batch_size)\n\n # Generate fake MNIST images.\n generated_images = generator.predict(noise)\n # Get a random set of images from the actual MNIST dataset.\n image_batch = x_train[np.random.randint(0, x_train.shape[0], size=batch_size)]\n # Put them together in a single vector(list).\n X = np.concatenate([image_batch, generated_images])\n\n # Generate 0.0 (fake) for the whole vector.\n Y = np.zeros(2*batch_size)\n # Label real images correctly as 1.0.\n Y[:batch_size] = 1.0\n\n discriminator.trainable = True\n discriminator.train_on_batch(X, Y)",
"async def put_missing_discriminator(\n self, complex_body: JSON, *, content_type: str = \"application/json\", **kwargs: Any\n ) -> JSON:",
"async def put_missing_discriminator(\n self, complex_body: IO, *, content_type: str = \"application/json\", **kwargs: Any\n ) -> JSON:",
"def __discriminator(self, inp, reuse_variables=None):\n with tf.variable_scope(tf.get_variable_scope(), reuse=reuse_variables) as scope:\n nodes_input = 1\n for i in range(len(self.arch_D)):\n nodes_output = self.arch_D[i]\n inp = fc_layer(inp, nodes_input, nodes_output, 'D_' + str(i + 1) + '_')\n nodes_input = self.arch_D[i]\n\n return fc_layer(inp, self.arch_D[-1], 2,\n 'D_end_',\n final_layer=True)",
"def train_mixture_discriminator(self, opts, fake_images):\n with self._session.as_default(), self._session.graph.as_default():\n return self._train_mixture_discriminator_internal(opts, fake_images)",
"def train_batch_discriminator(self, real_lb, real_ul, epoch_loss):\n X_L, X_M = real_lb\n X_U = real_ul\n\n # When reaching the end of the array, the array size might be less than the true batch size\n batch_size = np.min([X_L.shape[0], X_U.shape[0]])\n\n if batch_size < X_M.shape[0]:\n idx = np.random.choice(X_M.shape[0], size=batch_size, replace=False)\n X_M = np.array([X_M[i] for i in idx])\n\n X = self.sample_X(X_L, X_U, size=batch_size)\n fake_M, Z = self.sdnet.Decomposer.predict(X)\n fake_X = self.sdnet.Reconstructor.predict([X_M, Z])\n\n # Pool of fake images. Using one pool regularises the Mask discriminator in the first epochs.\n self.fake_mask_pool, fake_M = self.get_fake(fake_M, self.fake_image_pool, size=batch_size)\n self.fake_image_pool, fake_X = self.get_fake(fake_X, self.fake_image_pool, size=batch_size)\n\n # If we have a pool of other images use some of it for real examples\n if self.other_masks is not None:\n M_other = next(self.other_masks)\n X_M = data_utils.sample(np.concatenate([X_M, M_other], axis=0), batch_size)\n\n # Train Discriminator\n zeros = np.zeros((X_M.shape[0],) + self.sdnet.D_model.output_shape[0][1:])\n ones = np.ones(zeros.shape)\n\n x = [X_M, fake_M, X, fake_X]\n y = [zeros, ones, zeros, ones]\n _, D_loss_real_M, D_loss_fake_M, D_loss_real_X, D_loss_fake_X = self.sdnet.D_model.train_on_batch(x, y)\n epoch_loss['dis_M'].append(np.mean([D_loss_real_M, D_loss_fake_M]))\n epoch_loss['dis_X'].append(np.mean([D_loss_real_X, D_loss_fake_X]))",
"def __init__(self):\n super(Discriminator, self).__init__()\n\n # Use stride in convolutions to downsample image to size 1\n\n # Using BatchNorm2d 0.8 for stability based on reading of https://github.com/eriklindernoren/PyTorch-GAN code\n layers = [nn.Conv2d(512, 1, kernel_size=4, stride=1, padding=0, bias=True),\n nn.Flatten(), nn.Sigmoid()]\n for i in range(3):\n out_chans = int(512 / (2 ** i))\n in_chans = int(out_chans / 2)\n layers.insert(0, nn.LeakyReLU(0.2, inplace=True))\n layers.insert(0, nn.BatchNorm2d(out_chans, 0.8))\n layers.insert(0, nn.Conv2d(in_chans, out_chans, kernel_size=4, stride=2, padding=1, bias=False))\n layers.insert(0, nn.LeakyReLU(0.2, inplace=True))\n layers.insert(0, nn.BatchNorm2d(64, 0.8))\n layers.insert(0, nn.Conv2d(3, 64, kernel_size=4, stride=2, padding=1, bias=False))\n print(layers)\n self.network = nn.Sequential(*layers)",
"def disresnet101(**kwargs):\n return Discriminator(resnetblocks.DresNetBottleneck, [3, 4, 23, 3], **kwargs)",
"def create_from_discriminator_value(parse_node: Optional[ParseNode] = None) -> SchemaExtension:\n if not parse_node:\n raise TypeError(\"parse_node cannot be null.\")\n return SchemaExtension()",
"def discriminator_block(self, name):\n\n if self.fit_mask : \n \n inputs = Input(shape=(2+self.nb_classe_mask, self.image_row, self.image_column, self.image_depth), name='dis_input')\n else :\n # In:\n inputs = Input(shape=(2, self.image_row, self.image_column, self.image_depth), name='dis_input')\n\n # Input 64\n disnet = Conv3D(self.discriminator_kernel * 1, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_1')(inputs)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 1 : 32\n disnet = Conv3D(self.discriminator_kernel * 2, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_2')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 2 : 16\n disnet = Conv3D(self.discriminator_kernel * 4, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_3')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 3 : 8\n disnet = Conv3D(self.discriminator_kernel * 8, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_4')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Hidden 4 : 4\n disnet = Conv3D(self.discriminator_kernel * 16, 4, strides=2,\n padding='same',\n kernel_initializer='he_normal',\n data_format='channels_first',\n name=name + '_conv_dis_5')(disnet)\n disnet = LeakyReLU(0.01)(disnet)\n\n # Decision : 2\n decision = Conv3D(1, 2, strides=1,\n use_bias=False,\n kernel_initializer='he_normal',\n data_format='channels_first',\n name='dis_decision')(disnet)\n\n decision = Reshape((1,))(decision)\n\n model = Model(inputs=[inputs], outputs=[decision], name=name)\n return model",
"def discriminator():\n\n # img = Input(shape=(28, 28, 1))\n # validity = ident(img)\n\n model = Model(img, validity)\n\n model.compile(loss=\"binary_crossentropy\", optimizer=op1,\n metrics=['accuracy'])\n\n # model.summary()\n\n return model",
"def train_discriminator(self, real_data, fake_data):\n self.d_optimizer.zero_grad()\n\n prediction_r = self.discriminator(real_data)\n target = torch.ones(real_data.size(0), 1)\n if self.label_smooth: target = .9 * target\n error_r = self.loss_function(prediction_r, target) # real\n error_r.backward()\n\n prediction_f = self.discriminator(fake_data)\n error_f = self.loss_function(prediction_f, torch.zeros(fake_data.size(0), 1)) # fake\n error_f.backward()\n\n self.d_optimizer.step()\n\n return error_r + error_f",
"def discriminator_model():\n\n Discriminator = Sequential(name='Discriminator')\n\n # Downsampling : 32x32x3 --> 16x16x64\n Discriminator.add(Conv2D(filters=64, kernel_size=(5, 5), strides=2, padding='same', \n kernel_initializer=RandomNormal(stddev=GAUSS_SD), \n input_shape=DISCRIMINATOR_INPUT))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 16x16x64 --> 8x8x128\n Discriminator.add(Conv2D(filters=128, kernel_size=(5, 5), strides=2, padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 8x8x128 --> 4x4x256\n Discriminator.add(Conv2D(filters=128, kernel_size=(5, 5), strides=2, padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Downsampling : 4x4x256 --> 2x2x512\n Discriminator.add(Conv2D(filters=512, kernel_size=(5, 5), strides=2, padding='same'))\n Discriminator.add(BatchNormalization(momentum=MOMENTUM))\n Discriminator.add(LeakyReLU(ALPHA))\n\n # Fully Connected Layer (classifier) , 2x2x512 (2048) --> 1\n Discriminator.add(Flatten())\n Discriminator.add(Dropout(DROPOUT))\n Discriminator.add(Dense(1))\n\n return Discriminator"
] | [
"0.5866368",
"0.56900775",
"0.5577675",
"0.5529219",
"0.54537046",
"0.5423825",
"0.5362164",
"0.51858824",
"0.5153772",
"0.51449656",
"0.5103829",
"0.508909",
"0.50875735",
"0.50779355",
"0.5076143",
"0.5073615",
"0.5065901",
"0.4970589",
"0.49438852",
"0.488279",
"0.47471884",
"0.47364596",
"0.47281617",
"0.47224152",
"0.4697664",
"0.46867192",
"0.46788335",
"0.4675371",
"0.46627414",
"0.46329397"
] | 0.7696652 | 0 |
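A minimal sketch of the property pattern the positive document above belongs to, assuming a Swagger/OpenAPI-style generated DTO in which the discriminator is exposed read-only and the generated setter deliberately ignores writes; the class body and default value below are hypothetical, not taken from the dataset:

    class RecurrencePatternDto:
        def __init__(self):
            # hypothetical backing field; generated clients normally derive this
            # from the model definition rather than hard-coding it here
            self._discriminator = "recurrencePattern"

        @property
        def discriminator(self) -> str:
            # read-only view of the discriminator value
            return self._discriminator

        @discriminator.setter
        def discriminator(self, discriminator: str):
            pass  # setter is ignored for discriminator property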
Returns True if the test object "obj" matches the prototype object "proto". If obj and proto are mappings, obj matches proto if (key in obj) and (obj[key] matches proto[key]) for every key in proto. If obj and proto are sequences, obj matches proto if they are of the same length and (a matches b) for every (a,b) in zip(obj, proto). Otherwise, obj matches proto if obj == proto. | def matches(obj, proto):
if isinstance(obj, Mapping):
if not isinstance(proto, Mapping):
return False
return all((key in obj and matches(obj[key], val)) for key, val in proto.items())
if isinstance(obj, Sequence) and not isinstance(obj, str):
if not (isinstance(proto, Sequence) and not isinstance(proto, str)):
return False
if len(obj) != len(proto):
return False
return all(matches(obj[index], val) for index, val in enumerate(proto))
return obj == proto | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def equals(self, obj):\n if obj == None:\n return False\n if not (isinstance(obj, (SentenceForm, ))):\n return False\n o = obj\n if self.__name__ != o.__name__:\n return False\n if self.getTupleSize() != o.getTupleSize():\n return False\n return o.matches(self.underscoreSentence.get())",
"def is_protobuf(schema_obj):\n\n if isinstance(schema_obj, schema.Field):\n if schema_obj.data_type == schema.Field.DataType.ENUM:\n return is_protobuf(schema_obj.enum_type)\n elif schema_obj.data_type == schema.Field.DataType.STRUCT:\n return is_protobuf(schema_obj.struct_type)\n else:\n protobuf_prefixes = ('google.protobuf.',)\n return schema_obj.full_name.startswith(protobuf_prefixes)",
"def equals(self, obj: object) -> bool:\n ...",
"def check_encapsulated(obj_type, first_obj, second_obj, db):\n if obj_type == 'network':\n # the indexing is to get the list of networks out of the tuple[1] and\n # list[0] returned by get_nets\n first = get_nets([first_obj], db)[0][1]\n second = get_nets([second_obj], db)[0][1]\n\n elif obj_type == 'service':\n first = get_ports([first_obj], db)[0][1]\n second = get_ports([second_obj], db)[0][1]\n else:\n raise ValueError(\"check_encapsulated() currently only supports \"\n \"'network' and 'service' for the obj_type parameter\")\n # iterates over each object in the first group, and then each obj in the\n # second group, making sure each one in the first is contained\n # somewhere in the second.\n for obj in first:\n for sec_obj in second:\n if obj.version == sec_obj.version:\n if obj.subnet_of(sec_obj):\n break\n # if we got through every object in the second group, and didn't have\n # a match, then the first group is not entirely contained.\n else:\n return False\n # if we got here, then the group was fully contained.\n return True",
"def subkeys(obj, proto):\n if not (isinstance(obj, Mapping) and isinstance(proto, Mapping)):\n return obj\n\n new_obj = {}\n for key, value in obj.items():\n if key not in proto:\n new_obj[key] = value\n continue\n\n if matches(value, proto[key]) and matches(proto[key], value):\n continue\n\n if isinstance(value, Mapping):\n new_obj[key] = subkeys(value, proto[key])\n continue\n\n new_obj[key] = value\n\n return new_obj",
"def isimplementation(cls_: Optional[Type[Any]], proto: Type[Any]) -> bool:\n if cls_ is None:\n return False\n\n proto_annotations = get_type_hints(proto)\n cls_annotations = get_type_hints(cls_)\n\n for attr in _get_protocol_attrs(proto):\n try:\n proto_concrete = getattr(proto, attr)\n cls_concrete = getattr(cls_, attr)\n except AttributeError:\n proto_concrete = proto_annotations.get(attr)\n cls_concrete = cls_annotations.get(attr)\n\n if cls_concrete is None:\n return False\n\n if isfunction(proto_concrete):\n if not func_satisfies(cls_concrete, proto_concrete):\n return False\n\n continue\n\n if cls_concrete != proto_concrete:\n return False\n\n return True",
"def _matches(o, pattern):\n if not len(o) == len(pattern):\n return False\n comps = zip(o,pattern)\n return all(isinstance(obj,kind) for obj,kind in comps)",
"def supports_protocol(self, obj, protocol):\n\n return self.adapt(obj, protocol, None) is not None",
"def looks_like_mapping(obj):\n meths = (\"items\", \"keys\", \"values\")\n for meth in meths:\n if not callable(getattr(obj, meth, None)):\n return False\n return True",
"def __eq__(self, obj):\r\n\r\n #Types must be the same\r\n if type(self) != type(obj):\r\n return False\r\n\r\n #The number of players must be the same\r\n if len(self.players) != len(obj.players):\r\n return False\r\n\r\n #Players must be the same\r\n for i in range(len(self.players)):\r\n if self.players[i] != obj.players[i]:\r\n return False\r\n\r\n #The number to win must be the same \r\n if self.num_to_win != obj.num_to_win:\r\n return False\r\n\r\n #The turn number must be the same\r\n if self.turn_number != obj.turn_number:\r\n return False\r\n\r\n #The max turn numbers must be the same\r\n if self.max_turns != obj.max_turns:\r\n return False\r\n\r\n #The winner must be the same\r\n if self.winner != obj.winner:\r\n return False\r\n\r\n #The current board must be the same\r\n if self.board != obj.board:\r\n return False\r\n\r\n #The board histories must be the same length\r\n if len(self.board_history) != len(obj.board_history):\r\n return False\r\n\r\n #The histories must be the same\r\n for i in range(len(self.board_history)):\r\n if self.board_history[i] != obj.board_history[i]:\r\n return False\r\n\r\n #If all these conditions are met then we return true\r\n return True",
"def __is_hard_match(self, obj):\n for attr in self.list:\n try:\n if getattr(obj, attr) != getattr(self, attr):\n return False\n except AttributeError:\n pass\n return True",
"def __eq__(self, p_object):\n if any([self[i] != p_object[i] for i in range(9)]):\n return False\n return True",
"def is_common(schema_obj):\n\n return is_protobuf(schema_obj) or is_wdl(schema_obj)",
"def supports_protocol(obj, protocol):\n manager = get_global_adaptation_manager()\n return manager.supports_protocol(obj, protocol)",
"def __eq__(self, obj: \"Property\") -> bool:\n return self.name == obj.name and self.property_type == obj.property_type",
"def array_equal_to(obj):\n return ArrayIsEqual(obj)",
"def pod_equals(x, y):\n return type(x) == type(y) and x.__dict__ == y.__dict__",
"def is_verifiable(obj):\n from amelie.claudia.models import Mapping\n\n if isinstance(obj, Mapping):\n return True\n\n for k in Mapping.RELATED_CLASSES:\n if isinstance(obj, Mapping.RELATED_CLASSES[k]):\n return True\n\n return False",
"def __eq__(self, vs) -> bool:\n return {*map(tuple, self.__elements)} == {*map(tuple, vs)}",
"def attr_is_equal(first_obj, second_obj, attr):\n import numpy as np\n\n # Avoid comparing None's.\n return attr_has_same_shape(first_obj, second_obj, attr) and np.array_equal(\n getattr(first_obj, attr), getattr(second_obj, attr)\n )",
"def has_exactly_type(obj, tpe):\r\n return type(obj) == tpe",
"def soft_assert_objects_are_mapped(\n selenium, soft_assert, src_obj, objs, *args, **kwargs\n):\n ui_service_cls = factory.get_cls_webui_service(\n objects.get_plural(objs[0].type))\n mapped_objs = (ui_service_cls(driver=selenium, *args, **kwargs).\n get_list_objs_from_tree_view(src_obj=src_obj))\n soft_assert.expect(\n [obj.tree_item_representation() for obj in objs] == mapped_objs,\n messages.AssertionMessages.OBJS_SHOULD_BE_MAPPED_TO_OBJ.format(\n mapped_objs_names=[obj.title for obj in objs],\n src_obj_name=src_obj.title))",
"def __eq__(self, obj):\n if not obj:\n return False\n\n return str(self.key()) == str(obj.key())",
"def applies(cls, obj):\n return type(obj) in cls.types",
"def test_convert_proto_plus_to_protobuf_if_protobuf(self):\n protobuf = ProtobufFixture()\n converted = util.convert_proto_plus_to_protobuf(protobuf)\n self.assertEqual(protobuf, converted)",
"def contains(item, obj):\n return obj.__contains__(item)",
"def _is_instance_of(obj: dict, geojson_type: str) -> bool:\n try:\n schema_name = next(t + '.json' for t in GEOJSON_TYPES\n if t.lower() == geojson_type.lower())\n except StopIteration:\n raise GeoJSONError(f'Specified geojson_type ({geojson_type}) does '\n 'not match a supported GeoJSON type.')\n\n filename = DATA_DIR / schema_name\n with open(filename, 'r') as src:\n schema = json.load(src)\n\n return Draft7Validator(schema).is_valid(obj)",
"def __cmp__(self, obj): \n # If obj is an Address, stringifying it puts it in a state where it\n # can be parsed by IP().\n other = IP(str(obj))\n\n # Compare IPs by byte representation.\n if self.family == other.family:\n return cmp(self._bytes, other.toBytes())\n else:\n return cmp(self.family, other.family)",
"def __eq__(self, obj):\n if isinstance(obj, SearchProblem):\n return hash(str(self)) == hash(str(obj))\n return False",
"def _match_all(self, obj, criteria):\n return all(getattr(obj, key_, None) == value_ for\n key_, value_ in criteria.items())"
] | [
"0.5698699",
"0.5630985",
"0.539721",
"0.52875316",
"0.51983243",
"0.5193745",
"0.5188951",
"0.51687074",
"0.5071189",
"0.5066488",
"0.5043099",
"0.5028768",
"0.5018447",
"0.49935037",
"0.49558058",
"0.4921788",
"0.49159846",
"0.4899973",
"0.48826578",
"0.48544973",
"0.4847865",
"0.48296016",
"0.48199898",
"0.47790623",
"0.4753069",
"0.47408903",
"0.47323993",
"0.4728276",
"0.4706957",
"0.46980193"
] | 0.82880807 | 0 |
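A small usage sketch for the matches() helper defined in the record above; it assumes matches() is in scope together with the Mapping/Sequence imports it relies on, and the example inputs are invented for illustration:

    from collections.abc import Mapping, Sequence  # assumed imports used by matches()

    # every key/value required by the prototype must be present in the object;
    # extra keys in the object are allowed
    assert matches({"a": 1, "b": [2, 3], "extra": 4}, {"a": 1, "b": [2, 3]})

    # a differing value, or a sequence of a different length, fails to match
    assert not matches({"a": 1}, {"a": 2})
    assert not matches([1, 2, 3], [1, 2])

    # non-container values fall back to plain equality
    assert matches("x", "x") and not matches("x", "y")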
Returns the test mapping "obj" after factoring out the items it has in common with the prototype mapping "proto". Consider a recursive merge operation, merge(a, b) on mappings a and b, that returns a mapping, m, whose keys are the union of the keys of a and b, and | def subkeys(obj, proto):
if not (isinstance(obj, Mapping) and isinstance(proto, Mapping)):
return obj
new_obj = {}
for key, value in obj.items():
if key not in proto:
new_obj[key] = value
continue
if matches(value, proto[key]) and matches(proto[key], value):
continue
if isinstance(value, Mapping):
new_obj[key] = subkeys(value, proto[key])
continue
new_obj[key] = value
return new_obj | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def matches(obj, proto):\n if isinstance(obj, Mapping):\n if not isinstance(proto, Mapping):\n return False\n\n return all((key in obj and matches(obj[key], val)) for key, val in proto.items())\n\n if isinstance(obj, Sequence) and not isinstance(obj, str):\n\n if not (isinstance(proto, Sequence) and not isinstance(proto, str)):\n return False\n\n if len(obj) != len(proto):\n return False\n\n return all(matches(obj[index], val) for index, val in enumerate(proto))\n\n return obj == proto",
"def forward_map(\n self, obj: CombinatorialObjectType\n ) -> Tuple[Optional[CombinatorialObjectType], ...]:\n return self.strategy.forward_map(self.comb_class, obj, self.children)",
"def soft_assert_objects_are_mapped(\n selenium, soft_assert, src_obj, objs, *args, **kwargs\n):\n ui_service_cls = factory.get_cls_webui_service(\n objects.get_plural(objs[0].type))\n mapped_objs = (ui_service_cls(driver=selenium, *args, **kwargs).\n get_list_objs_from_tree_view(src_obj=src_obj))\n soft_assert.expect(\n [obj.tree_item_representation() for obj in objs] == mapped_objs,\n messages.AssertionMessages.OBJS_SHOULD_BE_MAPPED_TO_OBJ.format(\n mapped_objs_names=[obj.title for obj in objs],\n src_obj_name=src_obj.title))",
"def split_batch_with_lookup(self, sub: torch.tensor, pra: torch.tensor,\n obj: torch.tensor) -> dict():\n model_batches = dict()\n\n for (model_name, _) in list(self.models.items()):\n model_batches[model_name] = dict()\n mask = [self.lookup[elem] == model_name for elem in pra.tolist()]\n model_batches[model_name]['s'] = sub[mask]\n model_batches[model_name]['p'] = pra[mask]\n model_batches[model_name]['o'] = obj[mask]\n\n return model_batches",
"def get_mapper(obj, *, expected=None):\n try:\n mapper = object.__getattribute__(obj, MAPPER)\n except AttributeError:\n mapper = None\n\n if mapper and expected is False:\n msg = \"{!r} is already mapped\".format(obj)\n raise TypeError(msg)\n\n if not mapper and expected is True:\n msg = \"{!r} is not mapped\".format(obj)\n raise TypeError(msg)\n\n return mapper",
"def object_from_protobuf(pb, model_type=None):\n key = None\n if isinstance(pb, entity_pb2.Entity):\n pb = pb._pb\n\n if pb.HasField(\"key\"): # Message field (Key)\n key = CustomIterator.key_from_protobuf(pb.key)\n key._type = SubclassMap.get()[key.kind]\n\n entity_props = {}\n\n for prop_name, value_pb in pb.properties.items():\n value = CustomIterator._get_value_from_value_pb(value_pb)\n entity_props[prop_name] = value\n\n obj = model_type._dotted_dict_to_object(entity_props, key)\n return obj",
"def _merge_obj(result, obj, pointer=''): # changed code\n if not isinstance(result, dict):\n result = {}\n\n if not isinstance(obj, dict):\n return obj\n\n for key, value in obj.items():\n if isinstance(value, dict):\n target = result.get(key)\n if isinstance(target, dict):\n _merge_obj(target, value, pointer=f'{pointer}/{key}') # changed code\n continue\n result[key] = {}\n _merge_obj(result[key], value, pointer=f'{pointer}/{key}') # changed code\n continue\n\n # new code\n if key in result:\n pointer_and_key = f'{pointer}/{key}'\n # Exceptions.\n if (value is None and pointer_and_key == '/definitions/Milestone/properties/documents/deprecated' and\n repo_name in ('ocds_milestone_documents_extension', 'public-private-partnerships')):\n warnings.warn(f're-adds {pointer}')\n elif (value == [] and pointer_and_key == '/required' and\n repo_name == 'ocds_pagination_extension'):\n warnings.warn(f'empties {pointer_and_key}')\n else:\n if is_profile:\n message = ' - check for repeats across extension_versions.json, dependencies, testDependencies'\n else:\n message = ''\n raise Exception(f'unexpectedly overwrites {pointer_and_key}{message}')\n\n if value is None:\n result.pop(key, None)\n continue\n result[key] = value\n return result",
"def map_structure(fn: Callable[[T], R], obj: Collection[T]) ->Collection[R]:\n if hasattr(obj, '--no-map--'):\n return fn(obj)\n if isinstance(obj, list):\n return [map_structure(fn, x) for x in obj]\n if isinstance(obj, tuple):\n if isinstance(obj, torch.Size):\n return fn(obj)\n if hasattr(obj, '_fields'):\n return type(obj)(*[map_structure(fn, x) for x in obj])\n else:\n return tuple(map_structure(fn, x) for x in obj)\n if isinstance(obj, dict):\n return {k: map_structure(fn, v) for k, v in obj.items()}\n if isinstance(obj, set):\n return {map_structure(fn, x) for x in obj}\n return fn(obj)",
"def structural_hash(obj: object) -> bytes:\n hasher = hashlib.blake2b()\n if isinstance(obj, (int, str, float, PurePath)):\n hasher.update(bytes(\"P\" + str(obj), \"utf-8\"))\n elif dataclasses.is_dataclass(obj):\n fields = dataclasses.fields(obj)\n hasher.update(bytes(f\"O{len(fields)}\\x20\", \"utf-8\"))\n for field in sorted(fields, key=lambda x: x.name):\n if not field.metadata.get(\"nohash\"):\n hasher.update(bytes(f\"F{len(field.name)}\\x20{field.name}\", \"utf-8\"))\n hasher.update(structural_hash(getattr(obj, field.name)))\n elif isinstance(obj, (collections.abc.Sequence, collections.abc.Set)):\n hasher.update(bytes(f\"L{len(obj)}\\x20\", \"utf-8\"))\n for member in obj:\n child_hash = structural_hash(member)\n hasher.update(bytes(f\"E{len(child_hash)}\\x20\", \"utf-8\"))\n hasher.update(child_hash)\n elif isinstance(obj, collections.abc.Mapping):\n hasher.update(bytes(f\"M{len(obj)}\\x20\", \"utf-8\"))\n for key, member in obj.items():\n child_hash = structural_hash(member)\n hasher.update(\n bytes(f\"E{len(key)}\\x20{key}\\x20{len(child_hash)}\\x20\", \"utf-8\")\n )\n hasher.update(child_hash)\n elif isinstance(obj, enum.Enum):\n hasher.update(bytes(str(obj), \"utf-8\"))\n elif obj is None:\n hasher.update(b\"N\")\n else:\n raise TypeError(\"Unhashable type\", obj)\n\n return hasher.digest()",
"def test_roundtrip_nested_map():\n Person = Map(\n MapEntrySpec(1, \"name\", String),\n MapEntrySpec(2, \"age\", UnsignedInt),\n \"Person\"\n )\n Family = Map(\n MapEntrySpec(1, \"mother\", Person),\n MapEntrySpec(2, \"father\", Person),\n \"Family\"\n )\n\n my_family = {\n \"mother\": {\n \"name\": \"Helen\",\n \"age\": 62\n },\n \"father\": {\n \"name\": \"Mark\",\n \"age\": 65\n }\n }\n\n roundtripped_family = Family.read(Family.to_bytes(my_family))\n assert my_family == roundtripped_family",
"def FromProto(cls, proto_obj):\n key=None\n if proto_obj.twitter_account:\n twitter_id = long(proto_obj.twitter_account.id_str)\n key = team_twitter_key(twitter_id)\n else:\n twitter_id = 0\n if proto_obj.score_reporter_account:\n score_reporter_id = proto_obj.score_reporter_account.id\n key = team_score_reporter_key(score_reporter_id)\n else:\n score_reporter_id = ''\n return Team(twitter_id=twitter_id, score_reporter_id=score_reporter_id,\n parent=key)",
"def _get_py_obj(self, ctx, obj, route=[]):\n def access(obj, key):\n if key in obj:\n return obj[key]\n return None\n\n cloned = None\n if isinstance(obj, (list, tuple, PyV8.JSArray)):\n cloned = []\n num_elements = len(obj)\n for index in range(num_elements):\n elem = obj[index]\n cloned.append(self._get_py_obj(ctx, elem, route + [index]))\n elif isinstance(obj, (dict, PyV8.JSObject)):\n cloned = {}\n for key in obj.keys():\n cloned_val = None\n if type(key) == int:\n val = None\n try:\n val = access(obj, str(key))\n except KeyError:\n pass\n if val is None:\n val = access(obj, key)\n cloned_val = self._get_py_obj(ctx, val, route + [key])\n else:\n cloned_val = self._get_py_obj(\n ctx, access(obj, key), route + [key])\n cloned[key] = cloned_val\n elif isinstance(obj, (str, bytes)):\n cloned = obj.decode('utf-8')\n else:\n cloned = obj\n return cloned",
"def deep_cmp(obj1, obj2):\n pass",
"def decode(intervention, obj, clz):\n actual_keys = set(obj.keys()) \n expected_keys = set(clz.expected_keys)\n target_name = clz.__name__\n\n intersection = actual_keys.intersection(expected_keys)\n not_enough = len(expected_keys) > len(intersection)\n too_many = len(actual_keys) > len(intersection)\n\n if not_enough:\n raise ValueError(\"Missing keys (%s); maybe input is not a %s object?\" % (\n str(expected_keys.difference(actual_keys)), target_name))\n\n elif too_many:\n raise ValueError(\"Input object contains too many keys (%s); has the specification for %s changed?\" % (\n str(actual_keys), target_name))\n\n else: return clz(intervention, **obj)",
"def default(self, obj):\n if isinstance(obj, tuple(TYPES.values())):\n key = '__%s__' % obj.__class__.__name__\n return {key: obj.__dict__}\n return json.JSONEncoder.default(self, obj)",
"def subselect(self, obj):\n return dict(\n (key, value) for (key, value)\n in obj.items()\n if key in self.defaults)",
"def getObjectMaps(self,toMod):\n if self.objectMaps == None: self.loadObjectMaps()\n subset = {}\n for key in self.objectMaps.keys():\n if key[1] == toMod:\n subset[key[0]] = self.objectMaps[key]\n return subset",
"def custom_encode(obj):\n if isinstance(obj, DictionaryMethods):\n key = '__Dictionary__'\n return {key: [list(obj), obj.alpha, obj.pat, obj.pat_args,\n obj.auto_fields]}\n elif isinstance(obj, Entry):\n return obj.data\n else:\n raise TypeError(\"obj {!r} of type {}\".format(obj, type(obj)))",
"def getObjectMap(self,fromMod,toMod):\n if self.objectMaps == None: self.loadObjectMaps()\n return self.objectMaps.get((fromMod,toMod),None)",
"def test_apply_scalar_map(self):\n super(TestObjDict, self).test_apply_scalar_map(_as_obj=True)",
"def _proto2object(\n proto: GetGroupsMessage_PB,\n ) -> \"GetGroupsMessage\":\n\n return GetGroupsMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )",
"def _apply(obj: Any, schema: Dict[Any, Any], key: str) -> Any:\n def get_type(sch) -> Any:\n return sch[type if type in sch else 'type']\n\n def default(value) -> Any:\n return value() if callable(value) else value\n\n extra = ''.join(['for ', key]) if key else ''\n if not isinstance(schema, (dict, type)) and schema != 'const':\n raise ValueError(f'schema must be type, dict or \"const\" {extra}')\n elif schema == 'const':\n return obj\n elif isinstance(schema, type):\n if isinstance(obj, schema):\n return obj\n raise ValueError(f'\"{obj}\" is not type of \"{schema}\" {extra}')\n\n if 'pre_call' in schema:\n obj = schema['pre_call'](obj)\n\n if (schema_type := get_type(schema)) == 'const':\n if obj not in schema['value']:\n raise ValueError(f'\"{obj}\" is not allowed as \"{key}\"')\n elif isinstance(schema_type, type):\n if not isinstance(obj, schema_type):\n raise ValueError(f'''expected type \"{schema_type}\" {extra} ; got {type(obj)}''')\n if 'filter' in schema and not schema['filter'](obj):\n raise ValueError(f'\"{key}\" not passed filter')\n if schema.get('blank') is False and not obj:\n raise ValueError(f'\"{key}\" is blank')\n if 'max_length' in schema and len(obj) > schema['max_length']:\n raise ValueError(f'\"{key}\" > max_length')\n if 'min_length' in schema and len(obj) < schema['min_length']:\n raise ValueError(f'\"{key}\" < min_length')\n\n if issubclass(schema_type, list):\n if 'value' in schema:\n obj = [_apply(i, schema['value'], key=key) for i in obj]\n elif issubclass(schema_type, dict):\n if 'value' in schema:\n new_obj = {}\n if unex := {i for i in obj if i not in schema['value']}:\n if schema.get('unexpected', False):\n new_obj.update(\n {\n i: obj[i]\n for i in unex\n }\n )\n else:\n raise ValueError(f'''Got unexpected keys: \"{'\", \"'.join([str(i) for i in unex])}\" {extra};''')\n if missed := {i for i in schema['value'] if i not in obj and 'default' not in schema['value'][i]}:\n raise ValueError(f'''expected keys \"{'\", \"'.join([str(i) for i in missed])}\" {extra}''')\n\n new_obj.update(\n {\n i:\n default(schema['value'][i]['default'])\n if i not in obj else\n _apply(\n obj=obj[i],\n schema=schema['value'][i],\n key=i,\n )\n for i in schema['value']\n }\n )\n obj = new_obj\n elif 'anykey' in schema:\n obj = {i: _apply(obj[i], schema['anykey'], i) for i in obj}\n else:\n raise ValueError(f'''schema has unknown type \"{schema_type}\"''')\n\n if 'post_call' in schema:\n obj = schema['post_call'](obj)\n return obj",
"def _get_mapper(obj):\n its_a_model = isinstance(obj, type)\n mapper = class_mapper if its_a_model else object_mapper\n return mapper(obj)",
"def test_proto_serialization() -> None:\n\n uid = UID(value=uuid.UUID(int=333779996850170035686993356951732753684))\n obj = SpecificLocation(id=uid, name=\"Test\")\n\n blob = SpecificLocation.get_protobuf_schema()(id=sy.serialize(uid), name=\"Test\")\n\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob\n assert sy.serialize(obj, to_proto=True) == blob",
"def _proto2object(\n proto: GetGroupMessage_PB,\n ) -> \"GetGroupMessage\":\n\n return GetGroupMessage(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n content=json.loads(proto.content),\n reply_to=_deserialize(blob=proto.reply_to),\n )",
"def default(self, obj):\n if dataclasses.is_dataclass(obj):\n return {\n **dict(__dataclass__=obj.__class__.__name__),\n **{field.name: self.default(getattr(obj, field.name)) for field in dataclasses.fields(obj)},\n }\n elif type(obj) in JSON_TYPES:\n return obj\n super(ExtendedJsonEncoder, self).default(obj)",
"def mergelots(bigdict, tblstojoin, joincol, how='outer'):\n for tbl in tblstojoin:\n if tbl == tblstojoin[0]:\n bigtbl = bigdict[tbl].copy()\n else:\n bigtbl = bigtbl.merge(bigdict[tbl], how=how, on=joincol)\n return bigtbl",
"def expect_obj(expected, actual):\n return {k: v for k, v in expected.iteritems() if actual.get(k) != v}",
"def _stringify_proto(obj):\n return obj.SerializeToString()",
"def _proto2object(\n proto: GetGroupsResponse_PB,\n ) -> \"GetGroupsResponse\":\n\n return GetGroupsResponse(\n msg_id=_deserialize(blob=proto.msg_id),\n address=_deserialize(blob=proto.address),\n status_code=proto.status_code,\n content=json.loads(proto.content),\n )"
] | [
"0.5256622",
"0.5186322",
"0.49844736",
"0.49193838",
"0.47653937",
"0.4669957",
"0.46696606",
"0.46444932",
"0.4550077",
"0.4536834",
"0.45043802",
"0.450001",
"0.44756943",
"0.44490245",
"0.44445476",
"0.4428748",
"0.442491",
"0.4419134",
"0.4414419",
"0.4387293",
"0.43722725",
"0.43613577",
"0.43577254",
"0.43504268",
"0.43477353",
"0.4347446",
"0.43387467",
"0.43315026",
"0.431055",
"0.43028417"
] | 0.6700285 | 0 |
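A worked example for the subkeys() helper in the record above; it relies on the matches() function from the earlier record being in scope, and the inputs are invented for illustration:

    obj = {"a": 1, "b": {"x": 1, "y": 2}, "c": 3}
    proto = {"a": 1, "b": {"x": 1}}

    # "a" and the nested "x" are factored out because they are identical to the
    # prototype; keys absent from the prototype ("y", "c") are kept
    assert subkeys(obj, proto) == {"b": {"y": 2}, "c": 3}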
Modifies the given object "yaml" so that it includes an "extends" key whose value features "key". If "extends" is not in yaml, then yaml is modified such that yaml["extends"] == key. If yaml["extends"] is a str, then yaml is modified such that yaml["extends"] == [yaml["extends"], key] If yaml["extends"] is a list that does not include key, then key is appended to the list. Otherwise, yaml is left unchanged. | def add_extends(yaml, key):
has_key = "extends" in yaml
extends = yaml.get("extends")
if has_key and not isinstance(extends, (str, Sequence)):
return
if extends is None:
yaml["extends"] = key
return
if isinstance(extends, str):
if extends != key:
yaml["extends"] = [extends, key]
return
if key not in extends:
extends.append(key) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def test_deep_extend(self):\n sdict = {\"bar\": {\"baz\": [1, 2]}}\n res = dictupdate.extend_dict_key_value(sdict, \"bar:baz\", [42, 42])\n self.assertEqual({\"bar\": {\"baz\": [1, 2, 42, 42]}}, res)\n\n # Extend a not-yet existing list\n res = dictupdate.extend_dict_key_value({}, \"bar:baz:qux\", [42])\n self.assertEqual({\"bar\": {\"baz\": {\"qux\": [42]}}}, res)\n\n # Extend with a dict (remember, foo has been updated in the first test)\n res = dictupdate.extend_dict_key_value(sdict, \"bar:baz\", {\"qux\": \"quux\"})\n self.assertEqual({\"bar\": {\"baz\": [1, 2, 42, 42, \"qux\"]}}, res)",
"def common_subobject(yaml, sub):\n match_list = set(k for k, v in yaml.items() if matches(v, sub))\n\n if not match_list:\n return yaml, None\n\n common_prefix = \".c\"\n common_index = 0\n\n while True:\n common_key = \"\".join((common_prefix, str(common_index)))\n if common_key not in yaml:\n break\n common_index += 1\n\n new_yaml = {}\n\n for key, val in yaml.items():\n new_yaml[key] = copy.deepcopy(val)\n\n if not matches(val, sub):\n continue\n\n new_yaml[key] = subkeys(new_yaml[key], sub)\n add_extends(new_yaml[key], common_key)\n\n new_yaml[common_key] = sub\n\n return new_yaml, common_key",
"def _replace(yaml_config, path_to_key, replacement_value, start=0, nested_level=0):\n nested_path_to_replace = path_to_key.split(\"/\")\n\n # our regex looks for a specific number of spaces to ensure correct\n # level of nesting. It matches to the end of the line\n search_string = (\n \" \" * nested_level + \".*\" + nested_path_to_replace[0] + \"(')?(\\\")?:.*\\n\"\n )\n matches = re.search(search_string, yaml_config[start:])\n\n # early return if we haven't found anything\n if not matches:\n return yaml_config\n\n # if we're on the last item in the path, we need to get the value and\n # replace it in the original file\n if len(nested_path_to_replace) == 1:\n # replace the current key:value with the new replacement value\n match_start = start + matches.start(0) + len(\" \" * nested_level)\n match_end = start + matches.end(0)\n yaml_config = (\n yaml_config[:match_start]\n + \"{}: {}\\n\".format(\n nested_path_to_replace[0],\n _get_yaml_replacement_value(replacement_value, nested_level),\n )\n + yaml_config[match_end:]\n )\n\n return yaml_config\n\n # set new start point to past current match and move on to next match\n start = matches.end(0)\n nested_level += 1\n del nested_path_to_replace[0]\n\n return _replace(\n yaml_config,\n \"/\".join(nested_path_to_replace),\n replacement_value,\n start,\n nested_level,\n )",
"def set(self, key, value):\n try:\n if value.lower() in ['true', 'false']:\n value = value.lower() == 'true'\n except:\n pass\n\n try:\n if \".\" in key:\n keys = key.split(\".\")\n #\n # create parents\n #\n parents = keys[:-1]\n location = self.data\n for parent in parents:\n if parent not in location:\n location[parent] = {}\n location = location[parent]\n #\n # create entry\n #\n location[keys[-1]] = value\n else:\n self.data[key] = value\n\n except KeyError:\n raise ValueError(f\"The key '{key}' could not be found in the yaml file '{self.filename}'\")\n except Exception as e:\n print(e)\n raise ValueError(\"unkown error\")\n\n self.flush()",
"def format_yaml(yaml, **kwargs):\n template = _YamlTemplate(yaml)\n try:\n return template.substitute(flatten(kwargs or {},\n reducer='dot'))\n except KeyError as e:\n raise RuntimeError(\n 'Unknown placeholder: {}'.format(e.args[0])) from e",
"def loadfrom_yaml(key, path):\n\twith open(path, 'r') as f:\n\t\td = yaml.load(f)\n\t\tnew_namespace(key)\n\t\t\n\t\t# ns = get_namespace(key)\n\n\t\t# for key, value in d.items():\n\t\t# \t_recurse(0, key, value, ns)",
"def _extend_instruction(self, orig, extension):\n # keys that are turned into arrays & extended\n for ex_key in ['extends', 'then']:\n # Nothing to extend, skip out the pop at end\n if ex_key not in extension:\n continue\n # We can just copy it over\n elif ex_key not in orig:\n orig[ex_key] = extension[ex_key]\n else:\n # Wrap the original value in a list\n if not isinstance(orig[ex_key], list):\n orig[ex_key] = [orig[ex_key]]\n\n # Insert values at beginning if extension is also list, append otherwise.\n if isinstance(extension[ex_key], list):\n for i, v in enumerate(extension[ex_key]):\n orig[ex_key].insert(i, v)\n else:\n orig[ex_key].insert(0, extension[ex_key])\n\n # Clear out key for update at end\n extension.pop(ex_key)\n\n # keys that are updated\n for up_key in ['cookies', 'headers', 'posts']:\n # Nothing to update, skip out pop at end\n if up_key not in extension:\n continue\n # We can just copy it over\n elif up_key not in orig:\n orig[up_key] = extension[up_key]\n # If they're both dicts, then we update. If not, then a replace\n # will happen.\n else:\n orig_val = orig[up_key]\n up_val = extension[up_key]\n # Prefer orig_val\n if isinstance(orig_val, dict) and isinstance(up_val, dict):\n up_val.update(orig_val)\n orig[up_key] = up_val\n # Keep things available for total replacement.\n else:\n continue\n\n # Clear out key for update at end\n extension.pop(up_key)\n\n # everything else is replaced.\n orig.update(extension)",
"def _yaml_extension(self, string):\n if string.endswith(\".yaml\"):\n pass\n else:\n string += \".yaml\"\n return string",
"def _yaml_extension(self, string):\n if string.endswith(\".yaml\"):\n pass\n else:\n string += \".yaml\"\n return string",
"def transform_snapcraft_yaml(snapcraft_yaml):\n for key in snapcraft_yaml:\n stream = StringIO()\n data = {}\n data[key] = snapcraft_yaml[key]\n helpers.dump_yaml(data, stream, typ=\"rt\")\n stream = stream.getvalue()\n\n # Assuming content starts with yaml key name, wrap it in <b>\n # for some code highligthing in HTML\n content = re.sub(YAML_KEY_REGEXP, r\"<b>\\1</b>\\2\", stream)\n snapcraft_yaml[key] = content\n\n return snapcraft_yaml",
"def inherit_config(child, parent, keys):\n for key in keys:\n if key not in child.keys():\n child[key] = parent[key]\n print(\n \"{} not found in io.yaml file, falling back to main config\".format(key)\n )\n\n return child",
"def process(yamlDict, subDict=None, path=[], first=True):\n\n if subDict is None:\n subDict = yamlDict.copy()\n\n for key, value in subDict.items():\n\n if first:\n\n first = False\n path = path + [key]\n\n else: \n path[-1] = key\n\n if isinstance(value, dict):\n process(yamlDict, value, path)\n\n elif isinstance(value, str):\n\n while \"ref\" in value:\n\n # Parse value for target\n idxA = value.find(\"ref(\") + 4\n idxB = value[idxA:].find(')') + idxA\n target = value[idxA:idxB].split('.')\n\n # Error handling: circular reference\n if target == path:\n raise ValueError(\"Circular reference in input file\", value)\n\n # Error handling: invalid reference\n try:\n targetValue = get_value(yamlDict, target)\n except:\n raise KeyError(\"Invalid reference in input file\", value)\n\n # Value may be float, must cast to string\n refStr = \"ref(\" + value[idxA:idxB] + ')'\n value = value.replace(refStr, str(targetValue))\n\n # Evaluate any arithmetic expressions & reassign field\n value = math_eval(value)\n set_value(yamlDict, value, path)",
"def conversion_yaml():\r\n data ={\r\n 'name': 'george',\r\n 'age': 16,\r\n 'friends':\r\n [{'name': 'marry', 'age': 16}, {'name': 'jack', 'age': 17}]\r\n }\r\n yaml_data = yaml.dump(data)\r\n dirname = os.path.dirname(os.path.dirname(__file__))\r\n # data_dir = os.path.join(dirname, 'data')\r\n data_dir = '/'.join([dirname, 'data'])\r\n file_path = data_dir + '/' + 'test.yaml'\r\n with open(file_path, 'w') as fw:\r\n fw.write(yaml_data)\r\n print(yaml_data)",
"def update_file(filename,d):\n if os.path.exists(filename):\n f_old = open(filename,'r')\n d_old = yaml.load(f_old)\n f_old.close()\n d_old.update(d)\n d = d_old\n f = open(filename, 'w')\n yaml.dump(d, f)\n f.close()",
"def patch_config(self_config, indict):\n for key in self_config:\n if isinstance(self_config[key], Section) \\\n and key in indict and isinstance(indict[key], Section):\n self_config[key].parent = self_config\n self_config[key].main = self_config.main\n self_config.comments[key] = indict.comments[key]\n self_config.inline_comments[key] = indict.inline_comments[key]\n patch_config(self_config[key], indict[key])",
"def fix_data(yaml_data):\n if isinstance(yaml_data, list):\n return [ fix_data(item) for item in yaml_data ]\n if isinstance(yaml_data, dict):\n return { str(key): fix_data(yaml_data[key]) for key in yaml_data }\n return yaml_data",
"def merge(target, source):\n for key, value in source.items():\n if key not in target:\n target[key] = value\n elif type(target[key]) is dict:\n if key in self.OVERRIDE_ON_EXTENDS:\n target[key].update(value)\n else:\n merge(target[key], value)\n elif type(target[key]) is list:\n target[key] += value\n return target",
"def extend(d, k, v):\n\tn = d.copy()\n\tn[k] = v\n\treturn n",
"def json_yaml_adapt(self, data):\n data = super(YAML_Data, self).json_yaml_adapt(data)\n if isinstance(data, dict):\n for k in data:\n v = data[k]\n if isinstance(v, basestring):\n if len(v) == 0:\n data[k] = None\n if len(v) == 10 and v[4] == '-' and v[7] == '-':\n # date string of for 2015-10-05\n try:\n d = datetime.date(*map(int, v.split('-')))\n except:\n continue\n data[k] = d\n return data",
"def update_nested_dict(old_dict, new_dict, extend_list_values=False):\n for k, v in new_dict.items():\n if k in old_dict.keys():\n if isinstance(v, dict) and isinstance(old_dict[k], dict):\n old_dict[k] = update_nested_dict(\n old_dict[k], v, extend_list_values=extend_list_values\n )\n elif (\n extend_list_values\n and isinstance(old_dict[k], list)\n and isinstance(v, list)\n ):\n old_dict[k].extend(v)\n elif v:\n old_dict[k] = v\n else:\n old_dict[k] = v\n return old_dict",
"def set_deep(config, key_seq, new_val):\n if 1 == len(key_seq):\n config[key_seq[0]] = new_val\n else:\n set_deep(config[key_seq[0]], key_seq[1:], new_val)",
"def dictmerge(x, y, path=None, overwrite=False, extend=False):\n if path is None:\n path = []\n for key in y:\n if key in x:\n if isinstance(x[key], (dict, MutableMapping)) and isinstance(\n y[key], (dict, MutableMapping)\n ):\n dictmerge(\n x[key],\n y[key],\n path + [str(key)],\n overwrite=overwrite,\n extend=extend,\n )\n elif x[key] == y[key]:\n pass # same leaf value\n else:\n if not overwrite:\n raise Exception(\"Conflict at %s\" % \".\".join(path + [str(key)]))\n if isinstance(x[key], list) and isinstance(y[key], list) and extend:\n x[key].extend(y[key])\n else:\n x[key] = y[key]\n else:\n x[key] = y[key]\n return x",
"def update_schema(cls,yaml_str,dpath=\"properties\",update=True):\n sub_schema=yaml_manager.readstring(yaml_str)\n orig_schema=cls._validation_schema\n new_schema=orig_schema.get_copy()\n if update:\n new_schema.update_nested(dpath,sub_schema)\n else:\n new_schema.set_nested(dpath,sub_schema)\n return new_schema",
"def _include_yaml(loader: SafeLineLoader, node: yaml.nodes.Node) -> JSON_TYPE:\n fname = os.path.join(os.path.dirname(loader.name), node.value)\n try:\n return _add_reference(load_yaml(fname), loader, node)\n except FileNotFoundError as exc:\n raise XKNXException(f\"{node.start_mark}: Unable to read file {fname}.\") from exc",
"def deepupdate(target, src, overwrite=True):\n for k, v in src.items():\n if type(v) == list:\n if k not in target:\n target[k] = copy.deepcopy(v)\n elif overwrite is True:\n target[k].extend(v)\n elif type(v) == dict:\n if k not in target:\n target[k] = copy.deepcopy(v)\n else:\n deepupdate(target[k], v, overwrite=overwrite)\n elif type(v) == set:\n if k not in target:\n target[k] = v.copy()\n elif overwrite is True:\n if type(target[k]) == list:\n target[k].extend(v)\n elif type(target[k]) == set:\n target[k].update(v)\n else:\n raise TypeError(\"Cannot update {} with {}\".format(\n type(target[k]),\n type(v))\n )\n else:\n if k not in target or overwrite is True:\n target[k] = copy.copy(v)",
"def extend_dict(org_dict, new_dict, allow_overwrite=None):\n if not new_dict:\n return org_dict\n if not org_dict:\n return new_dict\n for key, value in new_dict.iteritems():\n if value:\n if not org_dict.get(key):\n # orginal dict doesn't has this key (or no value), just overwrite\n org_dict[key] = value\n else:\n # original dict already has this key, append results\n if isinstance(value, list):\n # make sure that our original value also is a list\n if isinstance(org_dict[key], list):\n for item in value:\n if item not in org_dict[key]:\n org_dict[key].append(item)\n # previous value was str, combine both in list\n elif isinstance(org_dict[key], (str, unicode)):\n org_dict[key] = org_dict[key].split(\" / \")\n for item in value:\n if item not in org_dict[key]:\n org_dict[key].append(item)\n elif isinstance(value, dict):\n org_dict[key] = extend_dict(org_dict[key], value, allow_overwrite)\n elif allow_overwrite and key in allow_overwrite:\n # value may be overwritten\n org_dict[key] = value\n else:\n # conflict, leave alone\n pass\n return org_dict",
"def merge_yamls(yaml1, yaml2):\n updated_tools_yaml = copy.deepcopy(yaml1)\n\n # unique_tools = [dict(y) for y in set(tuple(x.items())\n # for x in yaml1['tools'])]\n\n # copy base and updated tools entries (except the revisions list of each)\n for tool_entry in yaml2['tools']:\n if tool_entry not in yaml1['tools']:\n updated_tools_yaml['tools'].append(tool_entry)\n\n return updated_tools_yaml",
"def transform_dict(config_dict: Dict, expand: bool = True):\n ret: Dict[str, Any] = {}\n for k, v in config_dict.items():\n if v is None or isinstance(v, (int, float, str)):\n ret[k] = v\n elif isinstance(v, (list, tuple, set)):\n # Need to check if item in iterable is YAML-friendly\n t = transform_dict(dict(enumerate(v)), expand)\n # Transform back to iterable if expand is False\n ret[k] = t if expand else [t[i] for i in range(len(v))]\n elif isinstance(v, dict):\n ret[k] = transform_dict(v, expand)\n else:\n # Transform to YAML-friendly (str) format\n # Need to handle both Classes, Callables, Object Instances\n # Custom Classes might not have great __repr__ so __name__ might be better in these cases\n vname = v.__name__ if hasattr(v, '__name__') else v.__class__.__name__\n ret[k] = f\"{v.__module__}:{vname}\"\n return ret",
"def _dump_yaml(cls, dumper: yaml.Dumper, source: \"YamlModifier\") -> typing.Any:\n if isinstance(source.value, (list, tuple)):\n return dumper.represent_sequence(source.label(), source.value)\n return dumper.represent_mapping(source.label(), source.value)",
"def populate_string( yaml_string, data={}):\n import random\n\n def replace_in_line(line):\n if '{{' in line and '}}' in line and not ('#' in line and line.index('#') < line.index('{{')):\n begin = line.index('{{')\n end = line.index('}}', begin)\n variable_name = line[begin:end].strip().replace('{{','').replace('}}','').strip()\n try:\n return (\n line[:begin].replace('{{','').replace('}}','') +\n str(xeval(variable_name, merge(data, os.environ))) +\n line[end:].replace('}}','').replace('{{','')\n )\n except:\n var = locate_variable(line)\n raise Exception('yaml file needs all data to be evaluated: {{{{ {} }}}}'.format(variable_name))\n\n\n else:\n return line\n\n new_lines = list(map(replace_in_line, yaml_string.splitlines()))\n return '\\n'.join(new_lines)"
] | [
"0.5486271",
"0.5451538",
"0.5246947",
"0.5115644",
"0.49650216",
"0.4926991",
"0.49184754",
"0.4715134",
"0.4715134",
"0.46654904",
"0.46643496",
"0.46112788",
"0.46058592",
"0.4584441",
"0.45826346",
"0.45754874",
"0.4569336",
"0.4566999",
"0.45589757",
"0.45538986",
"0.45501965",
"0.45202324",
"0.4515921",
"0.4493655",
"0.44902098",
"0.44659263",
"0.44428682",
"0.44337437",
"0.44241965",
"0.44167635"
] | 0.8145743 | 0 |
Factor prototype object "sub" out of the values of mapping "yaml". | def common_subobject(yaml, sub):
    match_list = set(k for k, v in yaml.items() if matches(v, sub))
    if not match_list:
        return yaml, None
    common_prefix = ".c"
    common_index = 0
    while True:
        common_key = "".join((common_prefix, str(common_index)))
        if common_key not in yaml:
            break
        common_index += 1
    new_yaml = {}
    for key, val in yaml.items():
        new_yaml[key] = copy.deepcopy(val)
        if not matches(val, sub):
            continue
        new_yaml[key] = subkeys(new_yaml[key], sub)
        add_extends(new_yaml[key], common_key)
    new_yaml[common_key] = sub
    return new_yaml, common_key | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def __construct(config, yml):\n\n for key in yml:\n if type(yml[key]) == dict:\n # create an object for the subsection\n klass = type(key, (), {})\n klass.__repr__ = repr_fx\n klass.__str__ = str_fx\n klass.__getitem__ = get_item_fx\n klass.get = get_fx\n obj = klass()\n __construct(obj, yml[key])\n setattr(config, key, obj)\n else:\n # just set simple value\n setattr(config, key, yml[key])",
"def __init__(self, data):\n assert isinstance(data, SplitDict)\n self.first = Classifier(data.super)\n self.second = dict()\n for meta in data.keys():\n self.second[meta] = Classifier(data[meta])",
"def _parse_subdefinitions(subdefinitions):\r\n parsed = {'definition': subdefinitions['difino']}\r\n parsed['examples'] = [\r\n {\r\n 'example': example['ekzemplo']\r\n } for example in subdefinitions['ekzemploj']]\r\n return parsed",
"def subkeys(obj, proto):\n if not (isinstance(obj, Mapping) and isinstance(proto, Mapping)):\n return obj\n\n new_obj = {}\n for key, value in obj.items():\n if key not in proto:\n new_obj[key] = value\n continue\n\n if matches(value, proto[key]) and matches(proto[key], value):\n continue\n\n if isinstance(value, Mapping):\n new_obj[key] = subkeys(value, proto[key])\n continue\n\n new_obj[key] = value\n\n return new_obj",
"def process(yamlDict, subDict=None, path=[], first=True):\n\n if subDict is None:\n subDict = yamlDict.copy()\n\n for key, value in subDict.items():\n\n if first:\n\n first = False\n path = path + [key]\n\n else: \n path[-1] = key\n\n if isinstance(value, dict):\n process(yamlDict, value, path)\n\n elif isinstance(value, str):\n\n while \"ref\" in value:\n\n # Parse value for target\n idxA = value.find(\"ref(\") + 4\n idxB = value[idxA:].find(')') + idxA\n target = value[idxA:idxB].split('.')\n\n # Error handling: circular reference\n if target == path:\n raise ValueError(\"Circular reference in input file\", value)\n\n # Error handling: invalid reference\n try:\n targetValue = get_value(yamlDict, target)\n except:\n raise KeyError(\"Invalid reference in input file\", value)\n\n # Value may be float, must cast to string\n refStr = \"ref(\" + value[idxA:idxB] + ')'\n value = value.replace(refStr, str(targetValue))\n\n # Evaluate any arithmetic expressions & reassign field\n value = math_eval(value)\n set_value(yamlDict, value, path)",
"def transform(self, tags, values_to_sub):\n for tag, properties in tags.items():\n val = values_to_sub.get(tag)\n values_to_sub[tag] = self.transform_val(properties, val)\n if properties.get(\"children\") is not None:\n children = properties.get(\"children\")\n for child_tag, child_properties in children.items():\n child_val = self.transform_val(child_properties, val)\n values_to_sub[child_tag] = child_val\n return values_to_sub",
"def from_yaml(self, yaml):\n self.hwAddress = yaml.get('hwAddress')\n if self.hwAddress:\n self.hwAddress = self.hwAddress.lower()\n self.ip = yaml.get('IP')\n self.formulas = {}\n for f in yaml:\n if isinstance(yaml[f], dict):\n self.formulas[f] = yaml[f]\n\n self.hwtype = yaml.get('hwtype')",
"def variableSub(self, subMap):\n\t\t#create a copy of our Statement\n\t\treturned = copy.deepcopy(self)\n\t\t\n\t\t#for every variable specified in the input map\n\t\tfor variable in subMap.keys():\n\t\t\t# get all the themes it corresponds to\n\t\t\tif variable in returned.VariableMap.keys():\n\t\t\t\tthemes = returned.VariableMap[variable]\n\t\t\t\t#set all of the themes to the variable specificed\n\t\t\t\tfor theme in themes:\n\t\t\t\t\tif theme in returned.ArgDict.keys():\n\t\t\t\t\t\treturned[theme] = subMap[variable]\n\t\treturn returned",
"def from_dict(self, data: dict):\n for name, sub in data.items():\n if hasattr(getattr(self, name, None), \"from_dict\"):\n getattr(self, name).from_dict(sub)\n else:\n setattr(self, name, decoder(sub))",
"def passivize(rule):\n rule[\"mother\"][\"subcat\"] = {\n \"obj\": None,\n \"preps\": {\n \"by\": [[\"*Subj\"]]}}\n\n rule[\"mother\"][\"hooks\"] = {\n \"head\": [\"*Obj\"]}\n\n rule[\"dtrs\"][0][\"subcat\"] = {\n \"obj\": [\"*Obj\"]}\n\n rule[\"dtrs\"][0][\"hooks\"] = {\n \"subj\": [\"*Subj\"]}\n\n return rule",
"def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> \"ToJson\":\n try:\n value = loader.construct_mapping(node, deep=True)\n except yaml.constructor.ConstructorError:\n value = loader.construct_sequence(node, deep=True)\n return cls(value)",
"def convert_sub(sub):\n\n args = sub.args\n (ref_aa, pos, new_aa) = args\n\n parent_fn_name = sub.parent_function.name_short\n prefix_list = {\"p\": \"p.\", \"r\": \"r.\", \"g\": \"c.\"}\n prefix = prefix_list[parent_fn_name]\n\n new_var_arg = f'\"{prefix}{belspec[\"namespaces\"][\"AminoAcid\"][\"to_short\"][ref_aa.value]}{pos.value}{belspec[\"namespaces\"][\"AminoAcid\"][\"to_short\"][new_aa.value]}\"'\n\n new_var = Function(\"var\", version=version)\n\n new_var.add_argument(StrArg(new_var_arg, new_var))\n\n return new_var",
"def _load_subset(cls_file, subset):\n with open(cls_file) as cls_yml:\n cls_dict = yaml.load(cls_yml)\n\n if not subset:\n return cls_dict\n\n try:\n return {x:cls_dict[x] for x in subset}\n except KeyError as err:\n keys = ', '.join(cls_dict.keys())\n raise ValueError('{} not in {}'.format(err.args[0], keys))",
"def apply(self, subj, include_unmapped=False):\n def selfref(k, v): return subj.get(v, '$' + k) == '$' + k\n variables = {k: subj[v] for k, v in self.items() if not selfref(k, v)}\n if include_unmapped:\n mapped = set(self.values()) | set(variables)\n variables.update({k: subj[k] for k in subj if k not in mapped})\n return variables",
"def __sub__(self,other):\n self._obj['u'] -= other._obj['u']\n self._obj['v'] -= other._obj['v']\n return self._obj",
"def subcfg2instance(\n cfg: DictConfig,\n keyname: str,\n valueidx: int,\n namespace: list = [],\n **kwargs: dict\n):\n # kwargs to variables\n for k,v in kwargs.items():\n globs = globals()\n locs = locals()\n exec(f'{k} = v', globs, locs)\n # multiple or one option\n if type(cfg[keyname]) == ListConfig:\n assert type(valueidx) == int, f'{keyname} has multiple options, but no index is givin.'\n c = cfg[keyname][valueidx]\n else:\n raise ValueError(f'expected ListConfig, but got {type(cfg[keyname])} in cfg {keyname} value')\n # if int, return directly\n if type(c) == int or type(c) == float:\n return c\n elif type(c) == ListConfig:\n return list(c)\n # if str enclosed in \", return as str\n elif type(c) == str and c.startswith(\"'\") and c.endswith(\"'\"):\n return c[1:-1]\n # if str, regarded as classname\n elif type(c) == str:\n classname = c\n arguments = {}\n elif type(c) == DictConfig:\n classname = list(c.keys())[0]\n assert type(classname) == str, f'expected str, but got {type(classname)} in {keyname} classname'\n arguments = list(c.values())[0]\n assert type(arguments) == DictConfig, f'expected DictConfig, but got {type(arguments)} in {keyname} arguments'\n globs = globals()\n locs = locals()\n tmp = {}\n for k,v in arguments.items():\n assert type(k) == str, f'expected str, but got {type(k)} in {keyname} argument key: {k}'\n if type(v) == int or type(v) == float or type(v) == bool:\n tmp[k] = v\n elif type(v) == str and v.startswith(\"'\") and v.endswith(\"'\"):\n tmp[k] = v[1:-1]\n elif type(v) == str:\n tmp[k] = eval(v, globs, locs)\n else:\n raise ValueError(f'expected ListConfig, DictConfig or str, but got {type(v)} in {keyname} argument value: {v}')\n arguments = tmp\n else:\n raise ValueError(f'expected DictConfig or str, but got {type(c)} in {c}')\n # get the operation corresponding to the class name\n return _classname2instance(classname, arguments, namespace)",
"def from_yaml(cls, loader, node):\n m = loader.construct_mapping(node)\n ret = cls()\n for k in cls._yaml_keys:\n setattr(ret, k, m[k])\n return ret",
"def test_transform_object(self):\n # Test object with nested \"international\" fields\n obj1 = {\n \"international\": {\n \"display_name\": {\n \"af\": \"Dokumentbestuurstelsel\",\n \"fr\": \"type de logiciel\",\n \"ro\": \"colecție organizată a documentelor\",\n }\n }\n }\n transform_object(obj1, \"international\")\n self.assertDictEqual(\n {\n \"international\": {\n \"display_name\": {\n \"keys\": [\"af\", \"fr\", \"ro\"],\n \"values\": [\n \"Dokumentbestuurstelsel\",\n \"type de logiciel\",\n \"colecție organizată \" \"a documentelor\",\n ],\n }\n }\n },\n obj1,\n )\n\n # Test object with nested \"international\" none\n obj2 = {\"international\": {\"display_name\": None}}\n transform_object(obj2, \"international\")\n self.assertDictEqual({\"international\": {\"display_name\": None}}, obj2)\n\n # Test object with nested \"abstract_inverted_index\" fields\n obj3 = {\n \"abstract_inverted_index\": {\n \"Malignant\": [0],\n \"hyperthermia\": [1],\n \"susceptibility\": [2],\n \"(MHS)\": [3],\n \"is\": [4, 6],\n \"primarily\": [5],\n }\n }\n transform_object(obj3, \"abstract_inverted_index\")\n self.assertDictEqual(\n {\n \"abstract_inverted_index\": {\n \"keys\": [\"Malignant\", \"hyperthermia\", \"susceptibility\", \"(MHS)\", \"is\", \"primarily\"],\n \"values\": [\"0\", \"1\", \"2\", \"3\", \"4, 6\", \"5\"],\n }\n },\n obj3,\n )\n\n # Test object with nested \"abstract_inverted_index\" none\n obj4 = {\"abstract_inverted_index\": None}\n transform_object(obj4, \"abstract_inverted_index\")\n self.assertDictEqual({\"abstract_inverted_index\": None}, obj4)",
"def test_roundtrip_nested_user_defined_nested_map():\n club = {\n \"members\": [\n dict(name=\"Bede\", age=20),\n dict(name=\"Jake\", age=21),\n dict(name=\"Cal\", age=22)\n ],\n \"name\": \"The Kool Kids Klub\"\n }\n Club = Map.from_file(\"definitions/Club.buf\")\n assert club == Club.read(bytes(Club.to_bytes(club)))",
"def from_dict(cls, dikt) -> 'BusinessSubCategory':\n return util.deserialize_model(dikt, cls)",
"def make_sub(self, sub):\n [lu, subs] = [self.lineup, self.subs]\n if 'PositionSwitch' in str(type(sub)):\n done = False\n for player in lu:\n if player.id == sub.player:\n done = True\n player.switch.append(player.pos)\n player.pos = sub.pos\n if sub.pos in player.switch:\n player.switch.remove(sub.pos)\n if not done:\n sub_idx = find_player_index(subs, sub.player)\n if sub.pos == 'p':\n subs[sub_idx].status = 'entered'\n if not len([s for s in lu if s.pos == 'p']) > 0:\n subs[sub_idx].order = 10\n lu.append(subs.pop(sub_idx))\n else:\n print(\"ERROR: NOT SURE WHAT TO DO WITH SUB\")\n print([p.__dict__ for p in lu])\n print(sub.__dict__)\n\n elif 'OffensiveSub' in str(type(sub)):\n lu_idx = find_player_index(lu, sub.sub)\n sub_idx = find_player_index(subs, sub.player)\n if sub_idx is None:\n print(\"ERROR: \" + str(sub.__dict__))\n else:\n if subs[sub_idx].status == 'removed':\n print('ILLEGAL SUB ' + str(subs[sub_idx].__dict__))\n if not lu_idx is None:\n lu[lu_idx].status = 'removed'\n subs.append(lu.pop(lu_idx))\n lu.insert(lu_idx, subs.pop(sub_idx))\n\n elif 'DefensiveSub' in str(type(sub)):\n lu_idx = find_player_index(lu, sub.sub)\n sub_idx = find_player_index(subs, sub.player)\n if sub_idx is None:\n if sub.pos == 'p':\n sub_idx = find_player_index(lu, sub.player)\n if not sub_idx is None and not lu_idx is None:\n add = lu[sub_idx]\n lu[lu_idx].status = 'removed'\n subs.append(lu.pop(lu_idx))\n lu.insert(lu_idx, add)\n if lu[lu_idx].order == 10:\n lu[lu_idx].order = lu_idx+1\n else:\n print(\"ERROR: \" + str(sub.__dict__))\n else:\n if subs[sub_idx].status == 'removed':\n print('ILLEGAL SUB ' + str(subs[sub_idx].__dict__ ))\n if not lu_idx is None: \n lu[lu_idx].status = 'removed'\n if lu[lu_idx].order != subs[sub_idx].order:\n print(\"ASSUMING ORDER FOR SUB: \" + subs[sub_idx].name)\n subs[sub_idx].order = lu[lu_idx].order\n for p in lu:\n if p.pos == subs[sub_idx].pos:\n p.pos = ''\n subs.append(lu.pop(lu_idx))\n lu.insert(lu_idx, subs.pop(sub_idx))\n\n elif 'Removal' in str(type(sub)):\n if lu[-1].id == sub.sub:\n lu_idx = len(lu)-1\n else:\n lu_idx = find_player_index(lu, sub.sub)\n if not lu_idx is None:\n lu[lu_idx].status = 'removed'\n subs.append(lu.pop(lu_idx))\n \n\n [self.lineup, self.subs] = [lu, subs]",
"def __json_decode__(cls, value: Dict[str, Any]) -> \"BaseDiscriminator\":\r\n return cls.from_config(value)",
"def test_translate_struct_dict_unique_key(self):\n root = netapp_api.NaElement('root')\n child = {'e1': 'v1', 'e2': 'v2', 'e3': 'v3'}\n root.translate_struct(child)\n self.assertEqual(len(root.get_children()), 3)\n self.assertEqual(root.get_child_content('e1'), 'v1')\n self.assertEqual(root.get_child_content('e2'), 'v2')\n self.assertEqual(root.get_child_content('e3'), 'v3')",
"def test_translate_struct_dict_unique_key(self):\n root = netapp_api.NaElement('root')\n child = {'e1': 'v1', 'e2': 'v2', 'e3': 'v3'}\n root.translate_struct(child)\n self.assertEqual(3, len(root.get_children()))\n self.assertEqual('v1', root.get_child_content('e1'))\n self.assertEqual('v2', root.get_child_content('e2'))\n self.assertEqual('v3', root.get_child_content('e3'))",
"def test_map_deserialization(self):\r\n \r\n original = DeserializationTestModel.create(count=5, text='happy')\r\n nested = original.get_map()\r\n\r\n assert isinstance(nested, dict)\r\n assert nested['vertex'] == original\r\n assert nested['number'] == 5",
"def scaledict(ajson):\n thedict = {'url': ajson['url'],\n 'title': ajson['title'],\n 'text': ajson['text'],\n 'num_images': len(ajson['images']),\n 'authors': str(ajson['authors'])\n }\n\n ext = tldextract.extract(ajson['url'])\n thedict['domain'] = ext.domain\n return thedict",
"def fn_sub(self, value):\n\n if isinstance(value, list):\n value, variables = value\n else:\n # only template parameter names, resource logical IDs, and resource attributes, will be parsed\n value, variables = value, {}\n\n for name, target in variables.items():\n value = value.replace('${{{}}}'.format(name), target)\n\n return Functions.SUB_VARIABLE_PATTERN.sub(self._sub_variable, value)",
"def subs(self, pre, post):\n return SubbedBasisFunction(self, pre, post)",
"def test_loading_different_versions_of_yaml(self):\n arkane_spc_v_241 = ArkaneSpecies.__new__(ArkaneSpecies)\n arkane_spc_v_241.load_yaml(path=os.path.join(self.data_path, 'vinoxy_v_2.4.1.yml'))\n self.assertIsInstance(arkane_spc_v_241, ArkaneSpecies) # checks make_object\n self.assertEqual(arkane_spc_v_241.conformer.spin_multiplicity, 2)\n\n arkane_current = ArkaneSpecies.__new__(ArkaneSpecies)\n arkane_current.load_yaml(path=os.path.join(self.data_path, 'vinoxy_current.yml'))\n self.assertIsInstance(arkane_current, ArkaneSpecies) # checks make_object\n self.assertEqual(arkane_current.conformer.spin_multiplicity, 2)",
"def _from_yaml(cls, loader: yaml.Loader, node: yaml.Node) -> \"BotoError\":\n value = loader.construct_mapping(node, deep=True)\n return cls(value)"
] | [
"0.5672593",
"0.56290704",
"0.5447974",
"0.5426063",
"0.5008521",
"0.49819303",
"0.49602154",
"0.49591035",
"0.49587813",
"0.4902017",
"0.48708162",
"0.48490623",
"0.48394424",
"0.48307833",
"0.48064047",
"0.4801588",
"0.47973585",
"0.47964248",
"0.47927532",
"0.47637695",
"0.47519702",
"0.4729749",
"0.46910164",
"0.4685471",
"0.46816456",
"0.46536753",
"0.46420413",
"0.46366882",
"0.4636172",
"0.46323648"
] | 0.64096737 | 0 |
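A brief, hedged illustration of the common_subobject row above: matches, subkeys, and add_extends are not defined in this excerpt, so the stand-ins below are simplified assumptions, and the sketch presumes the common_subobject definition from the preceding row is pasted into the same module.

import copy

def matches(obj, proto):
    # Assumed stand-in: a mapping "matches" when it contains every key/value of proto.
    return isinstance(obj, dict) and all(obj.get(k) == v for k, v in proto.items())

def subkeys(obj, proto):
    # Assumed stand-in: drop the key/value pairs already supplied by the prototype.
    return {k: v for k, v in obj.items() if proto.get(k) != v}

def add_extends(obj, key):
    # Assumed stand-in: record the prototype key under an "extends" list.
    obj.setdefault("extends", []).append(key)

jobs = {
    "job-a": {"image": "ubuntu", "script": ["make a"]},
    "job-b": {"image": "ubuntu", "script": ["make b"]},
}
new_jobs, proto_key = common_subobject(jobs, {"image": "ubuntu"})
print(proto_key)            # .c0 -- a fresh key not already present in the mapping
print(new_jobs[proto_key])  # {"image": "ubuntu"}
print(new_jobs["job-a"])    # {"script": ["make a"], "extends": [".c0"]}

The shared {"image": "ubuntu"} value is factored out under the generated ".c0" key, and each matching entry now references it via "extends" instead of repeating it.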
Try applying an optimization pass and return information about the result. "name" is a string describing the nature of the pass. If it is a nonempty string, summary statistics are also printed to stdout. "yaml" is the object to apply the pass to. "optimization_pass" is the function implementing the pass to be applied. "args" and "kwargs" are the additional arguments to pass to optimization pass. The pass is applied as >>> (new_yaml, other_results) = optimization_pass(yaml, args, kwargs) The pass's results are greedily rejected if it does not modify the original yaml document, or if it produces a yaml document that serializes to a larger string. Returns (new_yaml, yaml, applied, other_results) if applied, or (yaml, new_yaml, applied, other_results) otherwise. | def try_optimization_pass(name, yaml, optimization_pass, *args, **kwargs):
    result = optimization_pass(yaml, *args, **kwargs)
    new_yaml, other_results = result[0], result[1:]
    if new_yaml is yaml:
        # pass was not applied
        return (yaml, new_yaml, False, other_results)
    pre_size = len(syaml.dump_config(sort_yaml_obj(yaml), default_flow_style=True))
    post_size = len(syaml.dump_config(sort_yaml_obj(new_yaml), default_flow_style=True))
    # pass makes the size worse: not applying
    applied = post_size <= pre_size
    if applied:
        yaml, new_yaml = new_yaml, yaml
    if name:
        print_delta(name, pre_size, post_size, applied)
    return (yaml, new_yaml, applied, other_results) | {
"objective": {
"self": [],
"paired": [],
"triplet": [
[
"query",
"document",
"negatives"
]
]
}
} | [
"def Optimization(*args, **kwargs):\n from warnings import warn\n\n warn(\n \"Optimization has been renamed to OptimizationResult and will be removed as soon as v0.13.0\", DeprecationWarning\n )\n return OptimizationResult(*args, **kwargs)",
"def get_optimizer(name: str, **kwargs: Optional[Dict[str, Any]]) -> Optimizer:\n if name not in AVAILABLE_OPTIMIZERS:\n raise ValueError(\n f\"Cannot resolve optimizer '{name}'. Available optimizers are : \" f\"{AVAILABLE_OPTIMIZERS.keys()}\"\n )\n if name == 'fused_adam':\n if not torch.cuda.is_available():\n raise ValueError(f'CUDA must be available to use fused_adam.')\n\n optimizer = AVAILABLE_OPTIMIZERS[name]\n optimizer = partial(optimizer, **kwargs)\n return optimizer",
"def get_voc_named_optim(self, name):\n return self.voc_checkpoints/f'{name}_optim.pyt'",
"def get_optimization_metric(name: str) -> OptimizationMetric:\n try:\n return _OPTIMIZATION_METRIC_REGISTRY[name]\n except KeyError:\n raise ValueError(f'Unsupported optimization metric \"{name}\", must be in: '\n f'{sorted(_OPTIMIZATION_METRIC_REGISTRY.keys())}')",
"def parse_optimizer_args(\n optimizer_name: str, optimizer_kwargs: Union[DictConfig, Dict[str, Any]]\n) -> Union[Dict[str, Any], DictConfig]:\n kwargs = {}\n\n if optimizer_kwargs is None:\n return kwargs\n\n optimizer_kwargs = copy.deepcopy(optimizer_kwargs)\n optimizer_kwargs = maybe_update_config_version(optimizer_kwargs)\n\n if isinstance(optimizer_kwargs, DictConfig):\n optimizer_kwargs = OmegaConf.to_container(optimizer_kwargs, resolve=True)\n\n # If it is a dictionary, perform stepwise resolution\n if hasattr(optimizer_kwargs, 'keys'):\n # Attempt class path resolution\n if '_target_' in optimizer_kwargs: # captures (target, _target_)\n optimizer_kwargs_config = OmegaConf.create(optimizer_kwargs)\n optimizer_instance = hydra.utils.instantiate(optimizer_kwargs_config) # type: DictConfig\n optimizer_instance = vars(optimizer_instance)\n return optimizer_instance\n\n # If class path was not provided, perhaps `name` is provided for resolution\n if 'name' in optimizer_kwargs:\n # If `auto` is passed as name for resolution of optimizer name,\n # then lookup optimizer name and resolve its parameter config\n if optimizer_kwargs['name'] == 'auto':\n optimizer_params_name = \"{}_params\".format(optimizer_name)\n optimizer_kwargs.pop('name')\n else:\n optimizer_params_name = optimizer_kwargs.pop('name')\n\n # Override arguments provided in the config yaml file\n if 'params' in optimizer_kwargs:\n # If optimizer kwarg overrides are wrapped in yaml `params`\n optimizer_params_override = optimizer_kwargs.get('params')\n else:\n # If the kwargs themselves are a DictConfig\n optimizer_params_override = optimizer_kwargs\n\n if isinstance(optimizer_params_override, DictConfig):\n optimizer_params_override = OmegaConf.to_container(optimizer_params_override, resolve=True)\n\n optimizer_params_cls = get_optimizer_config(optimizer_params_name, **optimizer_params_override)\n\n # If we are provided just a Config object, simply return the dictionary of that object\n if optimizer_params_name is None:\n optimizer_params = vars(optimizer_params_cls)\n return optimizer_params\n\n else:\n # If we are provided a partial class instantiation of a Config,\n # Instantiate it and retrieve its vars as a dictionary\n optimizer_params = optimizer_params_cls() # instantiate the parameters object\n optimizer_params = vars(optimizer_params)\n return optimizer_params\n\n # simply return the dictionary that was provided\n return optimizer_kwargs\n\n return kwargs",
"def compile(self, cfg, id):\n \n passes = self.build_options(cfg)\n \n build_res = None\n opt_res = None\n try:\n # optimize\n opt_res = self.make(\"optimize\", ID=id, PASSES=passes)\n \n if not self.optOnly:\n # build executable\n build_res = self.make(\"link\", ID=id)\n \n except Exception as inst:\n print \"---------------------------------------------\"\n print inst\n # ensure we made it all the way through\n if opt_res == None:\n print \"died during OPTIMIZE... trying to reduce the pass configuration.\"\n self.auto_reduce(passes, id, \"optimize\")\n assert False, \"done.\"\n \n assert build_res != None, \"died during LINK\"\n assert False, \"Something else went wrong!!\"\n \n # we only care about optimization time\n return opt_res",
"def opt_wrapper(m, **kwargs):\n m.optimize(**kwargs)\n return m.optimization_runs[-1]",
"def opt_wrapper(m, **kwargs):\r\n m.optimize(**kwargs)\r\n return m.optimization_runs[-1]",
"def optim_args(self) -> Tuple[Optional[str], Dict]:\n opt_conf = self.config.get('optim')\n if opt_conf:\n return opt_conf.get('name'), opt_conf.get('args')\n else:\n return None, {}",
"def get_result(self, name, node=None, comp=None, index=None,\n check_results=True, state=False):\n\n if self.results is None and check_results:\n raise Exception('The optimization problem has not been solved yet.')\n\n obj = self.get_component(comp, node)\n\n return obj.get_result(name, index, state, self.start_time)",
"def optimise_fn(self, x):\n\n success = self._set_material_parameters(x)\n if not success:\n return self._bad_metric()\n\n # some iterations are repeated so cache the results to avoid unnecessary iterations\n cached_result_key = tuple(x)\n metric_value = self.cached_results.get(cached_result_key)\n\n if metric_value is None:\n print('--> Optimiser: {}'.format(self.material_model))\n\n sim_result = fs.run_simulation(stoma_cfg=self.stoma_cfg,\n from_optimiser=True)\n\n # when the simulation fails we want a non-constant measure for the optimiser to use\n metric_value = sim_result.metric_value if sim_result.success else self._bad_metric()\n\n self.cached_results[cached_result_key] = metric_value\n\n print('--> Optimiser: {} - metric={}'.format(self.material_model, metric_value))\n else:\n print('--> Optimiser: {} - metric={} (cached result)'.format(self.material_model, metric_value))\n\n return metric_value",
"def _get_optimization_object(self):\n optTask = self._getTask('optimization')\n optProblem = optTask.find(xmlns + 'Problem')\n parameterText = optProblem.find(xmlns + 'ParameterText')\n return parameterText.text.strip()",
"def get_optim(name: str):\n if name.lower() == 'adam':\n optimizer = torch.optim.Adam\n elif name.lower() == 'adamw':\n optimizer = torch.optim.AdamW\n elif name.lower() == 'sgd':\n optimizer = torch.optim.SGD\n elif name.lower() == 'sgdw':\n from ..learn.optim import SGDW\n optimizer = SGDW\n elif name.lower() == 'nsgd':\n from ..learn.optim import NesterovSGD\n optimizer = NesterovSGD\n elif name.lower() == 'nsgdw':\n from ..learn.optim import NesterovSGDW\n optimizer = NesterovSGDW\n elif name.lower() == 'rmsprop':\n optimizer = torch.optim.rmsprop\n elif name.lower() == 'adagrad':\n optimizer = torch.optim.adagrad\n elif name.lower() == 'amsgrad':\n from ..learn.optim import AMSGrad\n optimizer = AMSGrad\n else:\n raise SynthtorchError(f'Optimizer: \"{name}\" not a valid optimizer routine or not supported.')\n return optimizer",
"def test_optimize_basic(name, builder):\n\n model = Model(name)\n dirty = True\n printing = True\n counter = 1\n stats = list()\n\n with model.build():\n builder()\n\n if printing:\n print_graphs(f'opt_{name}_init', model)\n\n while dirty:\n\n print()\n\n dirty, new_model = model.run_algebra(\n OptimizeAlg(name=name,\n counter=counter,\n stats=stats,\n num_steps=1))\n \n if printing: \n print_graphs(f'opt_{name}_post({counter})', new_model)\n\n model = new_model\n counter += 1\n\n if printing:\n print_stats(f'opt_{name}', stats)",
"def objective(args: Namespace, trial: optuna.trial._trial.Trial) -> float:\n # Paramters (to tune)\n args.embedding_dim = trial.suggest_int(\"embedding_dim\", 128, 512)\n args.num_filters = trial.suggest_int(\"num_filters\", 128, 512)\n args.hidden_dim = trial.suggest_int(\"hidden_dim\", 128, 512)\n args.dropout_p = trial.suggest_uniform(\"dropout_p\", 0.3, 0.8)\n args.lr = trial.suggest_loguniform(\"lr\", 5e-5, 5e-4)\n\n # Train (can move some of these outside for efficiency)\n logger.info(f\"\\nTrial {trial.number}:\")\n logger.info(json.dumps(trial.params, indent=2))\n artifacts = run(args=args, trial=trial)\n\n # Set additional attributes\n args = artifacts[\"args\"]\n performance = artifacts[\"performance\"]\n logger.info(json.dumps(performance[\"overall\"], indent=2))\n trial.set_user_attr(\"threshold\", args.threshold)\n trial.set_user_attr(\"precision\", performance[\"overall\"][\"precision\"])\n trial.set_user_attr(\"recall\", performance[\"overall\"][\"recall\"])\n trial.set_user_attr(\"f1\", performance[\"overall\"][\"f1\"])\n\n return performance[\"overall\"][\"f1\"]",
"def run_analyze():\n\n parser = ArgumentParser()\n parser.add_argument('name',nargs='?',default=None,help=\"Results file or directory with result files\")\n parser.add_argument('-o','--output',help=\"Analysis output directory\")\n parser.add_argument('-d','--diffs-only',action='store_true',help=\"Show only diffs on detail pages\")\n parser.set_defaults(output='',diffs_only=False)\n\n script_runner.run_analyze(parser.parse_args())",
"def mapping(self, name: str) -> Optional[Tuple[rules.LrpRule,\n Dict[str, Union[torch.Tensor, float]]]]:\n for layer_names, rule, rule_kwargs in self.rule_layer_map:\n # Apply rule only to layers included in mapping\n if name in layer_names:\n return rule, rule_kwargs\n\n return None",
"def evaluate(results, name):\n \n new_results = results.copy()\n \n # String to dictionary\n new_results['hyperparameters'] = new_results['hyperparameters'].map(ast.literal_eval)\n \n # Sort with best values on top\n new_results = new_results.sort_values('score', ascending = False).reset_index(drop = True)\n \n # Print out cross validation high score\n print('The highest cross validation score from {} was {:.5f} found on iteration {}.'.format(name, \n new_results.loc[0, 'score'], new_results.loc[0, 'iteration']))\n \n # Create dataframe of hyperparameters\n hyp_df = pd.DataFrame(columns = list(new_results.loc[0, 'hyperparameters'].keys()))\n \n for i, row in enumerate(new_results['hyperparameters']):\n if 'hidden_layer_sizes' in row:\n new_results['hyperparameters'][i]['hidden_layer_sizes'] = str(new_results['hyperparameters'][i]['hidden_layer_sizes'])\n\n # Iterate through each set of hyperparameters that were evaluated\n for i, hyp in enumerate(new_results['hyperparameters']):\n hyp_df = hyp_df.append(pd.DataFrame(hyp, index = [0]), \n ignore_index = True)\n \n # Put the iteration and score in the hyperparameter dataframe\n hyp_df['iteration'] = new_results['iteration']\n hyp_df['score'] = new_results['score']\n \n return hyp_df",
"def tell_optim(optim, n, jobs_left):\n result = n.result()\n\n if result is not None:\n optim.tell(n.params, result)\n print(\"Result:\", n.name, result,\n \"(\"+str(jobs_left)+\" jobs left)\", file=sys.stderr)\n else:\n print(\"Warning:\", n.name, \"result is None\",\n file=sys.stderr)",
"def get_parameter(self, name: str) -> any:\r\n if name in self.kwargs:\r\n return self.kwargs[name]\r\n for x in self.args:\r\n if isinstance(x, dict) and name in x:\r\n return x[name]\r\n else:\r\n return None",
"def record(\n self, result, name, error=None, publish=True, **kwargs\n ) -> Tuple[dict, Optional[Any]]:\n (\n tags,\n optional_benchmark_info,\n context,\n info,\n github,\n options,\n cluster_info,\n output,\n ) = self._init(kwargs)\n\n if (tags.get(\"name\") is not None) and (name != tags[\"name\"]):\n raise ValueError(\n f\"`name` and `tags[\\\"name\\\"] are both supplied and do not match: {name=} != {tags['name']=}\"\n )\n\n # Make sure that `name` key is in tags; currently the canonical way to\n # communicate the name of the benchmark that this result has been\n # obtained for.\n tags[\"name\"] = name\n\n batch_id = options.get(\"batch_id\")\n if not batch_id:\n batch_id = self._batch_id\n\n run_id = options.get(\"run_id\")\n if not run_id:\n run_id = self._run_id\n\n # Make this naming challenge explicit.\n optional_result_info = optional_benchmark_info\n\n benchmark_result = {\n \"run_id\": run_id,\n \"batch_id\": batch_id,\n \"timestamp\": _now_formatted(),\n \"context\": context,\n \"info\": info,\n \"tags\": tags,\n \"optional_benchmark_info\": optional_result_info,\n \"github\": github,\n }\n if error:\n benchmark_result[\"error\"] = error\n\n if result and result[\"data\"]:\n benchmark_result[\"stats\"] = self._stats(\n result[\"data\"],\n result[\"unit\"],\n result.get(\"times\", []),\n result.get(\"time_unit\", \"s\"),\n )\n\n if cluster_info:\n benchmark_result[\"cluster_info\"] = cluster_info\n else:\n benchmark_result[\"machine_info\"] = self.machine_info\n\n run_name = options.get(\"run_name\")\n if run_name is not None:\n benchmark_result[\"run_name\"] = run_name\n\n run_reason = options.get(\"run_reason\")\n if run_reason is not None:\n benchmark_result[\"run_reason\"] = run_reason\n\n if publish:\n self.publish(benchmark_result)\n\n return benchmark_result, output",
"def main():\n long_description = (\n \"Computing a deflex scenario. By default the name of the result file \"\n \"is derived from the name of the input file but by adding '--results` \"\n \"it is possible to define a custom path. The results will be of the \"\n \"same file format as the input scenario.\\n\"\n \"Optionally a dump-file can be stored. If no path is given the path \"\n \"is derived from the path of the input scenario. The suffix of the \"\n \"dump is '.dflx'.\"\n )\n parser = argparse.ArgumentParser(\n prog=\"deflex-compute\",\n description=long_description,\n formatter_class=argparse.RawDescriptionHelpFormatter,\n )\n parser.add_argument(\n \"--version\", action=\"version\", version=f\"deflex {deflex.__version__}\"\n )\n parser.add_argument(\"path\", type=str, help=\"Input file or directory.\")\n parser.add_argument(\n \"--results\",\n dest=\"results\",\n const=True,\n default=True,\n nargs=\"?\",\n help=(\n \"The name of the results file or directory or False to get no \"\n \"result file. By default the path is derived from scenario path.\"\n ),\n )\n parser.add_argument(\n \"--dump\",\n dest=\"dump\",\n const=True,\n default=None,\n nargs=\"?\",\n help=(\n \"The name of the dump file. Leave empty for the default file name.\"\n ),\n )\n parser.add_argument(\n \"--solver\",\n dest=\"solver\",\n const=\"cbc\",\n default=\"cbc\",\n nargs=\"?\",\n help=\"Solver to use for computing (default: cbc)\",\n )\n\n args = parser.parse_args()\n\n deflex.use_logging()\n\n deflex.scripts.model_scenario(**vars(args))",
"def run(self, args):\n self.pen.score_for_matched_lexical = args[0]\n self.pen.score_for_matched_synonym = args[1]\n self.factor_word_offset_penalty = args[2]\n self.factor_sentence_length_mismatch = args[3]\n self.factor_name_mismatch = args[4]\n self.factor_fe_offset_penalty = args[5]\n self.weight_target_frame_element = args[6]\n self.weight_frame_elements = args[7]\n self.factor_frame_offset_penalty = args[8]\n misses = []\n for row in range(self.data.get_number_of_rows()):\n ref_sentence = self.data.get_row(row)[self.data.get_gold()]\n results = {}\n for team, team_sentence in self.data.get_row_for_teams(self.evaluator.get_teams(row), row).iteritems():\n results[team] = self.get_sentence_score(ref_sentence, team_sentence)\n misses.append(self.evaluator.compare_all(results, row))\n return np.mean(misses) / 5.0",
"def wrapper(args):\n if args[\"--time\"]:\n import time\n start_time = time.time()\n result = fun(args)\n LOGGER.info(\"Total time:\", time.time() - start_time)\n return result\n\n return fun(args)",
"def build_result_param(task_passport, param_def=None, name=RESULT_PARAM):\n # type: (TaskPassport, Optional[ParameterDefinition], str) -> ParameterValue\n\n if not param_def:\n from targets.values import ObjectValueType\n\n # naive creation of result param definition - default named \"result\" and single value\n param_def = parameter.modify(\n name=name, value_type=ObjectValueType\n ).output.build_parameter(\"inline\")\n\n return ParameterValue(\n parameter=param_def,\n source=task_passport.full_task_family_short,\n source_value=None,\n value=NOTHING,\n parsed=False,\n )",
"def reconstruction_metrics(input_node, reconstruction_node, name, variables=None):\n loss_node = tf.losses.mean_squared_error(input_node, reconstruction_node)\n optimiser = default_adam_optimiser(loss_node, name, variables=variables)\n return loss_node, optimiser",
"def judge(name):\n score = 0\n for scoreID, scorer, weight in weights:\n subscore = scorer(name)\n score += subscore * weight\n name.scores[scoreID] = subscore\n name.score = score\n return score",
"def optimize_dataset(name=\"Movie\"):\r\n #load textlevel training data, doc2vec model and sentencelevel validation data\r\n if name==\"Movie\":\r\n data = pickle.load(open(\"data/embeddings/movie_embeddings_small.p\", \"rb\"))\r\n model = gensim.models.Doc2Vec.load(\"data/doc_2_vec/movie_model\")\r\n sentence_level_val_data = pickle.load(open(\"data/sentence_level_data/movie/movie_sentence_level_val_data.p\", \"rb\"))\r\n elif name==\"Financial\":\r\n data = pickle.load(open(\"data/embeddings/financial_embeddings_small.p\", \"rb\"))\r\n model = gensim.models.Doc2Vec.load(\"data/doc_2_vec/financial_model\")\r\n sentence_level_val_data = pickle.load(open(\"data/sentence_level_data/financial/financial_sentence_level_val_data.p\", \"rb\"))\r\n \r\n #split textlevel dataset in training and validation set training for gradien descent optimization validation to select best beta\r\n train, valid = split_dataset(dataset=data, test_size=0.2)\r\n betas, text_accuracies, sentence_accuracies, losses = min_batch_gradient_descent_with_momentum(train_data=train,\r\n val_data_text=valid,\r\n batch_size=5,\r\n iterations=2,\r\n step_size=0.05, #0.05\r\n lam=0.98,\r\n momentum_step_size=0.7,\r\n vec_lenght=200,\r\n d2v_model=model,\r\n val_data_sentence=sentence_level_val_data)\r\n #save textlevel validation data\r\n pickle.dump(valid, open(\"data/embeddings/validation_data_\" + name + \".p\", \"wb\"))\r\n \r\n #select best beta, as beta with highes accuracy in text-level validation data\r\n for i in range(text_accuracies.shape[0]):\r\n if text_accuracies[i] == numpy.max(text_accuracies):\r\n beta_opt_text_level = betas[i]\r\n \r\n #select best beta, as beta with highes accuracy in sentence-level validation data, if sentence-level validation data is given \r\n if not sentence_accuracies==None: \r\n for i in range(sentence_accuracies.shape[0]):\r\n if sentence_accuracies[i] == numpy.max(sentence_accuracies):\r\n beta_opt_sentecne_level = betas[i]\r\n \r\n #plot accuracies for text-level validation data\r\n matplotlib.pyplot.plot(text_accuracies)\r\n matplotlib.pyplot.ylabel('text Accuracie')\r\n matplotlib.pyplot.xlabel('Iterations')\r\n matplotlib.pyplot.show()\r\n \r\n #plot accuracies for sentence-level validation data\r\n if not sentence_accuracies==None:\r\n matplotlib.pyplot.plot(sentence_accuracies)\r\n matplotlib.pyplot.ylabel('Sentence Accuracie')\r\n matplotlib.pyplot.xlabel('Iterations')\r\n matplotlib.pyplot.show()\r\n \r\n #plot loss (calcualted at text-level)\r\n matplotlib.pyplot.plot(losses)\r\n matplotlib.pyplot.ylabel('Cost')\r\n matplotlib.pyplot.xlabel('Iterations')\r\n matplotlib.pyplot.show()\r\n \r\n acc_max_text = numpy.max(text_accuracies)\r\n acc_max_sentence = numpy.max(sentence_accuracies)\r\n \r\n \r\n return beta_opt_text_level, beta_opt_sentecne_level, acc_max_text, acc_max_sentence",
"def create_basic_parser(name=''):\n # SEE: https://docs.python.org/3/library/argparse.html\n parser = argparse.ArgumentParser('Benchmark on Image Registration - %s' % name)\n parser.add_argument('-n', '--name', type=str, required=False, default=None, help='custom experiment name')\n parser.add_argument('-t', '--path_table', type=str, required=True, help='path to the csv cover file')\n parser.add_argument(\n '-d',\n '--path_dataset',\n type=str,\n required=False,\n default=None,\n help='path to the dataset location, if missing in table'\n )\n parser.add_argument('-o', '--path_out', type=str, required=True, help='path to the output directory')\n parser.add_argument(\n '--unique', dest='unique', action='store_true', help='whether each experiment have unique time stamp'\n )\n parser.add_argument('--visual', dest='visual', action='store_true', help='whether visualise partial results')\n parser.add_argument(\n '-pproc',\n '--preprocessing',\n type=str,\n required=False,\n nargs='+',\n help='use some image pre-processing, the other matter',\n choices=['gray'] + ['matching-%s' % clr for clr in CONVERT_RGB]\n )\n # parser.add_argument('--lock_expt', dest='lock_thread', action='store_true',\n # help='whether lock to run experiment in single thread')\n parser.add_argument('--run_comp_benchmark', action='store_true', help='run computation benchmark on the end')\n parser.add_argument(\n '--nb_workers', type=int, required=False, default=1, help='number of registration running in parallel'\n )\n return parser",
"def RunGraph(graph_name, inputs=(), outputs=[], stage=None, return_outputs=True):\n if len(inputs) > 0 and len(inputs[0]) > 0:\n if len(inputs[0]) != len(inputs[1]):\n raise RuntimeError('Defined {} args, but {} are given.'\n .format(len(inputs[0]), len(inputs[1])))\n for idx in range(len(inputs[0])):\n FeedTensor(inputs[0][idx], inputs[1][idx])\n if stage is None: stage = 'default'\n rules = stages[stage]\n RunGraphCC(str(graph_name), str(rules['include']), str(rules['exclude']))\n # force to return may lead crash if encountering un-computed outputs\n if return_outputs:\n if len(outputs) == 0 : return None\n elif len(outputs) == 1: return outputs[0].get_value()\n else: return [outputs[i].get_value() for i in range(len(outputs))]"
] | [
"0.51373553",
"0.48719892",
"0.48290378",
"0.4802629",
"0.47797635",
"0.47134107",
"0.4667771",
"0.46635738",
"0.45581153",
"0.4458564",
"0.4437545",
"0.4433735",
"0.44055456",
"0.44029266",
"0.43912202",
"0.43725234",
"0.43424857",
"0.43341213",
"0.42941284",
"0.4288936",
"0.42810565",
"0.42732117",
"0.42687663",
"0.42622936",
"0.42534152",
"0.42336035",
"0.42132416",
"0.42083785",
"0.41901016",
"0.41824034"
] | 0.82080066 | 0 |
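A hedged sketch of driving the try_optimization_pass row above: syaml.dump_config, sort_yaml_obj, and print_delta are external helpers not shown here, so minimal stand-ins are assumed, the drop_empty_jobs pass is hypothetical, and the sketch presumes the try_optimization_pass definition from the preceding row lives in the same module.

import json
from types import SimpleNamespace

# Assumed stand-in for the YAML serializer used to measure document size.
syaml = SimpleNamespace(
    dump_config=lambda obj, default_flow_style=True: json.dumps(obj, sort_keys=True)
)

def sort_yaml_obj(obj):
    # Assumed stand-in: identity is enough, since the serializer above sorts keys.
    return obj

def print_delta(name, pre, post, applied):
    # Assumed stand-in for the summary printer.
    print(f"{name}: {pre} -> {post} bytes, {'applied' if applied else 'rejected'}")

def drop_empty_jobs(doc):
    # Hypothetical pass: remove keys whose value is an empty mapping.
    pruned = {k: v for k, v in doc.items() if v != {}}
    # Return the original object (same identity) when nothing changed, so the
    # caller can detect that the pass was not applied.
    return (pruned,) if pruned != doc else (doc,)

doc = {"build": {"script": ["make"]}, "noop": {}}
new_doc, old_doc, applied, extra = try_optimization_pass("prune", doc, drop_empty_jobs)
# The pass shrinks the serialized document, so applied is True and new_doc is the
# pruned mapping; a size delta line such as "prune: 43 -> 31 bytes, applied" is
# printed because a non-empty name was given.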