Dataset columns:

column           type             range
query            stringlengths    9 to 9.05k
document         stringlengths    10 to 222k
metadata         dict
negatives        sequencelengths  30 to 30
negative_scores  sequencelengths  30 to 30
document_score   stringlengths    4 to 10
document_rank    stringclasses    2 values
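Each record below follows this schema. A minimal sketch for loading and sanity-checking rows, assuming the records are exported as JSON Lines with these exact field names (the file name, export format, and the check itself are illustrative assumptions, not part of the dataset):

import json

def load_rows(path="retrieval_rows.jsonl"):  # hypothetical export path
    """Yield one dict per record with the columns listed above."""
    with open(path, encoding="utf-8") as handle:
        for line in handle:
            yield json.loads(line)

def check_row(row):
    """Verify the positive document outscores every mined negative."""
    positive = float(row["document_score"])              # stored as a string
    negatives = [float(s) for s in row["negative_scores"]]
    assert len(row["negatives"]) == len(negatives) == 30  # per the schema above
    return positive > max(negatives)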
Test API can get a list of all employees (GET request)
def test_api_can_get_all_employees(self):
    res = self.client().get(service_url_emp)
    self.assertEqual(res.status_code, 200)
    self.assertIn('name1', str(res.data))
    self.assertIn('name2', str(res.data))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self):\n employees = self.service.get_employees(strategy=selectinload)\n return self.schema.dump(employees, many=True), 200", "def list(self, request):\n employee = self.controller.retrieve_all_employees()\n serializer = data_serializers.PresentEmployeeDataSerializer(employee, many=True)\n return Response(serializer.data)", "def test_api_can_get_employee_by_id(self):\n res = self.client().get(service_url_emp+'/1')\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))", "def list(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def get(self, request):\n varemployee = employee.objects.all()\n serializer = employeeSerializer(varemployee, many=True)\n return Response(serializer.data)", "def test_fetch_all_offices(self):\n \n access_token = self.generate_token()\n self.create_office()\n response_data = self.client.get(\n \"api/v2/admin/offices\",\n\n headers={\"content-type\":\"application/json\",\n\n 'Authorization': f'Bearer {access_token}'}\n )\n self.assertEqual(response_data.status_code, 200)", "def test_api_can_get_all_expenses(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=self.expense)\n self.assertEqual(res.status_code, 201)\n res = self.client().get('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 200)\n results = json.loads(res.data)\n self.assertEqual(results['items'][0]['name'], self.expense['name'])", "def list(self, request):\n teams = self.controller.retrieve_all_teams_employees()\n serializer = data_serializers.PresentTeamEmployeeDataSerializer(teams, many=True)\n return Response(serializer.data)", "def employees():\n # gather data from db about all employees\n return render_template(\"employees.html\")", "def test_office_list(self):\n url = '/api/v1/consultorios/'\n request = self.client.get(url)\n self.assertEqual(request.status_code, status.HTTP_200_OK)", "def get(self):\n resultado = EmployeeModel.query.all()\n return resultado", "def employees(employee_id=None):\n\tif not employee_id:\n\t\temployee_data = _serialize_list(Employee.query.all())\n\telse:\n\t\temployee_data = _serialize_model(Employee.query.filter_by(id=employee_id).first())\n\n\tresp = jsonify(employee_data)\n\treturn resp", "def test_fetch_all():\n response = requests.get('http://localhost:5000/api/persons')\n data = response.json()\n\n assert response.status_code == 200\n for field in FIELDS:\n assert field in data[0]", "def employee_list(request):\n response_data = []\n for emp in Employee.objects.all().values(\n 'id', 'first_name', 'last_name', 'age', 'address', 'city',\n 'state', 'country'):\n response_data.append(emp)\n return JsonResponse(response_data, safe=False)", "def show_all_employees(self):\n try:\n employees = self.admin_repository.show_all_employees()\n if employees:\n for employee in employees:\n print(\"Employee Id : {}\".format(employee[0]))\n print(\"Name : {}\".format(employee[1]))\n print(\"Email : {}\".format(employee[2]))\n print(\"----------------------------\")\n return True\n else:\n print(\"No records found.\")\n return False\n except Exception as e:\n print(\"Some Error occurred.Please try again\")\n return False", "def test_get_me(self):\n url = reverse_lazy('api:me-employees')\n response = 
self.client.get(url)\n self.assertEquals(response.status_code, 200)\n response_json = response.json()\n\n self.assertIn('user', response_json, response_json)\n self.assertIn('positions', response_json, response_json)\n self.assertIn('favoritelist_set', response_json, response_json)\n\n self.assertIsInstance(response_json['favoritelist_set'], list)\n self.assertIsInstance(response_json['positions'], list)", "def test_response_for_getting_all_users(self):\n response = self.client.get(\"/team/all/\", format='json')\n self.assertEqual(response.status_code, 200)", "def test_admin_get_all(self):\n response = self.app.get('/api/v3/users', headers=self.admin_header)\n self.assertEqual(response.status_code, 200)", "def test_get_model_list():\n with app.test_client() as c:\n response = c.get('/REST/api/v1.0/model_list') \n assert response.status_code == 201", "def test_get(self, employee_model):\n\n request = Mock()\n employee = Mock()\n employee_model.objects.get.return_value = employee\n\n self.serializer.data = {\n \"emp_no\": 10090,\n \"birth_date\": \"1961-05-30\",\n \"first_name\": \"Kendra\",\n \"last_name\": \"Hofting\",\n \"gender\": \"M\",\n \"hire_date\": \"1986-03-14\"\n }\n response = self.view.get(request, emp_no=1)\n\n employee_model.objects.get.assert_called_with(\n pk=1\n )\n self.assertEqual(response.status_code, 200)", "def test_list(self):\n response = self.client.get('/exercises/')\n expected = {\n 'id': self.exer1.id,\n 'name': self.exer1.name,\n 'description': self.exer1.description,\n 'muscle_group': self.exer1.muscle_group\n }\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data['count'], 2)\n self.assertEqual(len(response.data['results']), 2)\n self.assertEqual(response.data['results'][0], expected)", "def get_employees(self):\n return self.employees", "def test_get_predict_list():\n with app.test_client() as c:\n response = c.get('/REST/api/v1.0/predict_list') \n assert response.status_code == 201", "def test_api_can_get_all_users(self):\n response = self.client().get('/api/v1/user/')\n self.assertTrue(response.status_code, 200)", "def test_presenters_get(self):\n query_string = [('label', 'label_example'),\n ('page', 1),\n ('per_page', 100)]\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/presenters',\n method='GET',\n headers=headers,\n query_string=query_string)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_fetch_all_user(self):\n\n payload = self.get_req('api/v1/users')\n self.assertEqual(payload.status_code, 200)\n self.assertEqual(payload.json['users'], [])", "def test_get_list(self):\n pass", "def test_get_no_employee(self):\n self.test_employee.delete()\n url = reverse_lazy('api:me-employees')\n response = self.client.get(url)\n self.assertEquals(response.status_code, 403)", "def test_list(self):\n url = '/api/users/'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n r = response.json()\n self.assertTrue(isinstance(r['objects'], list))\n # Response should not contain inactive, contractors or shared accounts.\n self.assertContains(response, self.user1.email)\n self.assertNotContains(response, self.del_user.email)\n self.assertNotContains(response, self.contract_user.email)\n self.assertNotContains(response, self.shared.email)\n # Test the compact response.\n url = '/api/users/?compact=true'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)\n # Test the minimal response.\n 
url = '/api/users/?minimal=true'\n response = self.client.get(url)\n self.assertEqual(response.status_code, 200)", "def getEmployees(self):\n return self.employees" ]
[ "0.7478678", "0.7415778", "0.7400966", "0.73436856", "0.711193", "0.6989079", "0.68708664", "0.68625116", "0.68500465", "0.68442833", "0.68407613", "0.67822254", "0.66658956", "0.6658519", "0.66278076", "0.6532204", "0.65165836", "0.64601713", "0.6449148", "0.6427563", "0.641366", "0.64049834", "0.63761514", "0.6374115", "0.6370123", "0.6364976", "0.6362051", "0.6344424", "0.63391876", "0.6325179" ]
0.8612314
0
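The metadata above declares a triplet objective over (query, document, negatives), so a record like this one can be expanded into training triples. A minimal sketch of that expansion under that assumption; the helper name and the score threshold are illustrative, not part of the dataset:

def build_triplets(row, min_negative_score=0.0):
    """Expand one record into (anchor, positive, negative) triples."""
    triples = []
    for neg_text, neg_score in zip(row["negatives"], row["negative_scores"]):
        if float(neg_score) >= min_negative_score:  # optionally keep only harder negatives
            triples.append((row["query"], row["document"], neg_text))
    return triples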
Test API can get a single employee by its id
def test_api_can_get_employee_by_id(self):
    res = self.client().get(service_url_emp+'/1')
    self.assertEqual(res.status_code, 200)
    self.assertIn('name1', str(res.data))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get(self, id):\n resultado = EmployeeModel.query.filter_by(employee_id=id).first()\n if resultado:\n return resultado\n api.abort(404)", "def get(cls, employee_id):\n employee = EmployeeModel.find_by_id(employee_id)\n if not employee:\n return {'message': 'Employee not found, or you do not have the access'}, 404\n\n return employee.json()", "def get_employee(employee_id):\n\n employee = Employee.objects.get_or_404(id=employee_id)\n\n return jsonify({\n 'employee': employee\n })", "def employee_get(emp_id):\n try:\n emp = Employee.objects.get(id=emp_id)\n except Employee.DoesNotExist:\n return JsonResponse({\n 'status': False,\n 'message': 'Employee does not exists in database'\n }, status=404)\n _data = {\n 'id': emp.id,\n 'first_name': emp.first_name,\n 'last_name': emp.last_name,\n 'age': emp.age,\n 'city': emp.city.name,\n 'state': emp.state.name,\n 'country': emp.country.name\n }\n return JsonResponse(_data, safe=False)", "def get_employee_by_id(self, employee_id):\n employee = self.admin_repository.get_employee_by_id(employee_id)\n if employee:\n print('''Name: {}\\nEmail: {}\\n\n '''.format(employee[0], employee[1]))\n return employee\n else:\n print(\"Invalid Id\")\n return False", "def get_employee_by_id(employee_id):\n where = Employee.id == employee_id\n query = get_employee_query(where)\n return query.one()", "def get(id_: int):\n logger.debug('Retrieving employee by id %i.', id_)\n try:\n query = db.session.query(Employee)\n employee = query.filter(\n Employee.id == id_\n ).scalar()\n if not employee:\n raise Exception(f\"Can't get employee with id {id_}\", )\n except Exception as exception:\n logger.error('An error occurred while retrieving employee with id %i.'\n ' Exception: %s', id_, str(exception))\n db.session.rollback()\n raise\n db.session.commit()\n logger.info('Successfully retrieved employee by id %i.', id_)\n return employee", "def get_employee(self, id):\n from Employee import Employee\n cursor = self.dbconnect.get_cursor()\n cursor.execute('SELECT * FROM employee WHERE employeeID=%s ', (id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8])", "def get_employee(self, employee_id):\n cursor = self.dbconnect.get_cursor()\n\n try:\n cursor.execute('SELECT id, name, email, office, extra_info, picture_location, research_group, title, is_external,'\n ' is_admin, is_active FROM employee WHERE LOWER(id)=LOWER(%s)', (employee_id,))\n row = cursor.fetchone()\n return Employee(row[0], row[1], row[2], row[3], row[4], row[5], row[6], row[7], row[8], row[9], row[10])\n\n except:\n self.dbconnect.rollback()\n raise", "def test_api_can_get_all_employees(self):\n res = self.client().get(service_url_emp)\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))\n self.assertIn('name2', str(res.data))", "def find_employee_by_id(self,id):\n self.employee_id()\n if id in self.emp_id:\n print(self.emp_id[id])\n return self.emp_id[id]\n else:\n print(\"Employee not found\")", "def test_get(self, employee_model):\n\n request = Mock()\n employee = Mock()\n employee_model.objects.get.return_value = employee\n\n self.serializer.data = {\n \"emp_no\": 10090,\n \"birth_date\": \"1961-05-30\",\n \"first_name\": \"Kendra\",\n \"last_name\": \"Hofting\",\n \"gender\": \"M\",\n \"hire_date\": \"1986-03-14\"\n }\n response = self.view.get(request, emp_no=1)\n\n employee_model.objects.get.assert_called_with(\n pk=1\n )\n self.assertEqual(response.status_code, 200)", "def retrieve(self, request, 
pk=None):\n employee = self.get_employee_object(pk)\n print(F\"Employee: {employee}\")\n serializer = data_serializers.PresentEmployeeDataSerializer(employee)\n return Response(serializer.data)", "def get(self, uuid: str):\n try:\n employee = self.service.get_employee_by_uuid(uuid)\n except ValueError:\n return self.NOT_FOUND_MESSAGE, 404\n return self.schema.dump(employee), 200", "def employee(employee_id):\n # gather data from db about all employees\n return render_template(\"employee.html\",\n employee_id=employee_id)", "def test_presenters_id_get(self):\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/presenters/{id}'.format(id='id_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_api_can_get_expense_by_id(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n rv = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=self.expense)\n self.assertEqual(rv.status_code, 201)\n result_in_json = json.loads(rv.data.decode('utf-8').replace(\"'\", \"\\\"\"))\n results = self.client().get(\n '/expenses/{}'.format(result_in_json['id']), headers=dict(Authorization=\"Bearer \" + access_token))\n res = json.loads(results.data)\n self.assertEqual(results.status_code, 200)\n self.assertEqual('snacks', str(res['name']))", "def test_abbeys_id_get(self):\n headers = { \n 'Accept': 'application/json',\n }\n response = self.client.open(\n '/v0.0.1/abbeys/{id}'.format(id='id_example'),\n method='GET',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def get_employee():\n\n employee_id = get_employee_input_int('Enter employee ID to get the data ')\n employee = db.get_employee(employee_id)\n if not employee:\n print(\"No employee found with id \", employee_id)\n else:\n payscale = db.get_payScale(employee.grade)\n print('DATA:-> {} {} has grade = {} which gives {} per hours\\n'\n .format(employee.first_name, employee.last_name, employee.grade, payscale.salary))", "def test_new_employee_crud_methods(self):\n response = self.client.get(\n '/employees/', kwargs={'employer_id': self.employee.id})\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(Employee.objects.all()), 1)\n\n # Test that a new employee can be added\n response = self.client.post(\n '/employees/',\n {'name': 'MAdtraxx!!', 'employer': self.employer.id},\n kwargs={'pk': self.employer.id})\n self.assertEqual(response.status_code, 201)\n self.assertEqual(Employee.objects.count(), 2)\n\n # Test that employee info may be edited\n response = self.client.put('/employees/1/',\n {'name': 'Ashley',\n 'employer': self.employer.id},\n kwargs={'employer_id': self.employee.id,\n 'pk': self.employee.id})\n self.assertEqual(response.status_code, 200)", "def show_employee(emp_id, fields=None):\n ret = {}\n if fields is None:\n fields = \",\".join(\n (\n \"canUploadPhoto\",\n \"department\",\n \"displayName\",\n \"firstName\",\n \"id\",\n \"jobTitle\",\n \"lastName\",\n \"location\",\n \"mobilePhone\",\n \"nickname\",\n \"photoUploaded\",\n \"photoUrl\",\n \"workEmail\",\n \"workPhone\",\n \"workPhoneExtension\",\n )\n )\n\n status, result = _query(action=\"employees\", command=emp_id, args={\"fields\": fields})\n\n root = ET.fromstring(result)\n\n ret = {\"id\": emp_id}\n for item in root:\n ret[next(iter(item.values()))] = item.text\n return 
ret", "def test_api():\n # person id for one long time employee\n content = get_person(10050)\n assert content['preferredName'].endswith('immel')", "def employees(employee_id=None):\n\tif not employee_id:\n\t\temployee_data = _serialize_list(Employee.query.all())\n\telse:\n\t\temployee_data = _serialize_model(Employee.query.filter_by(id=employee_id).first())\n\n\tresp = jsonify(employee_data)\n\treturn resp", "def test_get_no_employee(self):\n self.test_employee.delete()\n url = reverse_lazy('api:me-employees')\n response = self.client.get(url)\n self.assertEquals(response.status_code, 403)", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_find_entity_by_id_action(self):\n pass", "def test_employee_deletion(self):\n res = self.client().delete(service_url_emp, json={\"id_emp\": 1})\n self.assertEqual(res.status_code, 204)\n # Test to see if it exists, should return a 400\n result = self.client().get(service_url_emp+'/1')\n self.assertEqual(result.status_code, 400)", "def get_details(office_id):\n\n office = OfficeModel()\n office_exists = office.get_one(office_id)\n print(office)\n if office_exists is not None:\n return make_response(jsonify(\n {'status': 200, 'data': office.sub_set()}\n ), 200)\n\n return make_response(jsonify(\n {'status': 404,\n \"error\": 'Office with id {} not found'.format(office_id)}\n ), 404)", "def test_getItineraryFromId(self):\n date = {'date': '2015-08-21T00:00:00.000Z'}\n # Create sample itinerary for alex for the event day\n self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = date['date']\n ))\n\n uid = str('alex_' + date['date'])\n invuid = '00000000000000000000000'\n\n rv = self.json_get('/getItineraryFromId/bbbb', {'uid': uid})\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_get('/getItineraryFromId/alex', {'uid': invuid})\n assert 'Itinerary not found' in str(rv.data)\n\n rv = self.json_get('/getItineraryFromId/alex', {'uid': uid})\n assert uid in str(rv.data)", "def test_employee_creation(self):\n helper = EmployeeHelper(name='Andrew', hired_on='2019-10-01T00:00:00', salary=50000, department_id=1)\n\n # Returned result is an OrderedDict\n result = self.client.execute(helper.get_create_employee_query())['data']['createEmployee']['employee']\n\n self.assertEqual(result['name'], helper.name)\n self.assertEqual(result['hiredOn'], helper.hired_on)\n self.assertEqual(result['salary'], helper.salary)\n self.assertEqual(result['departmentId'], helper.department_id)", "def employers_id_get(id): # noqa: E501\n\n\n return query_manager.get_resource(id=id,\n rdf_type_uri=EMPLOYER_TYPE_URI,\n rdf_type_name=EMPLOYER_TYPE_NAME, \n kls=Employer)" ]
[ "0.77058524", "0.76768017", "0.7541592", "0.74530566", "0.73932606", "0.7201871", "0.71742916", "0.71203345", "0.7098658", "0.70788777", "0.6995682", "0.6907228", "0.6837458", "0.67132115", "0.6658866", "0.66381055", "0.6613429", "0.659436", "0.65936464", "0.657754", "0.6559862", "0.6502767", "0.64636177", "0.6437592", "0.6422298", "0.6357685", "0.6325181", "0.6322865", "0.62875426", "0.6284863" ]
0.8674464
0
Test API can delete an existing employee (DELETE request)
def test_employee_deletion(self):
    res = self.client().delete(service_url_emp, json={"id_emp": 1})
    self.assertEqual(res.status_code, 204)
    # Test to see if it exists, should return a 400
    result = self.client().get(service_url_emp+'/1')
    self.assertEqual(result.status_code, 400)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_no_employee(self):\n self.test_employee.delete()\n url = reverse_lazy('api:me-employees')\n response = self.client.get(url)\n self.assertEquals(response.status_code, 403)", "def test_delete():\n sample_uuid = get_sample_id()\n response = requests.delete(f'http://localhost:5000/api/persons/{sample_uuid}')\n\n assert response.status_code == 200", "def delete(self, request, pk):\n employee = EmployeeDetail.objects.get(pk=pk)\n employee.delete()\n return Response(\n data=' Entry deleted',\n status=status.HTTP_400_BAD_REQUEST\n )", "def delete(self, id):\n empleadoeliminar = EmployeeModel.query.filter_by(employee_id=id).first()\n if empleadoeliminar:\n db.session.delete(empleadoeliminar)\n db.session.commit()\n return 201\n api.abort(404)", "def test_delete_cascade(self):\n\n self.assertEquals(\n Employee.objects.get(cpf=\"974.220.200-16\"),\n self.employee\n )\n\n self.user.delete()\n\n with self.assertRaises(Employee.DoesNotExist):\n Employee.objects.get(cpf=\"974.220.200-16\")", "def cmd_delete_employee():\r\n id = request.form.get('id', \"\")\r\n confirm = request.form.get(\"confirm\", \"\")\r\n if confirm != \"DELETE\":\r\n flash(f\"Contact '{id}' NOT deleted. Please enter DELETE in the confirm field.\")\r\n return redirect(url_for('main.jobs'))\r\n \r\n index = get_employee_by_id(id)\r\n User.query.filter(User.id == id).delete()\r\n db.session.commit()\r\n\r\n\r\n if index != None:\r\n flash(f\"Employee '{id}' was succesfully deleted!\")\r\n return redirect(url_for('main.employees'))\r\n else:\r\n flash(f\"Employee '{id}' was not found\")\r\n return redirect(url_for('main.employees'))", "def test_delete_experiment(client, users):\n login_experimenter(client)\n\n exp = ExperimentFactory()\n exp.save()\n\n exp_url = \"/experiments/\" + str(exp.id)\n\n response = client.delete(exp_url)\n assert response.status_code == 200\n assert json_success(response.data)\n\n response = client.get(\"/experiments/\")\n data = response.data.decode(response.charset)\n assert response.status_code == 200\n assert exp.name not in data", "def delete_employee():\r\n id = request.args.get('id', \"\")\r\n return render_template(\"delete_employee.html\", id=id)", "def test_expense_deletion(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n rv = self.client().post(\n '/expenses/', headers=dict(Authorization=\"Bearer \" + access_token),\n data=self.expense)\n self.assertEqual(rv.status_code, 201)\n res = self.client().delete('/expenses/1', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 200)\n # Test to see if it exists, should return a 404\n result = self.client().get('/expenses/1', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(result.status_code, 404)", "def test_delete(self):\n query = {\"id\":0}\n result = self.app.delete('/testParaDelete', query_string=query)\n self.assertEqual(result.status_code, 200)\n self.assertEqual(result.data, 'ok')", "def test_validate_delete(client):\n response = client.delete('/user/1')\n assert response.status_code == 400\n assert response.json['message'] == INVALID_ACTION_MESSAGE", "def test_delete(self):\n pass", "def test_destroy(self):\n DoctorFactory.create(id=15)\n response = self.unath_client.get(reverse('doctor-detail', args=[15]))\n self.assertEqual(response.status_code, status.HTTP_401_UNAUTHORIZED)\n\n response = self.client.get(reverse('doctor-list'))\n self.assertEqual(response.status_code, 
status.HTTP_200_OK)\n self.assertTrue(len(response.data), 1)\n\n response = self.client.delete(reverse('doctor-detail', args=[15]))\n self.assertEqual(response.status_code, status.HTTP_405_METHOD_NOT_ALLOWED)\n\n response = self.client.get(reverse('doctor-list'))\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data), 1)", "def destroy(self, request, pk=None):\n try:\n deleted_team = self.controller.delete_employee(pk)\n return Response(status=status.HTTP_204_NO_CONTENT)\n except domain_exceptions.ObjectEntityDoesNotExist as e:\n return Response(e.message, status=status.HTTP_404_NOT_FOUND)", "def test_delete(client):\n rv = delete(client, 'Michael')\n assert json.loads(rv.data.decode())['code'] == 0\n assert json.loads(rv.data.decode())['owner'] == 'Michael'", "def test_delete_api_resource(self, mock_delete: Mock, mock_set_token: Mock) -> None:\n exonet_client = ExonetClient(\"kaSD0ffAD1ldSA92A0KODkaksda02KDAK\")\n exonet_client.delete_api_resource(\n ApiResource({\"type\": \"dns_records\", \"id\": \"qjJWA0Km8xgw\"})\n )\n\n # Check mock calls.\n assert mock_delete.call_count == 1\n assert mock_set_token.call_count == 1\n\n # Check call args.\n assert mock_set_token.call_args[0][0] == \"kaSD0ffAD1ldSA92A0KODkaksda02KDAK\"", "def delete(self, uuid: str):\n try:\n self.service.delete_employee(uuid)\n except ValueError:\n return self.NOT_FOUND_MESSAGE, 404\n return self.NO_CONTENT_MESSAGE, 204", "def test_delete(self):\n self.client.force_authenticate(user=self.admin)\n\n response = self.client.delete(\n reverse(\n 'retreat:waitqueuenotification-detail',\n kwargs={'pk': 1},\n ),\n )\n\n self.assertEqual(\n response.status_code,\n status.HTTP_405_METHOD_NOT_ALLOWED\n )", "def test_delete(self):\n client = RestClient(host=self.host, username='')\n rest_url = 'some/url/'\n \n # Mock good response\n with responses.RequestsMock() as rsps:\n rsps.add(responses.DELETE, f'{self.host}/{rest_url}', status=200,\n json={'value':\"good!\"})\n r = client.delete(rest_url)", "def test_delete_attendance(client, json_access_token, school_json, attendance_json, student_json):\n # create school\n res = client.post(\"/api/v1/schools\", json=school_json, headers=json_access_token)\n assert res.status_code == 200\n # create student\n res = client.post(\"/api/v1/students\", json=student_json, headers=json_access_token)\n assert res.status_code == 200\n # create attendances\n res = client.post(\"/api/v1/attendances\", json=attendance_json, headers=json_access_token)\n assert res.status_code == 200\n # update attendances\n res = client.delete(\"/api/v1/attendances/1\", json=attendance_json, headers=json_access_token)\n assert res.status_code == 200", "def test_otoroshi_controllers_adminapi_tcp_service_api_controller_delete_entity_action(self):\n pass", "async def test_delete_organization_address(client):\n params = [('access_token', 'access_token_example')]\n headers = { \n 'Accept': 'application/json',\n }\n response = await client.request(\n method='DELETE',\n path='/v1/addresses/{address_id}'.format(address_id=56),\n headers=headers,\n params=params,\n )\n assert response.status == 200, 'Response body is : ' + (await response.read()).decode('utf-8')", "def test_delete_record(self):\n pass", "def test_delete(self):\n self.assertFalse(self.user1.ad_deleted)\n self.assertTrue(self.user1.active)\n url = '/api/users/{}/'.format(self.user1.ad_guid)\n data = {'Deleted': True}\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n 
self.assertEqual(response.status_code, 202)\n user = DepartmentUser.objects.get(pk=self.user1.pk) # Refresh from db\n self.assertTrue(user.ad_deleted)\n self.assertFalse(user.active)\n self.assertTrue(user.in_sync)\n # Also delete a second object, to check for silly 'empty string' collisions.\n url = '/api/users/{}/'.format(self.user2.ad_guid)\n response = self.client.put(url, json.dumps(data), content_type='application/json')\n self.assertEqual(response.status_code, 202)", "def test_delete_user(self):\n cursor = connection.cursor()\n resp = DeleteTest.client.post('/api/deleteuser/',{\"token\":DeleteTest.valid_token,\"email\":\"[email protected]\"})\n stmt = 'SELECT * FROM `'+ USER_TABLENAME + \"`\"\n cursor.execute(stmt)\n records = cursor.fetchall()\n self.assertEqual(len(records), 3 ,\"Not deleted\")", "def test_department_deletion(self):\n res = self.client().delete(service_url, json={\"id_dep\": 1})\n self.assertEqual(res.status_code, 204)\n # Test to see if it exists, should return a 400\n result = self.client().get(service_url+'/1')\n self.assertEqual(result.status_code, 400)", "def test_delete_movie(self): # executive can delete movies\r\n res = self.client().delete('/movies/3/delete', headers=executive_producer)\r\n data = json.loads(res.data)\r\n\r\n #self.assertEqual(res.status_code, 200)\r\n #self.assertTrue(data[\"success\"])\r\n #self.assertTrue(data[\"deleted\"])\r", "def test_delete_pet(self):\n headers = [('api_key', 'api_key_example')]\n response = self.client.open(\n '/pet/{petId}'.format(pet_id=789),\n method='DELETE',\n headers=headers)\n self.assert200(response,\n 'Response body is : ' + response.data.decode('utf-8'))", "def test_delete_records(self):\n pass", "def test_delete_a_todo(self):\n # hit the API endpoint\n response = self.delete_a_todo(1)\n self.assertEqual(response.status_code, status.HTTP_204_NO_CONTENT)\n # test with invalid data\n response = self.delete_a_todo(100)\n self.assertEqual(response.status_code, status.HTTP_404_NOT_FOUND)" ]
[ "0.7686066", "0.7555719", "0.7523974", "0.72330946", "0.72253567", "0.72195405", "0.7156919", "0.70961356", "0.70768404", "0.70662475", "0.70438784", "0.7029802", "0.70208484", "0.6992566", "0.69601595", "0.6914826", "0.6910496", "0.6900446", "0.6896528", "0.6888153", "0.68701184", "0.68649536", "0.685138", "0.6846836", "0.6841262", "0.6841026", "0.6839026", "0.6809248", "0.68042904", "0.6792126" ]
0.8828227
0
Test API can search employee by birth date
def test_api_can_search_employee_by_birth_date(self):
    res = self.client().get(service_url_emp+'/search/2014-10-24')
    self.assertEqual(res.status_code, 200)
    self.assertIn('name2', str(res.data))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_api_can_search_employee_by_between_dates(self):\n res = self.client().get(service_url_emp+'/search_between/2013-10-24,2014-10-24')\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))\n self.assertIn('name2', str(res.data))", "def test_search_two_dates(self):\n # search via 2 dates.\n self.data.search(user_date='01/01/1800', second_date='02/04/1827',\n all_names=True)\n\n test = self.data.search(user_date='5/21/2012',\n second_date='04/10/2012', first_name='Trevor',\n last_name='Harvey')\n item_date = datetime.datetime(month=4, day=19, year=2012)\n self.assertEqual(test[0].entry_date, item_date)\n\n self.data.search(user_date='03/12/0001', second_date='03/13/0001',\n all_names=True)\n return self.data.search(user_date='1/10/2013', second_date='5/21/2011',\n first_name='Trevor', last_name='Harvey')", "def test_search_one_date(self):\n # search via 1 date.\n test = self.data.search(user_date='04/19/2012', all_names=True)\n item_date = datetime.datetime(month=4, day=19, year=2012)\n self.assertEqual(test[0].entry_date, item_date)", "def test_patient_date_of_birth(self):\r\n self.assertEqual(self.test_patient.dateOfBirth, '2000-01-01')", "def test_birth_validation(self):", "def test_api_can_get_all_employees(self):\n res = self.client().get(service_url_emp)\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))\n self.assertIn('name2', str(res.data))", "def test_api():\n # person id for one long time employee\n content = get_person(10050)\n assert content['preferredName'].endswith('immel')", "def test_get(self, employee_model):\n\n request = Mock()\n employee = Mock()\n employee_model.objects.get.return_value = employee\n\n self.serializer.data = {\n \"emp_no\": 10090,\n \"birth_date\": \"1961-05-30\",\n \"first_name\": \"Kendra\",\n \"last_name\": \"Hofting\",\n \"gender\": \"M\",\n \"hire_date\": \"1986-03-14\"\n }\n response = self.view.get(request, emp_no=1)\n\n employee_model.objects.get.assert_called_with(\n pk=1\n )\n self.assertEqual(response.status_code, 200)", "def get(self):\n args = self.parser.parse_args()\n date = get_date_or_none(args['date'])\n start_date = get_date_or_none(args['start_date'])\n end_date = get_date_or_none(args['end_date'])\n\n if date:\n employees = self.service.get_employees_by_date_of_birth(\n date, strategy=selectinload\n )\n elif start_date and end_date:\n employees = self.service.get_employees_born_in_period(\n start_date, end_date, strategy=selectinload\n )\n else:\n return self.BAD_DATE_MESSAGE, 400\n\n return self.schema.dump(employees, many=True), 200", "def test_GET_startdate(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=self.expense)\n self.assertEqual(res.status_code, 201)\n rv = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 1122, 'date_of_expense': '10-01-2021'})\n self.assertEqual(rv.status_code, 201)\n resl = self.client().get('/expenses/?start_date=01-01-2021', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(resl.status_code, 200)\n results = json.loads(resl.data)\n self.assertEqual(results['items'][0]['date_of_expense'], self.expense['date_of_expense'])", "def test_date_of_birth(self):\n entries = {'uid=test,ou=people,dc=esmgquadrivium,dc=nl': {\n 'uid': ['test'],\n 'qDateOfBirth': [19951226],\n 
}}\n clone(entries)\n self.assertEqual(date(1995, 12, 26), Person.objects.first().date_of_birth)", "def get_birthday_employees(self):\n birthday_employees = []\n\n employees = self.search([\n ('birthday_reminders', '=', True),\n ('birthday', '!=', False),\n ])\n if not employees:\n return birthday_employees\n\n return employees.filtered(lambda x: self.check_emp_birthday(x.birthday))", "def test_get_filter_with_date_contacts(self):\n data = {\"date_start\": '2018-08-20',\n \"date_end\": '2018-08-25'}\n response = self.client.get(reverse('contacts-filter'), data)\n # import pdb; pdb.set_trace()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"count\"], 4)", "def test_05_get_person_by_name(self):\n p1 = Person.query.first()\n p1_data = p1.wrap()\n p1_f_name = p1_data[\"first_name\"]\n # find by first name only\n # get part of name and search\n q_string = \"?first_name={}\".format(p1_f_name[:3]) # TODO - verify the length\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 1)\n\n # find by first name and last name\n p1_l_name = p1_data[\"last_name\"]\n q_string = \"?first_name={}&last_name={}\".format(p1_f_name[:3], p1_l_name)\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 1)\n\n # find by first name and non-existing last name\n q_string = \"?first_name={}&last_name={}\".format(p1_f_name[:3], \"iAmNotThere\")\n rv = self.app.get('persons', query_string=q_string)\n data = json.loads(rv.data)\n self.assertEqual(data[\"count\"], 0)", "def test_patient_one_date_of_birth(self):\r\n self.assertEqual(self.test_patient.dateOfBirth, datetime.date(2000, 2, 13))", "def test_get_filter_with_date_contacts_e(self):\n data = {\"type_contact\": 1, \"date_start\": '2018-08-20',\n \"date_end\": '2018-08-25'}\n response = self.client.get(reverse('contacts-filter'), data)\n # import pdb; pdb.set_trace()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"count\"], 2)", "def test_api_can_get_employee_by_id(self):\n res = self.client().get(service_url_emp+'/1')\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))", "def test_search_risk_by_dates(self, field, attr):\n current_date = datetime.date.today()\n with factories.single_commit():\n factories.RiskFactory(**{attr: current_date})\n request_data = [{\n \"filters\": {\n \"expression\": {\n \"left\": {\"left\": field,\n \"op\": {\"name\": \"~\"},\n \"right\": current_date.strftime(\"%Y-%m-%d\")},\n \"op\": {\"name\": \"AND\"},\n \"right\": {\"left\": \"Status\",\n \"op\": {\"name\": \"IN\"},\n \"right\": [\"Active\", \"Draft\", \"Deprecated\"]}\n }\n },\n \"object_name\": \"Risk\",\n \"order_by\": [{\"name\": \"updated_at\", \"desc\": \"true\"}],\n }]\n\n response = self.api.post(\n all_models.Risk,\n data=request_data,\n url=\"/query\",\n )\n\n self.assert200(response)\n response_data = response.json[0][\"Risk\"]\n self.assertEqual(response_data[\"count\"], 1)\n self.assertEqual(response_data[\"values\"][0][attr],\n current_date.strftime(\"%Y-%m-%d\"))", "def test_search(self):\n\n with self.client as c:\n response = c.get(\"/users?q=al\")\n data = str(response.data)\n\n self.assertIn(\"@alice\", data)\n self.assertIn(\"@alvin\", data)\n\n self.assertNotIn(\"@bob\", data)\n self.assertNotIn(\"@carl\", data)", "def test_hotel_search(self):\n test_params = {\n 'DEBUG': False,\n 'TESTING': True\n }\n\n app = 
create_app(settings_override=test_params).test_client\n params = {\n \"city\": \"Las Vegas\",\n \"checkin\": \"2018-05-27\",\n \"checkout\": \"2018-05-28\"\n }\n\n with app() as c:\n response = c.post('/search', json=params)\n\n assert response.status_code == 200\n assert isinstance(response.json, list)\n assert len(response.json) > 0", "def find_by_birthday(start: date, end: date):\n logger.debug('Retrieving all employees with birthday between %s and %s.',\n start, end)\n try:\n employees = db.session.query(\n Employee\n ).filter(\n Employee.birthday >= start\n ).filter(\n Employee.birthday <= end\n ).all()\n except Exception as exception:\n logger.error('An error occurred while retrieving employees. '\n 'Exception: %s', str(exception))\n db.session.rollback()\n raise\n db.session.commit()\n logger.info('Successfully retrieved all employees with birthday '\n 'between %s and %s (amount = %i).', start, end, len(employees))\n return employees", "def test_filter_by_bad_date(admin_client, public_resource_with_metadata):\n query_filter = {\"date\": [\"2019-11-01\", \"bad-date\"]}\n djangoresponse = admin_client.get('/discoverapi/?filter={}'.format(json.dumps(query_filter)), follow=True)\n response = json.loads(djangoresponse.content.decode(\"utf-8\"))\n assert djangoresponse.status_code == 400\n assert \"date parsing error\" in response['message']", "def test_date_rage(self):\n\n query_params = {\n 'until_date': self.today,\n 'from_date': self.today,\n }\n search = OrderSearchEngine()\n query = search.filter_query(query_params)\n content = Q(created_at__range=[self.from_date, self.until_date])\n self.assertEqual(str(query), str(content))", "def test_invalid_birthdate(self):\n data = self.valid_payload\n data['birthdate'] = '2017/09/19'\n response = self.client.post(\n reverse('contacts'),\n data=json.dumps(data),\n content_type='application/json'\n )\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def test_GET_enddate(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=self.expense)\n self.assertEqual(res.status_code, 201)\n rv = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 1122, 'date_of_expense': '10-01-2021'})\n self.assertEqual(rv.status_code, 201)\n resl = self.client().get('/expenses/?end_date=03-01-2021', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(resl.status_code, 200)\n results = json.loads(resl.data)\n self.assertEqual(results['items'][0]['date_of_expense'], self.expense['date_of_expense'])", "def test_email_search(self):\n # A name in the database\n search_string = \"[email protected]\"\n # Search For Umut\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string})\n search_result = json.loads(resp.content)\n # Check the e_mail field of the result\n self.assertEqual(search_string,search_result[0]['e_mail'],\"It doesn't return the user with the email {}\".format(search_string))", "def test_date_field():", "def test_GET_Search(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=self.expense)\n self.assertEqual(res.status_code, 201)\n res = 
self.client().get('/expenses/?name=snacks', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(res.status_code, 200)\n results = json.loads(res.data)\n self.assertEqual(results['items'][0]['name'], self.expense['name'])", "def test_wrong_search_criteria(self):\n # Name Decreasing sorting criteria is selected\n sorting_criteria = \"surname_decrease\"\n # Search string that returns all of the users\n search_string = \"''\"\n # Search For all users\n resp = SearchTest.client.get('/api/search/',{\"token\":SearchTest.valid_token,\"search_string\":search_string,\"sorting_criteria\":sorting_criteria})\n search_result = json.loads(resp.content)\n self.assertEqual(json.loads(resp.content),\"You give your input in wrong format. Please check the API documentation for the appropriate input format!!\",\"Sorting Critera Input Control Doesn't Work\")", "def test_future_birth_date_import():\n _curr_date = datetime.utcnow()\n _later_date = _curr_date + relativedelta(days=1)\n later_date = _later_date.strftime(\"%d.%m.%Y\")\n\n citizen_with_birth_date_later_than_current = deepcopy(CITIZEN_EXAMPLE)\n citizen_with_birth_date_later_than_current[\"birth_date\"] = later_date\n with TestClient(app) as client:\n response = client.post(\n \"/imports\",\n json={\n \"citizens\": [\n citizen_with_birth_date_later_than_current\n ]}\n )\n\n assert response.status_code == 400" ]
[ "0.70000744", "0.63187903", "0.62191707", "0.6206472", "0.6201012", "0.6194432", "0.61828655", "0.6132267", "0.61312056", "0.61046326", "0.6066911", "0.6014641", "0.5994299", "0.59937394", "0.5989396", "0.59791684", "0.59529334", "0.59364206", "0.5929552", "0.58888996", "0.5862343", "0.58426094", "0.57896614", "0.5759354", "0.5756347", "0.5745462", "0.5731104", "0.5725414", "0.5713522", "0.56830674" ]
0.8959767
0
Test API can search employee by between two dates
def test_api_can_search_employee_by_between_dates(self):
    res = self.client().get(service_url_emp+'/search_between/2013-10-24,2014-10-24')
    self.assertEqual(res.status_code, 200)
    self.assertIn('name1', str(res.data))
    self.assertIn('name2', str(res.data))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_api_can_search_employee_by_birth_date(self):\n res = self.client().get(service_url_emp+'/search/2014-10-24')\n self.assertEqual(res.status_code, 200)\n self.assertIn('name2', str(res.data))", "def test_date_rage(self):\n\n query_params = {\n 'until_date': self.today,\n 'from_date': self.today,\n }\n search = OrderSearchEngine()\n query = search.filter_query(query_params)\n content = Q(created_at__range=[self.from_date, self.until_date])\n self.assertEqual(str(query), str(content))", "def test_search_two_dates(self):\n # search via 2 dates.\n self.data.search(user_date='01/01/1800', second_date='02/04/1827',\n all_names=True)\n\n test = self.data.search(user_date='5/21/2012',\n second_date='04/10/2012', first_name='Trevor',\n last_name='Harvey')\n item_date = datetime.datetime(month=4, day=19, year=2012)\n self.assertEqual(test[0].entry_date, item_date)\n\n self.data.search(user_date='03/12/0001', second_date='03/13/0001',\n all_names=True)\n return self.data.search(user_date='1/10/2013', second_date='5/21/2011',\n first_name='Trevor', last_name='Harvey')", "def test_date_range(self):\n\n url = '/%s/job-types/status/?started=%s&ended=%s' % ( self.api,\n '2015-01-01T00:00:00Z',\n '2015-01-02T00:00:00Z')\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 4)", "def test_api_can_get_all_employees(self):\n res = self.client().get(service_url_emp)\n self.assertEqual(res.status_code, 200)\n self.assertIn('name1', str(res.data))\n self.assertIn('name2', str(res.data))", "def test_get_filter_with_date_contacts(self):\n data = {\"date_start\": '2018-08-20',\n \"date_end\": '2018-08-25'}\n response = self.client.get(reverse('contacts-filter'), data)\n # import pdb; pdb.set_trace()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"count\"], 4)", "def test_GET_startdate(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=self.expense)\n self.assertEqual(res.status_code, 201)\n rv = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 1122, 'date_of_expense': '10-01-2021'})\n self.assertEqual(rv.status_code, 201)\n resl = self.client().get('/expenses/?start_date=01-01-2021', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(resl.status_code, 200)\n results = json.loads(resl.data)\n self.assertEqual(results['items'][0]['date_of_expense'], self.expense['date_of_expense'])", "def test_search_date_range_retrieves_corect_db_entries(self):\n # add some data to the database\n test_employee = [\n {'id': 1, 'name': \"Test Employee 1\"},\n ]\n test_log_entry_dates = [\n datetime.date(2018, 1, 1),\n datetime.date(2018, 1, 2),\n datetime.date(2018, 3, 4),\n datetime.date(2018, 5, 6),\n datetime.date(2018, 5, 7),\n ]\n e = db_manager.Employee.get_or_create(name=test_employee[0]['name'])\n # create some log entries\n for date in test_log_entry_dates:\n db_manager.LogEntry.create(\n employee=e[0],\n date=date,\n task_name='Test task for date {}'.format(date),\n duration=10,\n notes='Note'\n )\n\n start_index = 1\n end_index = -2\n\n match_slice = test_log_entry_dates[start_index:end_index + 1]\n\n expected_records = []\n for date in 
match_slice:\n new_record = OrderedDict([\n ('name', test_employee[0]['name']),\n ('date', date),\n ('task_name', 'Test task for date {}'.format(date)),\n ('duration', 10),\n ('notes', \"Note\")\n ])\n expected_records.append(new_record)\n\n fmt = \"%Y-%m-%d\"\n start_date_string = test_log_entry_dates[start_index].strftime(fmt)\n end_date_string = test_log_entry_dates[end_index].strftime(fmt)\n user_inputs = [\n start_date_string,\n end_date_string\n ]\n\n with patch('builtins.input', side_effect=user_inputs):\n self.menu.search_date_range()\n\n self.assertEqual(expected_records, self.menu.records)", "def test_until_date(self):\n\n query_params = {\n 'until_date': self.today,\n }\n search = OrderSearchEngine()\n query = search.filter_query(query_params)\n content = Q(created_at__lte=self.until_date)\n self.assertEqual(str(query), str(content))", "def test_get_filter_with_date_contacts_e(self):\n data = {\"type_contact\": 1, \"date_start\": '2018-08-20',\n \"date_end\": '2018-08-25'}\n response = self.client.get(reverse('contacts-filter'), data)\n # import pdb; pdb.set_trace()\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(response.data[\"count\"], 2)", "def test_query_events(self):\n CommonTestCases.admin_token_assert_in(\n self,\n query_events,\n \"Events do not exist for the date range\"\n )", "def test_movements_date_from_date_to(api_client):\n\n MovementFactory(date=datetime.date(2017, 2, 9))\n MovementFactory(date=datetime.date(2017, 2, 10))\n MovementFactory(date=datetime.date(2017, 2, 11))\n MovementFactory(date=datetime.date(2017, 2, 12))\n\n response = api_client.get(\n reverse(\"api:movements-list\"),\n {\"date_from\": \"2017-02-10\", \"date_to\": \"2017-02-11\"},\n )\n\n assert response.status_code == 200\n assert len(response.data) == 2\n assert response.data[0][\"date\"] == \"2017-02-10\"\n assert response.data[1][\"date\"] == \"2017-02-11\"", "def test_get_pricehistory_end_before_start(self):\n url = \"/products/1/pricehistory?start_date=1000&end_date=900\"\n res = self.get(url=url, role=\"admin\")\n self.assertException(res, exc.InvalidData)", "def test_search_risk_by_dates(self, field, attr):\n current_date = datetime.date.today()\n with factories.single_commit():\n factories.RiskFactory(**{attr: current_date})\n request_data = [{\n \"filters\": {\n \"expression\": {\n \"left\": {\"left\": field,\n \"op\": {\"name\": \"~\"},\n \"right\": current_date.strftime(\"%Y-%m-%d\")},\n \"op\": {\"name\": \"AND\"},\n \"right\": {\"left\": \"Status\",\n \"op\": {\"name\": \"IN\"},\n \"right\": [\"Active\", \"Draft\", \"Deprecated\"]}\n }\n },\n \"object_name\": \"Risk\",\n \"order_by\": [{\"name\": \"updated_at\", \"desc\": \"true\"}],\n }]\n\n response = self.api.post(\n all_models.Risk,\n data=request_data,\n url=\"/query\",\n )\n\n self.assert200(response)\n response_data = response.json[0][\"Risk\"]\n self.assertEqual(response_data[\"count\"], 1)\n self.assertEqual(response_data[\"values\"][0][attr],\n current_date.strftime(\"%Y-%m-%d\"))", "def get(self):\n args = self.parser.parse_args()\n date = get_date_or_none(args['date'])\n start_date = get_date_or_none(args['start_date'])\n end_date = get_date_or_none(args['end_date'])\n\n if date:\n employees = self.service.get_employees_by_date_of_birth(\n date, strategy=selectinload\n )\n elif start_date and end_date:\n employees = self.service.get_employees_born_in_period(\n start_date, end_date, strategy=selectinload\n )\n else:\n return self.BAD_DATE_MESSAGE, 400\n\n return 
self.schema.dump(employees, many=True), 200", "def test_search_date_range_returns_correct_menu(self):\n # add some data to the database\n test_employee = [\n {'id': 1, 'name': \"Test Employee 1\"},\n ]\n test_log_entry_dates = [\n datetime.date(2018, 1, 1),\n datetime.date(2018, 1, 2),\n datetime.date(2018, 3, 4),\n datetime.date(2018, 5, 6),\n datetime.date(2018, 5, 7),\n ]\n e = db_manager.Employee.get_or_create(name=test_employee[0]['name'])\n # create some log entries\n for date in test_log_entry_dates:\n db_manager.LogEntry.create(\n employee=e[0],\n date=date,\n task_name='Test task for date {}'.format(date),\n duration=10,\n notes='Note'\n )\n\n start_index = 1\n end_index = -2\n\n fmt = \"%Y-%m-%d\"\n start_date_string = test_log_entry_dates[start_index].strftime(fmt)\n end_date_string = test_log_entry_dates[end_index].strftime(\"%Y-%m-%d\")\n user_inputs = [\n start_date_string,\n end_date_string\n ]\n\n with patch('builtins.input', side_effect=user_inputs):\n result = self.menu.search_date_range()\n\n expected_result = self.menu.present_next_result\n\n self.assertEqual(expected_result, result)", "def test_query_events_with_start_date_before_end_date(self):\n CommonTestCases.admin_token_assert_in(\n self,\n query_events_with_start_date_before_end_date,\n \"Start date must be lower than end date\"\n )", "def test_GET_enddate(self):\n self.register_user()\n result = self.login_user()\n access_token = json.loads(result.data.decode())['access_token']\n res = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=self.expense)\n self.assertEqual(res.status_code, 201)\n rv = self.client().post('/expenses/', headers=dict(Authorization=\"Bearer \" + access_token), data=\n {'name': 'soda', 'amount': 1122, 'date_of_expense': '10-01-2021'})\n self.assertEqual(rv.status_code, 201)\n resl = self.client().get('/expenses/?end_date=03-01-2021', headers=dict(Authorization=\"Bearer \" + access_token))\n self.assertEqual(resl.status_code, 200)\n results = json.loads(resl.data)\n self.assertEqual(results['items'][0]['date_of_expense'], self.expense['date_of_expense'])", "def test_range_query(self):\r\n start = datetime(*self.base_date.timetuple()[:3])\r\n end = start + timedelta(days=3)\r\n\r\n results = DateTimeQueryTestModel.filter(user=0, day__gte=start, day__lt=end)\r\n assert len(results) == 3", "def test_data_with_range_view(self):\n\n self.create_model()\n self.create_machine()\n self.insert_data()\n\n date_literal = '%Y-%m-%d'\n start_date = dt.today()\n end_date = start_date + datetime.timedelta(days=1)\n\n self.create_user_account_and_login()\n query_url = self.range_url + '/' + self.data['mid'] + \\\n '/?s=' + dt.strftime(start_date, date_literal) + \\\n '&e=' + dt.strftime(end_date, date_literal)\n\n response = self.client.get(query_url)\n results = json.loads(response.content)\n\n self.assertEquals(len(results), 2)", "def test_get_pricehistory_invalid_start_or_end_date(self):\n # Testing start date\n url = \"/products/1/pricehistory?start_date=trololol\"\n res = self.get(url=url, role=\"admin\")\n self.assertException(res, exc.WrongType)\n\n # Testing end date\n url = \"/products/1/pricehistory?end_date=trololol\"\n res = self.get(url=url, role=\"admin\")\n self.assertException(res, exc.WrongType)\n\n # Testing start and end date\n url = \"/products/1/pricehistory?start_date=trololol&end_date=trololol\"\n res = self.get(url=url, role=\"admin\")\n self.assertException(res, exc.WrongType)", "def test_date_range_fields():\n now = 
datetime.datetime(2017, 6, 13, 9, 44, 31, 62870)\n fields = {\n 'estimated_land_date_after': now,\n 'estimated_land_date_before': now,\n 'adviser.id': 1234,\n }\n\n filters, ranges = _split_range_fields(fields)\n\n assert filters == {\n 'adviser.id': 1234,\n }\n assert ranges == {\n 'estimated_land_date': {\n 'gte': now,\n 'lte': now,\n },\n }", "def test_getEventsForItinerary(self):\n date = {'date': '2015-08-21T00:00:00.000Z'}\n events = []\n for i in range(10):\n hh = str(i)\n events.append(dict(start = '2015-08-21T'+hh+':23:00.000Z',\n end = '2015-08-21T'+hh+':25:00.000Z',\n date = '2015-08-21T00:00:00.000Z'))\n\n rv = self.json_get('/getEventsForItinerary/bbbb', date)\n assert 'Invalid username' in str(rv.data)\n\n rv = self.json_get('/getEventsForItinerary/alex', date)\n assert 'Itinerary for the day not found' in str(rv.data)\n\n # Create sample itinerary for alex for the event day\n self.json_post('/createItinerary/alex', dict(\n name = 'New Day',\n date = date['date']\n ))\n\n rv = self.json_get('/getEventsForItinerary/alex', date)\n assert '{\"events\": []}' in str(rv.data)\n\n for e in events:\n rv = self.json_post('/createEvent/alex', e)\n uid = str('alex_' + e['start'] + e['end'])\n assert uid in str(rv.data)\n\n rv = self.json_get('/getEventsForItinerary/alex', date)\n for e in events:\n uid = str('alex_' + e['start'] + e['end'])\n assert uid in str(rv.data)\n assert e['start'] in str(rv.data)\n assert e['end'] in str(rv.data)", "def test_hotel_search(self):\n test_params = {\n 'DEBUG': False,\n 'TESTING': True\n }\n\n app = create_app(settings_override=test_params).test_client\n params = {\n \"city\": \"Las Vegas\",\n \"checkin\": \"2018-05-27\",\n \"checkout\": \"2018-05-28\"\n }\n\n with app() as c:\n response = c.post('/search', json=params)\n\n assert response.status_code == 200\n assert isinstance(response.json, list)\n assert len(response.json) > 0", "def date_search(data, start_date, end_date):\n # change dates for date search\n data['timestamp'] = pd.to_datetime(data['timestamp']).dt.date\n d1 = datetime.datetime.strptime(f'{start_date}', '%Y-%m-%d').date()\n d2 = datetime.datetime.strptime(f'{end_date}', '%Y-%m-%d').date()\n\n # constrict data by date search parameters\n less_data = data[(data['timestamp'] >= d1) & (data['timestamp'] <= d2)]\n\n return less_data", "def test_listing_incidents_invalid_date_rage(self):\n resp = self.client.get(\n reverse('incidents', kwargs={'team_id': '7de98e0c-8bf9-414c-b397-05acb136935e'}), {\"since\": \"05-01-2019\", \"until\": \"01-01-2019\"}\n )\n\n self.assertEqual(resp.status_code, 400)\n self.assertEqual(resp.json(), {'error': 'since cannot be newer than until'})", "def test_query_params_date(session, params, expected_number_of_hits):\n result = get_search(session, params)\n compare(result['total']['value'], expected_number_of_hits)", "def test_get_pricehistory_defining_start_and_end_date(self):\n # Change the creation date of the product to 01.01.2019\n dt = datetime.strptime(\"01.01.2019\", \"%d.%m.%Y\")\n Product.query.filter_by(id=1).first().creation_date = dt\n ProductPrice.query.filter_by(product_id=1).first().timestamp = dt\n db.session.commit()\n\n # Insert a pricehistory\n timestamps = [\"02.01.2019\", \"03.01.2019\", \"08.01.2019\", \"10.01.2019\"]\n self.insert_pricehistory(timestamps)\n\n # Query all entries from the 02.01.19 to 08.01.19\n start = int(datetime(year=2019, month=1, day=2).timestamp())\n end = int(datetime(year=2019, month=1, day=8).timestamp())\n url = 
f\"/products/1/pricehistory?start_date={start}&end_date={end}\"\n res = self.get(url=url, role=\"admin\")\n pricehistory = json.loads(res.data)\n # There should be only the entries [02.01.19, 03.01.19 and 08.01.19]\n self.assertEqual(len(pricehistory), 3)", "def test_movements_date_from(api_client):\n\n MovementFactory(date=datetime.date(2017, 2, 10))\n MovementFactory(date=datetime.date(2017, 2, 11))\n\n response = api_client.get(\n reverse(\"api:movements-list\"), {\"date_from\": \"2017-02-11\"}\n )\n\n assert response.status_code == 200\n assert len(response.data) == 1\n assert response.data[0][\"date\"] == \"2017-02-11\"", "def test_get_event_dates(self):\n date = EventDate.objects.create(\n event=self.event_show2,\n date=(timezone.now() + timedelta(days=10))\n )\n dates = list(get_event_dates(self.event_show2))\n self.assertTrue(date in dates)\n self.assertTrue(self.future_date in dates)\n self.assertFalse(self.past_date in dates)" ]
[ "0.7454268", "0.68482393", "0.6834848", "0.6611345", "0.6533663", "0.64855504", "0.64845145", "0.6470128", "0.646424", "0.6409036", "0.6393102", "0.63234764", "0.62835866", "0.6271889", "0.6265861", "0.6254339", "0.6247781", "0.62460834", "0.6193098", "0.6177852", "0.61352086", "0.61335766", "0.6091689", "0.60860234", "0.6056555", "0.6046105", "0.60454965", "0.60434175", "0.60412276", "0.6006904" ]
0.88856506
0
This function can be used to selectively filter out specific permutation combinations. It is called by RunPermutations for every possible permutation of the variables in the permutations dict. It should return True for a valid combination of permutation values and False for an invalid one.
def permutationFilter(perm):\n  # An example of how to use this\n  #if perm['__consumption_encoder']['maxval'] > 300:\n  #  return False;\n  #\n  return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def permutation_is_valid(permutation):\n pass", "def check_permutation(u, v):\n for permutation in itertools.permutations(u):\n if v == permutation:\n return True\n return False", "def valid_parameter_combinations(parameterSpace):\n all_combinations = product(*parameterSpace.values())\n all_combinations = [{k:v for k,v in zip(parameterSpace.keys(), combn)} for combn in all_combinations]\n return [x for x in all_combinations if x[\"dim\"] % x[\"Kstep\"] == 0]", "def valid_colset_comb(prod, pairedcols):\n for c1, ci, cn in prod:\n cols = set([c1, cn]) | ci\n for a, b in pairedcols:\n if (a in ci and b not in ci and b != cn) or (b in ci and a not in ci and a != c1):\n return False\n if (a == c1 and b not in ci) or (a == cn and b in cols):\n return False\n if (b == c1 and a in cols) or (b == cn and a not in ci):\n return False\n return all(valid_colset_pair(x, pairedcols) for x in combinations(prod, 2))", "def is_permutation(input1, input2):\n if len(input1) != len(input2):\n return False\n if build_permutation_dictionary(input1) == build_permutation_dictionary(input2):\n return True\n return False", "def test_permutations(experiment, verbose=False):\n topics = experiment.topics\n no_topics = len(topics) # The total number of topics used for the given experiment.\n no_permutations = experiment.n # The total number of possible permutations.\n\n if verbose:\n print \"Topics: {0} (total of {1})\".format(topics, no_topics)\n print \"Total permutations: {0}\".format(no_permutations)\n print\n\n for i in range(0, no_permutations):\n rotations = experiment.get_rotations(i)\n\n if verbose:\n print \"Permutation {0} ({1})\".format(i, rotations)\n\n for k in range(0, no_topics):\n rotation_topic = experiment.get_rotation_topic(i, k)\n\n if verbose:\n print \"\\tTopic {0} at permutation list position {1}\".format(rotation_topic, k)\n\n if experiment.get_rotations(i)[k] == experiment.get_rotation_topic(i, k):\n if verbose:\n print \"\\t\\tPASS\"\n else:\n if verbose:\n print \"\\t\\tFAIL\"\n return False\n\n if verbose:\n print \"Permutation check PASSED\"\n\n return True", "def valid_parameter_combinations(parameterSpace):\n all_combinations = product(*parameterSpace.values())\n return [{k:v for k,v in zip(parameterSpace.keys(), combn)} for combn in all_combinations]", "def post_hoc_perm(conditions, n_shuffles, dataframe, method = scipy.stats.ttest_rel, seed = 1010):\n \n np.random.seed(seed)\n\n pairs = [pair for pair in itertools.combinations(conditions, 2)]\n n_pairs = len(pairs)\n\n t = np.floor(n_pairs * 0.25)\n\n obs_cond = {}\n perm_cond = {}\n p_cond = {}\n p_ph = {}\n\n maxT = np.zeros(n_shuffles)\n\n #First loop: Generate permutations\n for n, pair in enumerate(pairs):\n\n if n % t == 0:\n print((n / n_pairs) * 100)\n\n term = pair[0] + '_vs_' + pair[1]\n obs, perm, p = t_perm(dataframe[pair[0]], dataframe[pair[1]], n_shuffles, term)\n obs_cond.update(obs)\n perm_cond.update(perm)\n p_cond.update(p)\n\n\n\n for n in range(0, n_shuffles):\n shuffle = np.array([shuffles[n] for shuffles in perm_cond.values()])\n maxT[n] = shuffle[np.squeeze(np.where(abs(shuffle) == np.max(np.abs(shuffle))))]\n\n p_ph = {cond: sum(abs(maxT) >= abs(obs_cond[cond])) / n_shuffles for cond in obs_cond.keys()}\n \n print('Complete')\n return(obs_cond, perm_cond, maxT, p_ph)", "def _can_fuse_set_of_gridded_perms(\n self, fuse_counter: Counter[GriddedPerm]\n ) -> bool:\n return all(\n self._is_valid_count(count, gp) for gp, count in fuse_counter.items()\n )", "def confound_restricted_permutations(target, 
confounds):\n\n raise NotImplementedError()", "def validate(dic, option_list):\n\tfor key in dic.viewkeys():\n\t\tif key in option_list:\n\t\t\tfor option in option_list:\n\t\t\t\tif option != key:\n\t\t\t\t\tif dic[option] and dic[key]:\n\t\t\t\t\t\traise click.UsageError('Invalid option combination --%s \\\n\t\t\t\t\t\t\tcannot be used with --%s' % (option, key))\n\n\treturn True", "def test_random_valid_mutation_with_all(self):\n\n applied_mutation = defaultdict(int)\n N = self._min_trials(n_mutations=4)\n\n for i in range(N):\n ind = self.individuals[self.ind_strings[1]]\n ind_clone = self.gama._toolbox.clone(ind)\n new_ind, = mut_replace_primitive(ind_clone, self.gama._pset)\n if self._mutShrink_is_applied(ind, new_ind)[0]:\n applied_mutation['shrink'] += 1\n elif self._mutInsert_is_applied(ind, new_ind)[0]:\n applied_mutation['insert'] += 1\n elif self._mut_replace_terminal_is_applied(ind, new_ind)[0]:\n applied_mutation['terminal'] += 1\n elif self._mut_replace_primitive_is_applied(ind, new_ind)[0]:\n applied_mutation['primitive'] += 1\n else:\n self.fail(\"No mutation (or one that is unaccounted for) is applied.\")\n\n self.assertTrue(all([n > 0 for (mut, n) in applied_mutation.items()]))", "def check_mutation_parameters(variables_number: int, **mutation_params: Any) -> None:\n if \"mutation_points_number\" in mutation_params:\n check_mutation_points_number(variables_number, mutation_params[\"mutation_points_number\"])", "def check_combination(self, combination):\n\n # we first check if there are any pieces of the right value well placed.\n for j in range(0, 4):\n if combination[j] == self.answer[j]:\n self.try_return['well_placed'] += 1\n self.already_checked += [combination[j]]\n self.avoid += [j]\n\n for p in range(0, 4):\n for s in range(0, 4):\n if not p in self.avoid:\n if combination[s] == self.answer[p] and not combination[s] in self.already_checked:\n\n self.try_return['misplaced'] += 1\n self.duplicate += [combination[s]]\n if self.duplicate.count(combination[s]) > 1:\n self.try_return['misplaced'] -= 1", "def validate_permutation(p):\n if not isinstance(p, list):\n raise ValueError(\"A permutation should be a list of integers\")\n\n for i in p:\n if not isinstance(i, int):\n raise ValueError(\"A permutation should be a list of integers\")\n\n if set(p) != set(range(len(p))):\n raise ValueError(\"A permutation should only contain each position exactly once\")", "def is_permutation3(A, B, C):\n return set(A) == set(B) == set(C)", "def has_necessary_permissions(perm_json, required_perms, all_required=True):\n\n # Make list if not required_perms is string\n if isinstance(required_perms, str) or isinstance(required_perms, unicode):\n list_perms = [required_perms]\n else:\n list_perms = required_perms\n\n # Loop and check presence\n is_permitted = True\n for perm_key in list_perms:\n is_present = lookup_permission(perm_json, perm_key)\n\n if all_required:\n # All required: AND operation\n is_permitted = is_permitted and is_present\n if not is_permitted:\n break\n else:\n # Atleast one required: OR operation\n is_permitted = is_permitted or is_present\n\n\n return is_permitted", "def _validate_freq_params(freq_params):\n allowed_params = (\n \"Nfreqs\",\n \"start_freq\",\n \"bandwidth\",\n \"freq_array\",\n \"channel_width\",\n )\n allowed_combinations = [\n combo\n for combo in itertools.combinations(allowed_params, 3)\n if \"start_freq\" in combo and \"freq_array\" not in combo\n ] + [(\"freq_array\",)]\n for combination in allowed_combinations:\n if 
all(freq_params.get(param, None) is not None for param in combination):\n return True\n\n # None of the minimum necessary combinations are satisfied if we get here\n return False", "def check_occuring_variables(formula,variables_to_consider,allowed_variables) :\n variable_set=set(allowed_variables)\n for clause in formula :\n variables_in_clause = {abs(l) for l in clause if abs(l) in variables_to_consider}\n if not variables_in_clause <= variable_set:\n return False, [v for v in variables_in_clause if not v in variable_set] \n return True, []", "def feasible(self, c):\n\t\tfor played_combination in self.combinations:\n\t\t\tif not self.consistent(c, played_combination):\n\t\t\t\treturn False\n\t\treturn True", "def subset_pass_constraints(df):\n # All the constraints strings to test against. Must follow regex.\n # Keys: user-firendly constraint name, used for pass/fail bool column\n # Values: strings to test against\n accept_constraints = {\n \"pass_del_a_mu\": r\"Muon magn\\. mom\\. more than 2 sigma away\",\n \"pass_relic\": r\"Relic density too small \\(Planck\\)\",\n \"pass_bctaunu\": r\"b \\-> c tau nu more than 2 sigma away \\(as SM\\)\",\n \"pass_chi2zz\": r\"chi2\\(H\\->ZZ\\) > 6\\.18\",\n \"pass_chi2bb\": r\"chi2\\(H\\->bb\\) > 6\\.18\",\n \"pass_chi2gg\": r\"chi2\\(H\\->gg\\) > 6\\.18\",\n \"pass_cms4mu\": r\"Excluded H_125\\->AA\\->4mu \\(CMS\\)\"\n }\n for k, v in accept_constraints.iteritems():\n df[k] = ~df.constraints.str.contains(v)\n\n # We want a bitmask, so for each entry we simply want a True or False\n # First make a copy of the constraints Series\n con_series = df.constraints.copy(deep=True)\n # Now for each entry we remove the constraints we don't mind failing\n for c in accept_constraints.values():\n con_series = con_series.str.replace(c, \"\")\n # con_series = con_series.str.replace(r\"^\\|+$\", \"\") # Any leftover separators\n con_series = con_series.apply(lambda x: x.strip('|'))\n con_series = con_series.str.replace(r\"\\|\\|+\", r\"|\") # Any multiple separators\n # Now figure out which ones are empty\n mask = con_series.str.match(\"^$\")\n # Return those entries, allowing for a +ve muon mag moment contribution\n return df[mask & (df.Del_a_mu > 0)]", "def filter(self, molgrp):\n if self.dict_filter is None:\n return True\n\n for cond_name, cond_vals in self.dict_filter.items():\n\n try:\n val = molgrp['targets/' + cond_name][()]\n except KeyError:\n warnings.warn(f'Filter {cond_name} not found for mol '\n f'{molgrp.name}')\n\n # if we have a string it's more complicated\n if isinstance(cond_vals, str):\n ops = ['>', '<', '==', '<=', '>=']\n new_cond_vals = cond_vals\n for o in ops:\n new_cond_vals = new_cond_vals.replace(\n o, 'val' + o)\n if not eval(new_cond_vals):\n return False\n else:\n raise ValueError(\n 'Conditions not supported', cond_vals)\n\n return True", "def filter(self, filter_dict):\n self.result = [x for x in self.result if all(str(x[y]) == z or (hasattr(x[y], \"__iter__\") and (z in str(x[y]) or any(z in str(d.values) for d in x[y] if isinstance(d, dict)))) for y,z in filter_dict.items())] \n\n return self", "def section_4_9():\n from itertools import permutations\n from itertools import combinations\n from itertools import combinations_with_replacement\n\n items = ['a', 'b', 'c']\n\n def test1():\n for p in permutations(items):\n print(p)\n\n def test2():\n for p in combinations(items, 3):\n print(p)\n print()\n for p in combinations(items, 2):\n print(p)\n print()\n for p in combinations(items, 1):\n print(p)\n print()\n for p in 
combinations_with_replacement(items, 3):\n print(p)", "def _check_allowed_values(self, parameters):\n for key, allowed_values in self.ALLOWED_VALUES:\n self.log([u\"Checking allowed values for parameter '%s'\", key])\n if key in parameters:\n value = parameters[key]\n if value not in allowed_values:\n self._failed(u\"Parameter '%s' has value '%s' which is not allowed.\" % (key, value))\n return\n self.log(u\"Passed\")", "def validate_unity(unity_permutation: List[Any]) -> bool:\n if unity_permutation[1] != 1.0:\n raise ValueError(\"The unity permutation does not have a phase of 1.0\")\n\n if unity_permutation[2]:\n return False\n\n static = unity_permutation[0]\n lowlimit = -1\n\n for index in static:\n if index < lowlimit:\n raise ValueError(\"The first entry is not unity\")\n lowlimit = index\n\n return True", "def check_restrictions(restrictions, element, keys, verbose):\n params = OrderedDict(zip(keys, element))\n for restrict in restrictions:\n if not eval(replace_param_occurrences(restrict, params)):\n if verbose:\n print(\"skipping config\", get_instance_string(params), \"reason: config fails restriction\")\n return False\n return True", "def is_permutation(A, B):\n return set(A) == set(B)", "def reduce_puzzle(values):\n\tstalled = False\n\n\twhile not stalled:\n\n\t\tstart_values = dict(values)\n\n\t\treduced_values = eliminate(values)\n\t\treduced_values = only_choice(reduced_values)\n\t\t\n\t\tstalled = start_values == reduced_values\n\n\t\tempties = [box for box in boxes if len(values[box]) == 0]\n\n\t\tif empties:\n\t\t\treturn False\n\t\n\treturn values", "def test_random_valid_mutation_without_shrink(self):\n\n applied_mutation = defaultdict(int)\n N = self._min_trials(n_mutations=3)\n\n for i in range(N):\n ind = self.individuals[self.ind_strings[2]]\n ind_clone = self.gama._toolbox.clone(ind)\n new_ind, = mut_replace_primitive(ind_clone, self.gama._pset)\n if self._mutInsert_is_applied(ind, new_ind)[0]:\n applied_mutation['insert'] += 1\n elif self._mut_replace_terminal_is_applied(ind, new_ind)[0]:\n applied_mutation['terminal'] += 1\n elif self._mut_replace_primitive_is_applied(ind, new_ind)[0]:\n applied_mutation['primitive'] += 1\n else:\n self.fail(\"No mutation (or one that is unaccounted for) is applied.\")\n\n self.assertTrue(all([n > 0 for (mut, n) in applied_mutation.items()]))" ]
[ "0.6440923", "0.62097514", "0.6092914", "0.56775457", "0.5497833", "0.5466948", "0.5403812", "0.5400389", "0.53718925", "0.53197914", "0.5313259", "0.52997786", "0.52935493", "0.5287549", "0.5284966", "0.52562", "0.5228215", "0.5162008", "0.51274145", "0.51215994", "0.5102217", "0.51020825", "0.5052471", "0.50446486", "0.50407195", "0.50245464", "0.50021195", "0.5001697", "0.49968314", "0.49949202" ]
0.7232595
0
Get a list of available variants. The list may be empty, and must be None in case of error.
def fetchVariantList(self, url):\n    html = self.fetchHtml(url)\n    if html is None:\n        return None\n\n    # Get variants\n    variants_data = []\n    variants = []\n    for button in html.xpath("//h3[@class='downloads']//a"):\n        name = button.xpath("text()")[0].strip()\n        if name == 'SBSAR':\n            continue\n        variants_data.append(button)\n        variants.append(name)\n\n    # Save some data for fetchVariant\n    self._html = html\n    self._variants_data = variants_data\n    return variants
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def variants ( self ) :\n vars = []\n items = [ 'distrib' , 'default' ]\n items += [ 'stat_%s' % d for d in range ( 10 ) ]\n items += [ 'syst_%s' % d for d in range ( 10 ) ]\n \n from ostap.core.core import rootError \n from ostap.logger.logger import logFatal\n \n for item in items :\n if self.__variant == item : continue \n path = os.path.join ( self.__config_run.eosrootdir ,\n self.__config ,\n \"%s_%s.root\" % ( self.__dataset, item ) )\n with logFatal() , rootError () : \n rf = ROOT.TFile.Open ( path , 'READ' , exception = False )\n if rf and rf.IsOpen ( ) :\n vars.append ( item )\n rf.Close() \n \n return tuple ( vars )", "def view_variants(context, variant_id):\n adapter = context.obj['adapter']\n\n results = []\n if variant_id is not None:\n results = adapter.find_variant({'display_name': variant_id})\n\n else:\n results = adapter.find_variants({})\n\n click.echo(pprint(results))", "def find_all_variants():\n return Variant.select(lambda s: s.deletedAt is None)[:]", "def match_variants(self,state,variants):\r\n for v in variants:\r\n terms = self.match_variant(state,v)\r\n if terms is not None:\r\n return terms\r\n return None", "def all_variants(self):\n variants = []\n for digit_count, mp_number in self.mp_numbers.items():\n variants.extend(mp_number.variants)\n return variants", "def get_all_variants():\n # reads the session\n session = request.args.get('session', type=str)\n # reads the requested process name\n process = request.args.get('process', default='receipt', type=str)\n\n dictio = {}\n\n if check_session_validity(session):\n user = get_user_from_session(session)\n if lh.check_user_log_visibility(user, process):\n variants = lh.get_handler_for_process_and_session(process, session).get_variant_statistics()\n dictio = {\"variants\": variants}\n\n ret = jsonify(dictio)\n\n return ret", "def get_variants(self):\n node, index, line = self.stack[-1]\n if len(line) <= index + 1:\n return []\n next = line[index + 1]\n if type(next) == list:\n return next\n return []", "def variants(self) -> localedata.LocaleDataDict:\n return self._data['variants']", "def find_variants(self, variants, gene, family):\n \n # get the inheritance for the gene (monoalleleic, biallelic, hemizygous\n # etc), but allow for times when we haven't specified a list of genes\n # to use\n known_gene = None\n gene_inh = None\n if self.known_genes is not None and gene in self.known_genes:\n known_gene = self.known_genes[gene]\n gene_inh = known_gene['inh']\n \n chrom_inheritance = variants[0].get_inheritance_type()\n \n # If we are looking for variants in a set of known genes, and the gene\n # isn't part of that set, then we don't ant to examine the variant for\n # that gene, UNLESS the variant is a CNV, since CNVs can be included\n # purely from size thresholds, regardless of which gene they overlap.\n if self.known_genes is not None and gene not in self.known_genes:\n variants = [ x for x in variants if x.is_cnv() ]\n \n # ignore intergenic variants\n if gene is None:\n for var in variants:\n if var.get_chrom() == self.debug_chrom and var.get_position() == self.debug_pos:\n print(var, \"lacks HGNC/gene symbol\")\n return []\n \n # Now that we are examining a single gene, check that the consequences\n # for the gene are in the required functional categories.\n variants = [ var for var in variants if var.child.is_lof(gene) or var.child.is_missense(var.child.is_cnv(), gene) ]\n if variants == []:\n return []\n \n for x in variants[0].child.info.symbols:\n try:\n symbol = x.get(gene, ['HGNC', 'SYMBOL', 
'ENSG'])\n break\n except KeyError:\n continue\n logging.info(\"{}\\t{}\\tvariants: {}\\trequired_mode: {}\".format(\n family.child.get_id(), symbol, [str(x) for x in variants], gene_inh))\n \n if chrom_inheritance == \"autosomal\":\n finder = Autosomal(variants, family, known_gene, gene, self.cnv_regions)\n elif chrom_inheritance in [\"XChrMale\", \"XChrFemale\", \"YChrMale\"]:\n finder = Allosomal(variants, family, known_gene, gene, self.cnv_regions)\n \n return finder.get_candidate_variants()", "def get_volume_list():\n return parse_list_output(Popen('cinder list --all-tenants'.split(),\n stdout=STDOUT, stderr=STDERR).communicate()[0])", "def get_all_available_variations(self, use_cache: bool=False):\n if use_cache and hasattr(self, '_get_all_available_variations_cache'):\n return self._get_all_available_variations_cache\n\n from .signals import determine_availability\n\n variations = self._get_all_generated_variations()\n responses = determine_availability.send(\n self.event, item=self,\n variations=variations, context=None,\n cache=self.event.get_cache()\n )\n\n for i, var in enumerate(variations):\n var['available'] = var['variation'].active if 'variation' in var else True\n if 'variation' in var:\n if var['variation'].default_price:\n var['price'] = var['variation'].default_price\n else:\n var['price'] = self.default_price\n else:\n var['price'] = self.default_price\n\n # It is possible, that *multiple* restriction plugins change the default price.\n # In this case, the cheapest one wins. As soon as there is a restriction\n # that changes the price, the default price has no effect.\n\n newprice = None\n for receiver, response in responses:\n if 'available' in response[i] and not response[i]['available']:\n var['available'] = False\n break\n if 'price' in response[i] and response[i]['price'] is not None \\\n and (newprice is None or response[i]['price'] < newprice):\n newprice = response[i]['price']\n var['price'] = newprice or var['price']\n\n variations = [var for var in variations if var['available']]\n\n self._get_all_available_variations_cache = variations\n return variations", "def get_merged_variants(self, variants, key):\n # type: (List[vcfio.Variant], str) -> List[vcfio.Variant]\n raise NotImplementedError", "def versions(self, stored=False) -> List['RadsSolutionVersion']:\n\n if stored:\n fspath = self.storage.fspath(self.path)\n if not os.path.isdir(fspath):\n return [] # solution not in storage\n listing = []\n for path in os.listdir(fspath):\n if not os.path.isdir(os.path.join(fspath, path)):\n continue\n listing.append(path)\n else:\n logger.debug(f\"retrieve versions of {self}\")\n listing = self.storage.request_text(f\"{self.path}/releaselisting\").splitlines()\n return sorted(RadsSolutionVersion(self, RadsVersion(l)) for l in listing)", "def variants(\n self,\n *,\n samples=None,\n isolated_as_missing=None,\n alleles=None,\n impute_missing_data=None,\n copy=None,\n left=None,\n right=None,\n ):\n interval = self._check_genomic_range(left, right)\n if impute_missing_data is not None:\n warnings.warn(\n \"The impute_missing_data parameter was deprecated in 0.3.0 and will\"\n \" be removed. 
Use ``isolated_as_missing=False`` instead of\"\n \"``impute_missing_data=True``.\",\n FutureWarning,\n )\n # Only use impute_missing_data if isolated_as_missing has the default value\n if isolated_as_missing is None:\n isolated_as_missing = not impute_missing_data\n if copy is None:\n copy = True\n # See comments for the Variant type for discussion on why the\n # present form was chosen.\n variant = tskit.Variant(\n self,\n samples=samples,\n isolated_as_missing=isolated_as_missing,\n alleles=alleles,\n )\n if left == 0 and right == self.sequence_length:\n start = 0\n stop = self.num_sites\n else:\n start, stop = np.searchsorted(self.sites_position, interval)\n\n if copy:\n for site_id in range(start, stop):\n variant.decode(site_id)\n yield variant.copy()\n else:\n for site_id in range(start, stop):\n variant.decode(site_id)\n yield variant", "def resolve_variants(self):\n\n def evaluate_clause(clause):\n if 'or' in clause or 'and' in clause:\n raise Exception(\"Reserved keyword 'and || or' used.\")\n v = dict_contains(self.traits, clause)\n return v\n \n def process_effects(variant_name, variant_details):\n \"\"\"\n This nested function handles the effects of a \n given clause.\n \n Right now, the only relevant effect is 'replace',\n which causes a variant to replace an existing variant\n \n \"\"\"\n if 'replaces' in variant_details:\n enabled_variants.remove(variant_details['replaces'])\n enabled_variants.add(variant_name)\n\n if 'cflags' in variant_details:\n if type(variant_details['cflags']) == dict:\n self.config['cflags'] += variant_details['cflags']['gcc']\n else:\n self.config['cflags'] += \" \" + variant_details['cflags']\n # Beginning of main function\n if 'filtered_variants' in self.__dict__:\n return self.filtered_variants\n \n enabled_variants = set(['src'])\n variants = self.get_variants()\n \n for variant in variants:\n assert len(variant) == 1\n for name, details in variant.items():\n if 'when' in details:\n enabled = evaluate_clause(details['when'])\n if enabled:\n process_effects(name, details)\n self.variant_dirs = {}\n for variant_name in enabled_variants:\n self.variant_dirs[variant_name] = join(self.path, variant_name)\n\n self.filtered_variants = [a for a in self.get_variants() if list(a.keys())[0] in enabled_variants]\n return self.filtered_variants", "def get_variants(topic):\n return [topic['variant%s'%i] for i in range(1,3) if not pd.isnull(topic['variant%s'%i])]", "def populate_variants(self, inventory=None):\n self.variants = list()\n\n option_combos = self.generate_option_combos()\n\n for combo in option_combos:\n self.variants.append(Variant(\n self.style_number,\n option_combo=combo,\n inventory=inventory))", "def get_variations_from_woo(self, result, wcapi, instance):\n variants = []\n try:\n params = {\"per_page\":100}\n response = wcapi.get(\"products/%s/variations\" % (result.get(\"id\")),\n params=params)\n variants = response.json()\n\n total_pages = response.headers.get(\"X-WP-TotalPages\")\n if int(total_pages) > 1:\n for page in range(2, int(total_pages) + 1):\n params[\"page\"] = page\n response = wcapi.get(\"products/%s/variations\" % (result.get(\"id\")),\n params=params)\n variants += response.json()\n\n except Exception as e:\n message = \"Json Error : While Import Product Variants from WooCommerce \" \\\n \"for instance %s. 
\\n%s\" % (instance.name, e)\n return message\n return variants", "def available_versions(self, **kwargs):\n return self.raw_version_data(**kwargs)", "def variant_names(self, language=DEFAULT_LANGUAGE, max_distance: int=25) -> list:\n names = []\n if self.variants is not None:\n for variant in self.variants:\n var_names = code_to_names('variant', variant)\n names.append(self._best_name(var_names, language, max_distance))\n return names", "def select_versions(self):\n return []", "def _get_variants(name):\n names = [name]\n oldname = name\n # Map greek words to unicode characters\n if DOT_GREEK_RE.search(name):\n wordname = name\n while True:\n m = DOT_GREEK_RE.search(wordname)\n if m:\n wordname = wordname[:m.start(1)-1] + m.group(1) + wordname[m.end(1)+1:]\n else:\n break\n symbolname = name\n while True:\n m = DOT_GREEK_RE.search(symbolname)\n if m:\n symbolname = symbolname[:m.start(1)-1] + GREEK_WORDS[m.group(1)] + symbolname[m.end(1)+1:]\n else:\n break\n names = [wordname, symbolname]\n else:\n while True:\n m = GREEK_RE.search(name)\n if m:\n name = name[:m.start(2)] + GREEK_WORDS[m.group(2)] + name[m.end(2):]\n else:\n break\n while True:\n m = UNAMBIGUOUS_GREEK_RE.search(name)\n if m:\n name = name[:m.start(1)] + GREEK_WORDS[m.group(1)] + name[m.end(1):]\n else:\n break\n if not name == oldname:\n names.append(name)\n newnames = []\n for name in names:\n # If last word \\d+, add variants with hyphen and no space preceding\n if NUM_END_RE.search(name):\n newnames.append(NUM_END_RE.sub('-\\g<1>', name))\n newnames.append(NUM_END_RE.sub('\\g<1>', name))\n # If last word [A-Za-z]\\d* add variants with hyphen preceding.\n if ALPHANUM_END_RE.search(name):\n newnames.append(ALPHANUM_END_RE.sub('-\\g<1>', name))\n names.extend(newnames)\n return names", "def _get_vlist(self, vlist):\n if vlist == \"all\":\n return list(range(1, 4095))\n elif vlist == \"none\":\n return []\n elif type(vlist) is not list:\n raise Exception(\"Unexpected vlan list: \" + str(vlist))\n else:\n return vlist", "def get_choices(self, instance):\n if instance.type == BaseParameter.CHOICE_TYPE:\n return [\n x.value\n for x in instance.get_typed_parameter().get_available_choices()\n ]\n else:\n return None", "def variants(self, phrase: Union[str, Phrase]) -> Union[None, List[Phrase]]:\n phrase_string = phrase.phrase_string if isinstance(phrase, Phrase) else phrase\n if phrase_string not in self.has_variants:\n return None\n else:\n return [self.variant_index[variant_string] for variant_string in self.has_variants[phrase_string]]", "def get_flavors(self):\n url = '%s/flavors/detail' % self.catalog['compute']\n res = self.get(url)\n if res['status'] == 200:\n return json.loads(res['body'])['flavors']\n else:\n LOG.error('Get flavors failed: %s %s %s' %\n (res['status'], res['reason'], res['body']))\n raise InvalidResponse(res)", "def VtVariant(list):\n return win32com.client.VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_VARIANT, list)", "def list_installed(self) -> Generator[Path, None, None]:\n LOGGER.verbose(\"checking %s for Terraform versions...\", self.versions_dir)\n return self.versions_dir.rglob(\"*.*.*\")", "def get_complete_volume_info_all():\n\n return_list = []\n try:\n vl, err = get_basic_volume_info_all()\n if err:\n raise Exception(err)\n # print 'vl is', vl\n\n if vl:\n for vol_info_dict in vl:\n\n rd, err = get_complete_volume_info(\n vol_info_dict['name'], vol_info_dict)\n if err:\n raise Exception(err)\n\n return_list.append(rd)\n\n except Exception, e:\n return None, 'Error getting complete volume 
information for all volumes: %s' % str(e)\n else:\n return return_list, None", "def has_variants(self):\n return bool(self.get_variants())" ]
[ "0.64393675", "0.6411364", "0.626434", "0.6205572", "0.61462814", "0.6107378", "0.6058094", "0.5991796", "0.59116775", "0.5792999", "0.57455677", "0.56936234", "0.56718695", "0.5625359", "0.5617946", "0.5613669", "0.56135285", "0.5612513", "0.5567904", "0.5525392", "0.55252767", "0.5508254", "0.54524535", "0.54414135", "0.5439138", "0.5394585", "0.53931737", "0.5371973", "0.5368702", "0.5337483" ]
0.66466016
0
Fill material_data with data from the selected variant. Must fill material_data.name and material_data.maps. Return a boolean status, and fill self.error to add error messages.
def fetchVariant(self, variant_index, material_data):\n    # Get data saved in fetchVariantList\n    html = self._html\n    variants_data = self._variants_data\n    if variant_index < 0 or variant_index >= len(variants_data):\n        self.error = "Invalid variant index: {}".format(variant_index)\n        return False\n    v = variants_data[variant_index]\n\n    base_name = html.xpath("//div[@class='information']/h1/text()")[0].replace('#', '')\n    variant_name = v.xpath("text()")[0].strip().replace('|', '')\n    material_data.name = "CC0Textures/" + base_name + "/" + variant_name\n\n    zip_url = "https://cc0textures.com" + v.attrib['href'][1:]\n    zip_path = self.fetchZip(zip_url, material_data.name, "textures.zip")\n    zip_dir = os.path.dirname(zip_path)\n\n    namelist = []\n    with zipfile.ZipFile(zip_path,"r") as zip_ref:\n        namelist = zip_ref.namelist()\n        zip_ref.extractall(zip_dir)\n\n    # Translate cgbookcase map names into our internal map names\n    maps_tr = {\n        'col': 'baseColor',\n        'nrm': 'normal',\n        'mask': 'opacity',\n        'rgh': 'roughness',\n        'met': 'metallic',\n    }\n    for name in namelist:\n        base = os.path.splitext(name)[0]\n        map_type = base.split('_')[-1]\n        if map_type in maps_tr:\n            map_name = maps_tr[map_type]\n            material_data.maps[map_name] = os.path.join(zip_dir, name)\n\n    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_material_data(self, material):\n material_yaml_file = glob.glob(os.path.join(material_dir, material + '.yaml'))\n\n inputs = utilities.yaml_reader(material_yaml_file, material_dir, material)\n self.name = inputs['Name']\n self.materialName = material\n self.elements = inputs['Elements']\n self.zaids = inputs['Elemental ZAIDs']\n self.weightFraction = inputs['Elemental Weight Fractions'] if 'Elemental Weight Fractions' in inputs else []\n self.enrichmentZaids = inputs['Elemental Adjustment ZAIDs'] if 'Elemental Adjustment ZAIDs' in inputs else []\n self.enrichmentIsotopes = inputs['Isotopic Adjustment ZAIDs'] if 'Isotopic Adjustment ZAIDs' in inputs else []\n self.enrichmentVector = inputs['Isotopic Weight Percents'] if 'Isotopic Weight Percents' in inputs else []\n self.isotopicAtomPercents = inputs['Isotopic Atom Percents'] if 'Isotopic Atom Percents' in inputs else []\n self.density = inputs['Density']\n self.linearCoeffExpansion = inputs['Linear Coefficient of Expansion']", "def save_material(material, data_class):\n data_class.material_bind[\"version\"] = \"0.7\"\n add_to_json = True\n\n warning_text = (\"Material with same name and same properties already \"\n \"exists in JSON, consider this material or revising your \"\n \"properties\")\n\n for id, check in data_class.material_bind.items():\n if id != \"version\":\n if check[\"name\"] == material.name and \\\n check[\"density\"] == material.density and \\\n check[\"thermal_conduc\"] == material.thermal_conduc and \\\n check[\"heat_capac\"] == material.heat_capac and \\\n check[\n \"thickness_default\"] == material.thickness_default and \\\n check[\"thickness_list\"] == material.thickness_list:\n\n warnings.warn(warning_text)\n print(material.name)\n add_to_json = False\n break\n\n if add_to_json is True:\n data_class.material_bind[\n material.material_id] = collections.OrderedDict()\n data_class.material_bind[\n material.material_id][\"name\"] = material.name\n data_class.material_bind[\n material.material_id][\"density\"] = material.density\n data_class.material_bind[\n material.material_id][\"thermal_conduc\"] = material.thermal_conduc\n data_class.material_bind[\n material.material_id][\"heat_capac\"] = material.heat_capac\n data_class.material_bind[\n material.material_id][\n \"thickness_default\"] = material.thickness_default\n data_class.material_bind[\n material.material_id][\"thickness_list\"] = material.thickness_list\n data_class.material_bind[\n material.material_id][\"solar_absorp\"] = material.solar_absorp\n\n with open(utilities.get_full_path(data_class.path_mat), 'w') as file:\n file.write(json.dumps(\n data_class.material_bind,\n indent=4,\n separators=(',', ': ')))", "def setMaterial(self,massFraction,polymer):\n M = Materials()\n num = self.material['Detector']['mt']\n if polymer == 'PS':\n self.material['Detector']['matString'] = M.GetPSLiF(massFraction,num)\n elif polymer == 'PEN':\n self.material['Detector']['matString'] = M.GetPENLiF(massFraction,num)\n else:\n raise ValueError('Polymer {} is not in the material database'.format(polymer))", "def set_data(self, fragment, number, ratio, std, use):\n if self.has_data(fragment, number):\n self.mdv[fragment][number]['ratio'] = ratio\n self.mdv[fragment][number]['std'] = std\n self.mdv[fragment][number]['use'] = use\n return True\n return False", "def set_material(self, material):\r\n for b in self.buf:\r\n b.set_material(material)", "def validateMaterial(material, adjust=False):\n errors = []\n\n if not material:\n errors.append(ValidateMessage(\"No 
material defined.\", 'WARNING', material, None, {}))\n return errors, material\n\n if isinstance(material, bpy.types.Object):\n # there are always 18 slots, regardless of whether they are filled or not\n for tex in material.texture_slots:\n if tex is not None:\n try:\n # regular diffuse color texture\n if tex.use_map_color_diffuse:\n # grab the first texture\n material.texture_slots[0].texture.image.filepath.replace('//', '')\n except (KeyError, AttributeError):\n errors.append(\n ValidateMessage(\n \"Diffuse texture incomplete/undefined.\", 'WARNING', material, None, {}\n )\n )\n try:\n # normal map\n if tex.use_map_normal:\n # grab the first texture\n material.texture_slots[0].texture.image.filepath.replace('//', '')\n except (KeyError, AttributeError):\n errors.append(\n ValidateMessage(\n \"Normal texture incomplete/undefined.\", 'WARNING', material, None, {}\n )\n )\n try:\n # displacement map\n if tex.use_map_displacement:\n # grab the first texture\n material.texture_slots[0].texture.image.filepath.replace('//', '')\n except (KeyError, AttributeError):\n errors.append(\n ValidateMessage(\n \"Displacement texture incomplete/undefined.\",\n 'WARNING',\n material,\n None,\n {},\n )\n )\n else:\n if not hasattr(material, \"name\"):\n if adjust:\n material = {'name': 'phobos_error'}\n loglevel = 'WARNING'\n else:\n loglevel = 'ERROR'\n errors.append(\n ValidateMessage(\"Material name not defined.\", 'ERROR', material, None, {})\n )\n return errors, material\n\n if 'diffuse' not in material:\n if adjust:\n material['diffuse'] = (1., 1., 1., 1.)\n loglevel = 'WARNING'\n else:\n loglevel = 'ERROR'\n errors.append(\n ValidateMessage(\"Material diffuse color not defined.\", 'ERROR', material, None, {})\n )\n elif len(material['diffuse']) != 4:\n if adjust:\n if len(material['diffuse']) == 3:\n material['diffuse'] = tuple(material['diffuse'] + [1.])\n loglevel = 'WARNING'\n else:\n loglevel = 'ERROR'\n errors.append(\n ValidateMessage(\n \"Material diffuse color definition insufficient.\", loglevel, material, None, {}\n )\n )\n\n if 'diffuse_intensity' not in material:\n errors.append(\n ValidateMessage(\n \"Material diffuse intensity not defined.\", 'WARNING', material, None, {}\n )\n )\n if adjust:\n material['diffuse_intensity'] = 1.\n return errors, material", "def UpdateUI(self, materialHandle):\r\n \r\n self._materialHandle = materialHandle\r\n \r\n # alpha blend\r\n alphaBlendHandle = Material.GetAlphaBlendProperty(materialHandle)\r\n bEnable, srcBlend, destBlend = MPAlphaBlend.GetAlphaBlend(alphaBlendHandle)\r\n self._cbEnableAlphaBlend.SetValue(bEnable)\r\n self._comboSrcBlend.SetSelection(srcBlend)\r\n self._comboDestBlend.SetSelection(destBlend)\r\n \r\n # alpha test\r\n alphaTestHandle = Material.GetAlphaTestProperty(materialHandle)\r\n bEnable, alphaTestType, ref = MPAlphaTest.GetAlphaTest(alphaTestHandle)\r\n self._cbEnableAlphaTest.SetValue(bEnable)\r\n self._comboAlphaTestType.SetSelection(alphaTestType)\r\n self._editAlphaTestRef.SetValue(str(ref))\r\n \r\n # culling mode\r\n cullingModeHandle = Material.GetCullingModeProperty(materialHandle)\r\n cullingMode = MPCullingMode.GetCullingMode(cullingModeHandle)\r\n self._comboCullingMode.SetSelection(cullingMode)\r\n \r\n # depth state\r\n depthStateHandle = Material.GetDepthStateProperty(materialHandle)\r\n bEnable, bWriteEnable = MPDepthState.GetDepthState(depthStateHandle)\r\n self._cbEnableDepthTest.SetValue(bEnable)\r\n self._cbEnableDepthWrite.SetValue(bWriteEnable)\r\n \r\n # material ambient\r\n ambientHandle = 
Material.GetAmbientProperty(materialHandle)\r\n bEnable, r, g, b, a = MPAmbient.GetAmbient(ambientHandle)\r\n r, g, b, a = map(lambda n : int(n * 255.0), (r, g, b, a))\r\n self._cbEnableAmbient.SetValue(bEnable)\r\n self._colorMaterialAmbientColor.SetValue(wx.Color(r, g, b, a))\r\n \r\n # material diffuse\r\n diffuseHandle = Material.GetDiffuseProperty(materialHandle)\r\n bEnable, r, g, b, a = MPDiffuse.GetDiffuse(diffuseHandle)\r\n r, g, b, a = map(lambda n : int(n * 255.0), (r, g, b, a))\r\n self._cbEnableDiffuse.SetValue(bEnable)\r\n self._colorMaterialDiffuseColor.SetValue(wx.Color(r, g, b, a))\r\n \r\n # material specular\r\n specularHandle = Material.GetSpecularProperty(materialHandle)\r\n bEnable, r, g, b, a, shiness = MPSpecular.GetSpecular(specularHandle)\r\n r, g, b, a = map(lambda n : int(n * 255.0), (r, g, b, a))\r\n self._cbEnableSpecular.SetValue(bEnable)\r\n self._colorMaterialSpecularColor.SetValue(wx.Color(r, g, b, a))\r\n self._editMaterialSpecularShiness.SetValue(str(shiness))", "def __init__(self, jsondict=None, strict=True):\n \n self.allergenicIndicator = None\n \"\"\" Whether the substance is a known or suspected allergen.\n Type `bool`. \"\"\"\n \n self.alternate = None\n \"\"\" Indicates an alternative material of the device.\n Type `bool`. \"\"\"\n \n self.substance = None\n \"\"\" The substance.\n Type `CodeableConcept` (represented as `dict` in JSON). \"\"\"\n \n super(DeviceDefinitionMaterial, self).__init__(jsondict=jsondict, strict=strict)", "def create_material_data(self):\n for num, zaid in enumerate(self.enrichmentZaids):\n enriched_isotope_dict = {}\n for isoNum, isotopes in enumerate(self.enrichmentIsotopes[num]):\n enriched_isotope_dict[isotopes] = self.enrichmentVector[num][isoNum]\n self.enrichmentDict[zaid] = enriched_isotope_dict\n for num, element in enumerate(self.elements):\n self.elementDict[self.zaids[num]] = Element.Element(element)\n\n if self.isotopicAtomPercents:\n self.atomDensity = self.density\n self.set_atom_fractions()\n else:\n self.set_elemental_enrichment()\n self.set_weight_percent()\n self.atomDensity, self.atomPercent = set_atom_percent(self.weightPercent, self.density,\n self.elementDict)", "def populate_from_qrev_mat(self, mat_data):\n\n # Variables passed to the constructor\n\n if type(mat_data.frequency_hz) is np.ndarray:\n self.frequency_khz = mat_data.frequency_hz\n elif np.isnan(mat_data.frequency_hz):\n self.frequency_khz = None\n else:\n self.frequency_khz = mat_data.frequency_hz\n self.orig_coord_sys = mat_data.origCoordSys\n self.nav_ref = mat_data.navRef\n\n # Data requiring manipulation if only 1 ensemble\n if type(mat_data.u_mps) is float:\n self.raw_vel_mps = mat_data.rawVel_mps.reshape(mat_data.rawVel_mps.shape[0], 1)\n # Coordinate transformed data\n self.coord_sys = np.array([mat_data.coordSys])\n self.u_mps = np.array([mat_data.u_mps])\n self.v_mps = np.array([mat_data.v_mps])\n self.w_mps = np.array([mat_data.w_mps])\n self.d_mps = np.array([mat_data.d_mps])\n\n self.bottom_mode = np.array([mat_data.bottomMode])\n\n # Processed data\n self.u_processed_mps = np.array([mat_data.uProcessed_mps])\n self.v_processed_mps = np.array([mat_data.vProcessed_mps])\n self.processed_source = np.array([mat_data.processedSource])\n self.valid_data = np.array([ mat_data.validData]).astype(bool)\n self.valid_data = self.valid_data.reshape(-1, 1)\n self.smooth_speed = np.array([mat_data.smoothSpeed])\n self.smooth_upper_limit = np.array([mat_data.smoothUpperLimit])\n self.smooth_lower_limit = 
np.array([mat_data.smoothLowerLimit])\n else:\n self.raw_vel_mps = mat_data.rawVel_mps\n # Coordinate transformed data\n self.coord_sys = mat_data.coordSys\n self.u_mps = mat_data.u_mps\n self.v_mps = mat_data.v_mps\n self.w_mps = mat_data.w_mps\n self.d_mps = mat_data.d_mps\n\n self.bottom_mode = mat_data.bottomMode\n\n # Processed data\n self.u_processed_mps = mat_data.uProcessed_mps\n self.v_processed_mps = mat_data.vProcessed_mps\n self.processed_source = mat_data.processedSource\n self.valid_data = mat_data.validData.astype(bool)\n self.smooth_speed = mat_data.smoothSpeed\n self.smooth_upper_limit = mat_data.smoothUpperLimit\n self.smooth_lower_limit = mat_data.smoothLowerLimit\n\n self.num_invalid = mat_data.numInvalid\n # Error velocity filter\n if type(mat_data.dFilter) is np.ndarray:\n self.d_filter = None\n else:\n self.d_filter = mat_data.dFilter\n\n # Error velocity threshold\n if type(mat_data.dFilterThreshold) is np.ndarray:\n self.d_filter_threshold = None\n else:\n self.d_filter_threshold = mat_data.dFilterThreshold\n\n # Vertical velocity filter\n if type(mat_data.wFilter) is np.ndarray:\n self.w_filter = None\n else:\n self.w_filter = mat_data.wFilter\n\n # Vertical velocity threshold\n if type(mat_data.wFilterThreshold) is np.ndarray:\n self.w_filter_threshold = None\n else:\n self.w_filter_threshold = mat_data.wFilterThreshold\n\n # GPS quality filter\n if type(mat_data.gpsDiffQualFilter) is np.ndarray:\n self.gps_diff_qual_filter = None\n else:\n self.gps_diff_qual_filter = mat_data.gpsDiffQualFilter\n\n # GPS altitude filter\n if type(mat_data.gpsAltitudeFilter) is np.ndarray:\n self.gps_altitude_filter = None\n else:\n self.gps_altitude_filter = mat_data.gpsAltitudeFilter\n\n # GPS altitude threshold\n if type(mat_data.gpsAltitudeFilterChange) is np.ndarray:\n self.gps_altitude_filter_change = None\n else:\n self.gps_altitude_filter_change = mat_data.gpsAltitudeFilterChange\n\n # HDOP filter\n if type(mat_data.gpsHDOPFilter) is np.ndarray:\n self.gps_HDOP_filter = None\n else:\n self.gps_HDOP_filter = mat_data.gpsHDOPFilter\n\n # HDOP max threshold\n if type(mat_data.gpsHDOPFilterMax) is np.ndarray:\n self.gps_HDOP_filter_max = None\n else:\n self.gps_HDOP_filter_max = mat_data.gpsHDOPFilterMax\n\n # HDOP change threshold\n if type(mat_data.gpsHDOPFilterChange) is np.ndarray:\n self.gps_HDOP_filter_change = None\n else:\n self.gps_HDOP_filter_change = mat_data.gpsHDOPFilterChange\n\n # Other filters\n self.smooth_filter = mat_data.smoothFilter\n self.interpolate = mat_data.interpolate\n self.beam_filter = mat_data.beamFilter", "def SetMaterial(self, *args):\n return _XCAFDoc.XCAFDoc_MaterialTool_SetMaterial(self, *args)", "def load_material_library( self, url, materials, baseURL=None ):\n #( resolvedURL, os.path.abspath(filename), file, headers )\n try:\n finalURL, filename, file, headers = loader.Loader( url, baseURL )\n except IOError, err:\n if '/' in url:\n possible = url.split( '/' )[-1]\n try:\n finalURL, filename, file, headers = loader.Loader( \n possible, baseURL \n )\n except IOError, err:\n log.warn(\n \"\"\"Unable to load material library: %s\"\"\",\n url,\n )\n return False\n \n material = None\n for line in file.read().splitlines():\n if line.startswith('#'):\n continue\n values = line.split()\n if not values:\n continue\n\n if values[0] == 'newmtl':\n material = self.defaultMaterial()\n materials[values[1]] = material\n elif material is None:\n log.warn('Expected \"newmtl\" in %s', url)\n continue\n\n try:\n if values[0] == 'Kd':\n 
material.material.diffuseColor = map(float, values[1:])\n elif values[0] == 'Ka':\n material.material.ambientColor = map(float, values[1:])\n elif values[0] == 'Ks':\n material.material.specularColor = map(float, values[1:])\n elif values[0] == 'Ke':\n material.material.emissiveColor = map(float, values[1:])\n elif values[0] == 'Ns':\n material.material.shininess = float(values[1])\n elif values[0] == 'd':\n material.material.opacity = float(values[1])\n elif values[0] == 'map_Kd':\n if '/' in values[1]:\n img_url = [ values[1], values[1].split('/')[-1] ]\n else:\n img_url = [ values[1] ]\n img_url = [\n urllib.basejoin(baseURL, u )\n for u in img_url\n ]\n texture = basenodes.ImageTexture(url=img_url)\n material.texture = texture\n except:\n log.warn('Parse error in %s.', url)", "def test_has_raw_materials_handles_insufficient_raw_materials(self):\n # Params\n m_raw_materials = self.selection['ingredients']\n d_raw_materials = {\n 'sugar': 0,\n 'butter': 0,\n 'caramel': 15,\n 'dark chocolate': 0,\n 'mint chocolate': 30,\n 'milk chocolate': 30,\n 'light corn syrup': 0,\n 'sweetened condensed milk': 0,\n 'vanilla extract': 0,\n 'Reese\\'s Pieces': 15,\n }\n # Returns\n return_1 = 'Machine Needs Additional: sugar\\n' \\\n 'Machine Needs Additional: butter\\n' \\\n 'Machine Needs Additional: dark chocolate\\n' \\\n 'Machine Needs Additional: light corn syrup\\n' \\\n 'Machine Needs Additional: sweetened condensed milk\\n' \\\n 'Machine Needs Additional: vanilla extract\\n'\n # Calls\n bool_1 = self.chocolate_machine.has_raw_materials(m_raw_materials, d_raw_materials)\n # Asserts\n self.assertEqual(bool_1, return_1)", "def test_has_raw_materials_handles_insufficient_raw_materials(self):\n # Setup\n choice = 'dark cherry'\n selection = CUPCAKE_CHOICES[choice]\n\n # Params\n f_raw_materials = selection['ingredients']\n d_raw_materials = {\n 'sugar': 0,\n 'butter': 0,\n 'dark chocolate': 0,\n 'light corn syrup': 0,\n 'sweetened condensed milk': 0,\n 'vanilla extract': 0,\n 'bing cherries': 10,\n }\n\n # Returns\n return_1 = 'Machine Needs Additional: sugar\\n' \\\n 'Machine Needs Additional: butter\\n' \\\n 'Machine Needs Additional: dark chocolate\\n' \\\n 'Machine Needs Additional: light corn syrup\\n' \\\n 'Machine Needs Additional: sweetened condensed milk\\n' \\\n 'Machine Needs Additional: vanilla extract\\n'\n\n # Calls\n string_1 = has_raw_materials(f_raw_materials, d_raw_materials)\n\n # Asserts\n self.assertEqual(string_1, return_1)", "def updateData(self):\n self.needsData.emit(self.property(\"number\"))", "def fill_data_product(self):\n self.product.fill_data_product(self.list_products, self.mycursor, self.my_database)", "def link_material(obj, mat):\n if not has_material(obj, mat.name):\n obj.data.materials.append(mat)", "def on_material_select_changed(self,combo,data=None):\n self.app.job.reload_job()", "def IsMaterial(self, *args):\n return _XCAFDoc.XCAFDoc_MaterialTool_IsMaterial(self, *args)", "def materials(cls, value: Union[dict, MaterialSelector]):\n if isinstance(value, dict):\n cls._materials.from_dict(value)\n elif isinstance(value, MaterialSelector):\n cls._materials = value\n else:\n msg = \"materials must be MaterialSelector instance or dict\"\n run_log.error(msg)\n raise TypeError(msg)", "def built_in_material(self, built_in_material):\n\n self._built_in_material = built_in_material", "def checkAnalysis(self) -> bool:\n\n if len(self.materials) == 0:\n raise AnalysisError('No material models have been assigned to the analysis')\n\n for material in self.materials:\n if 
not material.isValid():\n raise AnalysisError('Material ({:s}) is not valid'.format(material.name))\n\n\n return True", "def read_one(family_id, material_id):\n # Query the database for the material\n material = (\n Material.query.join(Family, Family.family_id == Material.family_id)\n .filter(Family.family_id == family_id)\n .filter(Material.material_id == material_id)\n .one_or_none()\n )\n\n # Was a material found?\n if material is not None:\n material_schema = MaterialSchema()\n data = material_schema.dump(material).data\n return data\n\n # Otherwise, nope, didn't find that material\n else:\n abort(404, f\"Material not found for Id: {material_id}\")", "def update_from_dict(self, data: dict) -> \"Device\":\n if \"info\" in data and data[\"info\"]:\n self.info = Info.from_dict(data[\"info\"])\n\n if \"locations\" in data and data[\"locations\"]:\n locations = [Location.from_dict(location) for location in data[\"locations\"]]\n self.locations = locations\n\n return self", "def _init_data(self, data):\n assert type(data) is dict, \"dict expected: %r\" % type(data)\n assert len(data) is 1, \"size of dict should be 1: %r\" % len(data)\n self._name = data.keys()[0]\n self._data = np.asarray(data[self._name])\n self._set = True", "def AddMaterial(self, *args):\n return _XCAFDoc.XCAFDoc_MaterialTool_AddMaterial(self, *args)", "def append_material(self, material):\n # First check if asset attribute exists; if not, define the asset attribute\n if not hasattr(self, \"asset\"):\n self.asset = ET.Element(\"asset\")\n # If the material name is not in shared materials, add this to our assets\n if material.name not in self.shared_materials:\n self.asset.append(ET.Element(\"texture\", attrib=material.tex_attrib))\n self.asset.append(ET.Element(\"material\", attrib=material.mat_attrib))\n # Add this material name to shared materials if it should be shared\n if material.shared:\n self.shared_materials.add(material.name)\n self.shared_textures.add(material.tex_attrib[\"name\"])\n # Update prefix for assets\n add_prefix(root=self.asset, prefix=self.naming_prefix, exclude=self.exclude_from_prefixing)", "def __init__(self, vs, material):\n self.vs = vs\n self.material = material", "def _load_msh(self):\n log_gui.debug(\"_load_msh begin\")\n mod = self._mod\n mesh = mod.load_mesh_from_selection()\n if not mesh:\n mod.launch(GC.ERROR, \"The selected entry is not a mesh \"\n \"or the SMESH component must be activated\")\n return \n mdim = mesh.give_dim()\n if (mdim != self._data.get_dim()):\n mess = self.no_dim_mess\n if type(mdim) is int:\n mess = self.dim_mess % (mdim, self._data.get_dim())\n mod.launch(GC.ERROR, mess)\n return\n \n self.setdata(mesh)\n log_gui.debug(\"_load_msh end\")", "def test_materials_present(self):\n self.assertIsNotNone('Materials' in self.header.parameters.attrs)" ]
[ "0.5540224", "0.5305743", "0.5173134", "0.5126462", "0.5113751", "0.5105645", "0.50716984", "0.5012885", "0.5002888", "0.49937847", "0.491464", "0.48269656", "0.47691706", "0.47614205", "0.4725722", "0.4714299", "0.47136438", "0.47127554", "0.47089148", "0.4701902", "0.4652038", "0.456374", "0.45360506", "0.45310608", "0.452679", "0.45237058", "0.4523561", "0.45194674", "0.4514897", "0.45000097" ]
0.60064197
0
Raises an exception if the tensor rank does not match the expected rank.
def assert_rank(tensor, expected_rank, name=None):\n  if name is None:\n    name = tensor.name\n\n  expected_rank_dict = {}\n  if isinstance(expected_rank, six.integer_types):\n    expected_rank_dict[expected_rank] = True\n  else:\n    for x in expected_rank:\n      expected_rank_dict[x] = True\n\n  actual_rank = tensor.shape.ndims\n  if actual_rank not in expected_rank_dict:\n    scope_name = tf.get_variable_scope().name\n    raise ValueError(\n        "For the tensor `%s` in scope `%s`, the actual rank "\n        "`%d` (shape = %s) is not equal to the expected rank `%s`" %\n        (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assert_rank(tensor, expected_rank, name=None):\n if name is None:\n name = tensor.name\n\n expected_rank_dict = {}\n if isinstance(expected_rank, six.integer_types):\n expected_rank_dict[expected_rank] = True\n else:\n for x in expected_rank:\n expected_rank_dict[x] = True\n\n actual_rank = tensor.shape.ndims\n if actual_rank not in expected_rank_dict:\n scope_name = tf.get_variable_scope().name\n raise ValueError(\n \"For the tensor `%s` in scope `%s`, the actual rank \"\n \"`%d` (shape = %s) is not equal to the expected rank `%s`\" %\n (name, scope_name, actual_rank, str(tensor.shape), str(expected_rank)))", "def _validate_rank(self) -> None:\n if isinstance(self.extra_args, list):\n limit = self.mesh.mesh_eigenvalues.shape[0]\n if self.extra_args[0] > limit:\n raise ValueError(f\"rank should be less than or equal to {limit}\")", "def _validate_rank(self, rank: int) -> None:\n if not isinstance(rank, int):\n raise TypeError(\"rank should be an integer\")\n if rank < 0:\n raise ValueError(\"rank cannot be negative\")\n if rank >= self.L**2:\n raise ValueError(f\"rank should be less than {self.L**2}\")", "def _validate_rank(self, rank):\n if rank <= 0 or rank > self.num_users:\n raise RankError(\"Rank must be in the range 0 < rank <= number of users.\")", "def test_does_rank_error(self):\n self.assertRaises(Exception,lambda: cardutils.Card(1,1))", "def test_rank(self):\n self.assertEqual(self.vectors.rank('dog.n.01', 'dog.n.01'), 1)\n self.assertEqual(self.vectors.rank('dog.n.01', 'carnivore.n.01'), 3)", "def testMaskErrorIncompatibleRank1(self):\n\n np_mask = np.ones((2,), dtype=np.float32)\n x = tf.constant(0.0, shape=(2, 8, 6, 5, 5), dtype=np.float32)\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv3D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def test_gets_raised(self):\n with pytest.raises(TypeError, match=\"not take an integer value\"):\n KNeighborsLabelRanker(n_neighbors=\"foo\").fit(self.X, self.Y)\n\n with pytest.raises(ValueError, match=\"must be greater than zero\"):\n KNeighborsLabelRanker(n_neighbors=0).fit(self.X, self.Y)\n\n with pytest.raises(ValueError, match=\"less than or equal\"):\n KNeighborsLabelRanker(n_neighbors=151).fit(self.X, self.Y)\n\n with pytest.raises(ValueError, match=\"Unknown weights\"):\n KNeighborsLabelRanker(weights=\"foo\").fit(self.X, self.Y)\n\n with pytest.raises(ValueError, match=\"Unknown metric\"):\n KNeighborsLabelRanker(metric=\"foo\").fit(self.X, self.Y)", "def testMaskErrorIncompatibleRank2(self):\n\n np_mask = np.ones((5, 2), dtype=np.float32)\n x = tf.constant(0.0, shape=(2, 8, 6, 5, 5), dtype=np.float32)\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv3D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def testMaskErrorIncompatibleRank5(self):\n\n np_mask = np.ones((5, 5, 5, 5, 2), dtype=np.float32)\n x = tf.constant(0.0, shape=(2, 8, 6, 5, 5), dtype=np.float32)\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv3D(output_channels=4, kernel_shape=5, mask=mask)(x)\n 
self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def test_inconsistent_rank_inputs_for_importance_weights(self):\n for fw in framework_iterator(frameworks=(\"torch\", \"tf\"), session=True):\n vtrace = vtrace_tf if fw != \"torch\" else vtrace_torch\n if fw == \"tf\":\n inputs_ = {\n \"log_rhos\": tf1.placeholder(\n dtype=tf.float32, shape=[None, None, 1]),\n \"discounts\": tf1.placeholder(\n dtype=tf.float32, shape=[None, None, 1]),\n \"rewards\": tf1.placeholder(\n dtype=tf.float32, shape=[None, None, 42]),\n \"values\": tf1.placeholder(\n dtype=tf.float32, shape=[None, None, 42]),\n # Should be [None, 42].\n \"bootstrap_value\": tf1.placeholder(\n dtype=tf.float32, shape=[None])\n }\n else:\n inputs_ = {\n \"log_rhos\": Box(-1.0, 1.0, (7, 15, 1)).sample(),\n \"discounts\": Box(-1.0, 1.0, (7, 15, 1)).sample(),\n \"rewards\": Box(-1.0, 1.0, (7, 15, 42)).sample(),\n \"values\": Box(-1.0, 1.0, (7, 15, 42)).sample(),\n # Should be [15, 42].\n \"bootstrap_value\": Box(-1.0, 1.0, (7, )).sample()\n }\n with self.assertRaisesRegexp((ValueError, AssertionError),\n \"must have rank 2\"):\n vtrace.from_importance_weights(**inputs_)", "def testMaskErrorIncompatibleRank3(self):\n\n np_mask = np.ones((5, 5, 2), dtype=np.float32)\n x = tf.constant(0.0, shape=(2, 8, 6, 5, 5), dtype=np.float32)\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv3D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def testMaskErrorIncompatibleRank3(self):\n\n np_mask = np.ones((2, 4, 4))\n x = tf.constant(0.0, shape=(2, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv1D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def testMaskErrorIncompatibleRank4(self):\n\n np_mask = np.ones((5, 5, 5, 2), dtype=np.float32)\n x = tf.constant(0.0, shape=(2, 8, 6, 5, 5), dtype=np.float32)\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv3D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def testMaskErrorIncompatibleRank1(self):\n\n np_mask = np.ones((3,))\n x = tf.constant(0.0, shape=(2, 8, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv2D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def testMaskErrorIncompatibleRank3(self):\n\n np_mask = np.ones((2, 4, 4))\n x = tf.constant(0.0, shape=(2, 8, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv2D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def assert_shape(tensor, shape, name):\n real_shape = tensor.get_shape().as_list()\n same_rank = len(real_shape) == len(shape)\n 
all_equal = all([(s == r or s == -1) for s, r in zip(shape, real_shape)])\n if not same_rank or not all_equal:\n raise tf.errors.InvalidArgumentError(\n 'Error: Expected tensor %s to have shape %s, but it had shape %s.' %\n (name, str(shape), str(real_shape)))", "def test_is_rank_integer(self):\n self.assertIsInstance(cardutils.Card(10,1).rank, int)", "def test_input_shape_error(self):\n\n def net_func():\n input_value = paddle.to_tensor([[1, 2, 3], [4, 5, 6]])\n paddle.bincount(input_value)\n\n with self.assertRaises(ValueError):\n self.run_network(net_func)", "def testMaskErrorIncompatibleRank2(self):\n\n np_mask = np.ones((3, 3))\n x = tf.constant(0.0, shape=(2, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv1D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def testMaskErrorIncompatibleRank4(self):\n\n np_mask = np.ones((3, 3, 4, 5))\n x = tf.constant(0.0, shape=(2, 8, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv2D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def test_verify_non_existing_rank(self):\n data = {'rank_id': 5}\n res = self.post(url='/verify/4', data=data, role='admin')\n self.assertEqual(res.status_code, 401)\n self.assertException(res, exc.EntryNotFound)", "def _check_tensor_shapes(tensors):\n for tensor in tensors:\n tensor = tf.convert_to_tensor(value=tensor)\n tensor.get_shape().assert_has_rank(2)\n tensor.get_shape().assert_is_compatible_with(\n tf.convert_to_tensor(value=tensors[0]).get_shape())", "def __rank__(self) -> int:", "def testMaskErrorIncompatibleRank2(self):\n\n np_mask = np.ones((3, 3))\n x = tf.constant(0.0, shape=(2, 8, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv2D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def test_cvqnn_layers_exception_nlayers(self):\n shapes = expected_shapes(1, 2)\n weights = [np.random.random(shape) for shape in shapes[:-1]]\n weights += [np.random.random((2, shapes[-1][1]))]\n\n dev = DummyDevice(wires=2)\n\n @qml.qnode(dev)\n def circuit():\n qml.CVNeuralNetLayers(*weights, wires=range(2))\n return qml.expval(qml.QuadX(0))\n\n with pytest.raises(ValueError, match=\"The first dimension of all parameters\"):\n circuit()", "def testMaskErrorIncompatibleRank1(self):\n\n np_mask = np.ones((3,))\n x = tf.constant(0.0, shape=(2, 8, 6))\n\n # Test with both numpy arrays and Tensors.\n for mask in (np_mask, tf.convert_to_tensor(np_mask)):\n with self.assertRaises(snt.Error) as cm:\n snt.Conv1D(output_channels=4, kernel_shape=5, mask=mask)(x)\n self.assertTrue(str(cm.exception).startswith(\n \"Invalid mask shape: {}\".format(np_mask.shape)))", "def determine_rank(self, X, err):\n singularValues,_,_,_ = self.compute_svd(X,k=-1)\n ratio = np.array([np.linalg.norm(singularValues[k:]) / np.linalg.norm(singularValues) for k in\n range(len(singularValues) - 1, 0, -1)])\n find_idx = numpy.nonzero(ratio <= err)\n rank = find_idx[0]\n if self.global_rank==0: print('Estimated 
rank=',rank)\n return rank", "def test_input_dimension(self):\n knn = Knn(n_neighbors=3)\n with self.assertRaises(ValueError): knn.fit(X_train, y_test)", "def test_error_when_provide_negative_data_for_embedding():\n N = 250\n X = torch.randn((N, 5, 15))\n X[0:125, 0, 3] += 20.0\n y = X[:, 0, 3] > 5.0\n y = y.float()\n with pytest.raises(AssertionError):\n rnn = RNN(layers_info=[[\"gru\", 20], [\"lstm\", 8], [\"linear\", 1]], input_dim=15,\n hidden_activations=\"relu\", output_activation=\"sigmoid\", columns_of_data_to_be_embedded=[0],\n embedding_dimensions=[[200, 5]], initialiser=\"xavier\")\n assert solves_simple_problem(X, y, rnn)\n\n X[:, :, 0] = abs(X[:, :, 0]).long()\n rnn = RNN(layers_info=[[\"gru\", 20], [\"lstm\", 8], [\"linear\", 1]], input_dim=15,\n hidden_activations=\"relu\", output_activation=\"sigmoid\", columns_of_data_to_be_embedded=[0],\n embedding_dimensions=[[200, 5]], initialiser=\"xavier\")\n assert solves_simple_problem(X, y, rnn)" ]
[ "0.7573716", "0.69100684", "0.6770358", "0.6650631", "0.6231268", "0.6066077", "0.6011113", "0.59122485", "0.58956087", "0.5846304", "0.58355474", "0.5783303", "0.57702374", "0.5764062", "0.57635987", "0.5751452", "0.5733349", "0.56984454", "0.5661566", "0.565813", "0.56478053", "0.563939", "0.5634446", "0.5626168", "0.56215245", "0.56051093", "0.55836695", "0.5577475", "0.5558132", "0.5553703" ]
0.76834285
1
Creates a `truncated_normal_initializer` with the given range.
def create_initializer(initializer_range=0.02): return tf.compat.v1.truncated_normal_initializer(stddev=initializer_range)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_initializer(initializer_range=0.02):\n return tf.truncated_normal_initializer(stddev=initializer_range)", "def create_initializer(initializer_range=0.02):\n return tf.truncated_normal_initializer(stddev=initializer_range)", "def create_initializer(initializer_range=0.02):\n return tf.truncated_normal_initializer(stddev=initializer_range)", "def truncated_normal_initializer(mean=0.0, stddev=1.0, seed=None,\n dtype=dtypes.float32):\n def _initializer(shape, dtype=_assert_float_dtype(dtype)):\n return random_ops.truncated_normal(shape, mean, stddev, dtype, seed=seed)\n return _initializer", "def _truncated_normal_init(tensor, mean=0, stddev=1):\n\n total_size = tensor.numel()\n\n # determine the scipy random state from the torch seed\n # the numpy seed can be between 0 and 2**32-1\n np_seed = torch.randint(0, 2**32-1, (1, 1)).view(-1).item()\n np_state = RandomState(np_seed)\n # truncates 2 std from mean, since rescaling: a = ((mean-2std)-mean)/std = -2\n samples = tn.rvs(a = -2, b = 2, loc = mean, scale = stddev, size = total_size, random_state = np_state)\n samples = samples.reshape(tuple(tensor.size()))\n init_tensor = torch.from_numpy(samples).type_as(tensor)\n return init_tensor", "def glorot_normal(seed=None):\n return lambda shape, dtype, batch_ndims=0: _initialize( # pylint: disable=g-long-lambda\n shape, dtype, batch_ndims,\n scale=1., mode='fan_avg', distribution='truncated_normal', seed=seed)", "def truncated_normal(size, threshold=1):\n return truncnorm.rvs(-threshold, threshold, size=size)", "def _WeightInit(self, stddev):\n return init_ops.truncated_normal_initializer(stddev=stddev)", "def truncated_normal(size, lower=-2, upper=2):\r\n\r\n return scipy.stats.truncnorm.rvs(lower, upper, size=size)", "def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):\n # type: (Tensor, float, float, float, float) -> Tensor\n return _no_grad_trunc_normal_(tensor, mean, std, a, b)", "def trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.):\n # type: (Tensor, float, float, float, float) -> Tensor\n return _no_grad_trunc_normal_(tensor, mean, std, a, b)", "def random_uniform_initializer(minval=0.0, maxval=1.0, seed=None,\n dtype=dtypes.float32):\n def _initializer(shape, dtype=_assert_float_dtype(dtype)):\n return random_ops.random_uniform(shape, minval, maxval, dtype, seed=seed)\n return _initializer", "def he_normal(seed=None):\n # pylint: disable=line-too-long\n # pylint: enable=line-too-long\n return lambda shape, dtype, batch_ndims=0: _initialize( # pylint: disable=g-long-lambda\n shape, dtype, batch_ndims,\n scale=2., mode='fan_in', distribution='truncated_normal', seed=seed)", "def trunc_normal_(tensor, mean=0.0, std=1.0, a=-2.0, b=2.0):\n return _no_grad_trunc_normal_(tensor, mean, std, a, b)", "def normc_initializer(std=1.0):\n def _initializer(shape, dtype=None, partition_info=None): #pylint: disable=W0613\n out = np.random.randn(*shape).astype(np.float32)\n out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))\n return tf.constant(out)\n return _initializer", "def random_normal_initializer(mean=0.0, stddev=1.0, seed=None,\n dtype=dtypes.float32):\n def _initializer(shape, dtype=_assert_float_dtype(dtype)):\n return random_ops.random_normal(shape, mean, stddev, dtype, seed=seed)\n return _initializer", "def get_trunc_norm(mean, sd, lower=1e-12, upper=1):\n mean = max(lower, min(mean, upper))\n assert sd >= 0\n if sd <= 0:\n sd = 1e-12\n tn = stats.truncnorm((lower - mean) / sd, (upper - mean) / sd, loc=mean, scale=sd).rvs()\n return max(lower, min(tn, upper))", 
"def truncated_normal_(tensor):\n mean = 0\n std = 1/float(np.sqrt(tensor.shape[0]))\n size = tensor.shape\n tmp = tensor.new_empty(size + (4,)).normal_()\n valid = (tmp < 2) & (tmp > -2)\n ind = valid.max(-1, keepdim=True)[1]\n tensor.data.copy_(tmp.gather(-1, ind).squeeze(-1))\n tensor.data.mul_(std).add_(mean)\n return tensor", "def normalize_range(array, floor=0, ceil=1):\n scaler = MinMaxScaler(feature_range=(floor, ceil), copy=True)\n return scaler.fit_transform(array)", "def _uniform(val_range):\r\n return np.random.uniform(val_range[0], val_range[1])", "def normal_init(self, shape):\n return np.random.normal(size=(shape[0],shape[1]))*0.01", "def init_method_normal(sigma):\n\n def init_(tensor):\n return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)\n\n return init_", "def init_method_normal(sigma):\n\n def init_(tensor):\n return torch.nn.init.normal_(tensor, mean=0.0, std=sigma)\n\n return init_", "def __init__(self, range_noise_m, max_range_m, min_range_m=0.0, **kwargs):\n # Validate range values.\n if range_noise_m < 0.0:\n raise ValueError(\"Range noise should not be negative: %r\" % range_noise_m)\n if min_range_m >= max_range_m:\n raise ValueError(\"min_range_m %s must be less than max_range_m %s\" %\n (min_range_m, max_range_m))\n\n self._range_noise_m = range_noise_m\n self._max_range_m = max_range_m\n self._min_range_m = min_range_m\n self._total_range = max_range_m - min_range_m\n super(RangeNoise, self).__init__(\n scale=range_noise_m / self._total_range,\n clipping_lower_bound=0.0,\n clipping_upper_bound=1.0,\n **kwargs)", "def randrange(n, vmin, vmax):\n return (vmax - vmin) * np.random.rand(n) + vmin", "def scaled_init_method_normal(sigma, num_layers):\n std = sigma / math.sqrt(2.0 * num_layers)\n\n def init_(tensor):\n return torch.nn.init.normal_(tensor, mean=0.0, std=std)\n\n return init_", "def scaled_init_method_normal(sigma, num_layers):\n std = sigma / math.sqrt(2.0 * num_layers)\n\n def init_(tensor):\n return torch.nn.init.normal_(tensor, mean=0.0, std=std)\n\n return init_", "def get_standard_normal_distribution():\n return np.random.normal(0, 1)", "def normc_init(std=1.0, axis=0):\n def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613\n out = np.random.randn(*shape).astype(np.float32)\n out *= std / np.sqrt(np.square(out).sum(axis=axis, keepdims=True))\n return tf.constant(out)\n return _initializer", "def sample_from_truncated_normal(mean, std, clip_a, clip_b, size=None):\n a, b = (clip_a - mean) / std, (clip_b - mean) / std\n r = stats.truncnorm.rvs(a, b, size=size)\n return r * std + mean" ]
[ "0.81752145", "0.81752145", "0.8153977", "0.7396352", "0.6658898", "0.65713483", "0.6411789", "0.6391786", "0.63814235", "0.62771004", "0.62771004", "0.6252857", "0.621467", "0.62111574", "0.6149097", "0.6082527", "0.5928889", "0.58472204", "0.5730975", "0.5706199", "0.5700414", "0.5648038", "0.5648038", "0.56245565", "0.56185806", "0.558992", "0.558992", "0.5584823", "0.55661684", "0.5564946" ]
0.824168
0
Maps labels in metadata to assignment types
def label_to_atype(labels): atypes = [] for label in labels: if isinstance(label, AssignmentType): atypes.append(label) if label.lower() == "lxc": atypes.append(AssignmentType.LXC) elif label.lower() == "baremetal": atypes.append(AssignmentType.BareMetal) elif label.lower() == "kvm": atypes.append(AssignmentType.KVM) elif label.lower() == "lxd": atypes.append(AssignmentType.LXD) else: return [AssignmentType.DEFAULT] return atypes
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_labels_and_mapping(self, labels, mapping):\n numbered_classes = list(enumerate(list(labels), start=0))\n if mapping:\n new_mapping = {number: str(mapping[label]) for number, label in numbered_classes}\n else:\n new_mapping = {number: str(label) for number, label in numbered_classes}\n new_labels = [new_mapping[numbered[0]] for numbered in numbered_classes]\n\n return new_labels, new_mapping", "def map_to_per_ntype(self, ids): # -> None:\n ...", "def map_to_per_ntype(self, ids): # -> tuple[Unknown, Unknown]:\n ...", "def map_to_per_ntype(self, ids): # -> tuple[Unknown, Unknown]:\n ...", "def map_to_per_etype(self, ids): # -> tuple[Unknown, Unknown]:\n ...", "def map_to_per_etype(self, ids): # -> tuple[Unknown, Unknown]:\n ...", "def dict_to_label_attr_map(input_dict):\n return {key+':': [key, type(val)] for key, val in input_dict.items()}", "def map_to_per_etype(self, ids): # -> None:\n ...", "def labels(self, label_type = 'basic'):\n\t\tif label_type == None:\n\t\t\treturn {}\n\t\telif label_type == 'basic':\n\t\t\treturn self.dependency_labels()\n\t\telif label_type == 'SAMT':\n\t\t\treturn self.SAMT_labels()\n\t\telif label_type == 'all':\n\t\t\treturn self.label_all()\n\t\telse:\n\t\t\traise ValueError(\"%s is no valid labeltype\" %label_type)", "def match_labels_and_values(_metric_stats, _metric_label, _metric_type):\n _ret = {}\n for _key, _item in _metric_stats.items():\n if isinstance(_item, dict):\n for i, (k, v) in enumerate(_item.items()):\n _ret[\"{}.{}{}\".format(_key, _metric_label[i], _metric_type)] = v\n # match with metric labels if _metric_stat item is a list.\n elif isinstance(_item, list):\n for i in range(len(_item)):\n _ret[\n \"{}.{}{}\".format(_key, _metric_label[i], _metric_type)\n ] = _item[i]\n # check if _metric_stat item is not a dict or list\n else:\n _ret[\"{}.{}{}\".format(_key, _metric_label, _metric_type)] = _item\n return _ret", "def to_mapping(self, dim):\n mim = cifti2.Cifti2MatrixIndicesMap([dim], 'CIFTI_INDEX_TYPE_LABELS')\n for name, label, meta in zip(self.name, self.label, self.meta):\n label_table = cifti2.Cifti2LabelTable()\n for key, value in label.items():\n label_table[key] = (value[0],) + tuple(value[1])\n named_map = cifti2.Cifti2NamedMap(name, cifti2.Cifti2MetaData(meta), label_table)\n mim.append(named_map)\n return mim", "def get_label_set(self, type_str=None):\n return {v.label_str for v in self.node_gen if type_str in (None, v.type_str)}", "def get_image_labels_mapping(images_fp, labels_fp):\n name_map = {}\n\n for f in images_fp():\n image_name = f[0]['file']\n vars = {k.upper():v for k,v in f[0].items() if k!='file' }\n label_name = labels_fp.get_matching(**vars)[0]['file']\n name_map[image_name] = label_name\n return name_map", "def _annotations_to_targets(self, labels):\n roots = ['A','B','C','D','E','F','G']\n natural = zip(roots, [0, 2, 3, 5, 7, 8, 10])\n root_note_map = {}\n for chord, num in natural:\n root_note_map[chord] = num\n root_note_map[chord + '#'] = (num + 1) % 12\n root_note_map[chord + 'b'] = (num - 1) % 12\n\n root_note_map['N'] = 24\n root_note_map['X'] = 24\n \n labels = [c.decode('UTF-8') for c in labels]\n chord_root_notes = [c.split(':')[0].split('/')[0] for c in labels]\n chord_root_note_ids = np.array([root_note_map[crn] for crn in chord_root_notes])\n \n chord_type = [c.split(':')[1] if ':' in c else '' for c in labels]\n chord_type_shift = np.array([12 if 'min' in chord_t or 'dim' in chord_t else 0 for chord_t in chord_type])\n return one_hot(chord_root_note_ids + chord_type_shift, 
self.num_classes)", "def test_parse_taxonomy_to_otu_metadata_alt_labels(self):\r\n def f(v):\r\n return 1. + float(v)\r\n example_tax = \\\r\n \"\"\"412 PC.635_647\t0.0\r\n319 PC.355_281\t0.970\r\n353 PC.634_154\t0.830\r\n17 PC.607_302\t0.960\r\n13\t0.870\r\n338 PC.593_1314\t0.990\"\"\"\r\n actual = parse_taxonomy_to_otu_metadata(\r\n example_tax.split('\\n'),\r\n labels=['something'],\r\n process_fs=[f])\r\n expected = {'412': {'something': 1.0},\r\n '319': {'something': 1.970},\r\n '353': {'something': 1.830},\r\n '17': {'something': 1.960},\r\n '13': {'something': 1.870},\r\n '338': {'something': 1.990}}\r\n self.assertEqual(actual, expected)", "def parse_taxonomy_to_otu_metadata(\r\n lines, labels=['taxonomy', 'score'], process_fs=[taxa_split, float]):\r\n result = {}\r\n\r\n for line in lines:\r\n line = line.strip()\r\n fields = line.split('\\t')\r\n id_ = fields[0].split()[0]\r\n result[id_] = {}\r\n for i, field in enumerate(fields[1:]):\r\n try:\r\n label = labels[i]\r\n except IndexError:\r\n continue\r\n try:\r\n value = process_fs[i](field)\r\n except IndexError:\r\n raise ValueError(\r\n \"Too few process functions provided (n=%d).\" %\r\n len(process_fs))\r\n result[id_][label] = value\r\n return result", "def set_labels_and_subtypes(\n self,\n labels: Dict[str, str],\n subtypes: Dict[str, List[str]]) -> None:\n\n if labels is None:\n labels = OrderedDict()\n if not isinstance(labels, dict):\n raise TypeError('labels is required to be a dict. Got type {}'.format(type(labels)))\n\n if subtypes is None:\n subtypes = OrderedDict()\n elif not isinstance(subtypes, dict):\n raise TypeError('subtypes is required to be None or a dict. Got type {}'.format(type(subtypes)))\n\n # ensure that every key and value of labels are strings\n for key in labels:\n if not isinstance(key, str):\n raise TypeError(\n 'All keys of labels must be of type string. Got key `{}` of '\n 'type {}'.format(key, type(key)))\n if key == '':\n raise ValueError('The empty string is not a valid label id.')\n value = labels[key]\n if not isinstance(value, str):\n raise TypeError(\n 'All values of labels must be of type string. 
Got value {} '\n 'for key `{}` of type {}'.format(value, key, type(value)))\n\n # look for inverted fork - multiple parents claiming the same child\n subtypes = self._find_inverted_fork(subtypes, labels)\n # look for cycles\n self._find_cycle(subtypes)\n\n # set the values\n self._labels = labels\n self._subtypes = subtypes\n self._construct_parent_types()\n self._inspect_ids_for_integer()", "def label_mapping(filename):\n\n\t\n\n\n\twith open(filename, 'r') as infile:\n\t\treader = csv.reader(infile)\n\t\tnext(reader, None) # ignore first line since they're column labels\n\n\t\t#filename, artist, title, style, genre, date\n\t\tfor line in reader:\n\t\t\timg = line[0]\n\t\t\tartist = line[1]\n\t\t\tstyle = line[3]\n\t\t\tgenre = line[4]\n\t\t\tdate = re.findall(r'\\d+', line[5]) #parse any unwanted stuff\n\n\t\t\t#img and artist fields always present, no need to check\n\t\t\tartist_labels[img] = artist\n\n\n\t\t\tif style != '' and style in style_check:\n\t\t\t\t#if sum(x == style for x in style_labels.values()) < max_examples: # avoid imbalance\n\t\t\t\tstyle_labels[img] = style\n\n\n\t\t\tif genre != '' and genre in genre_check:\n\t\t\t\t#if sum(x == genre for x in genre_labels.values()) < max_examples:\n\t\t\t\tgenre_labels[img] = genre\n\n\n\t\t\tif len(date) > 0:\n\t\t\t\tbucket_len = 10 #buckets of 10 years\n\t\t\t\tbucket = (int(date[0]) // bucket_len) * bucket_len \n\t\t\t\tperiod = str(bucket) + '-' + str(bucket + (bucket_len - 1))\n\n\t\t\t\tif period in date_check:\n\t\t\t\t\t#if sum(x == period for x in date_labels.values()) <= max_examples:\n\t\t\t\t\tdate_labels[img] = period #parsed_date", "def labels(self) -> dict:\n raise NotImplementedError", "def labels(self, labels: MutableMapping[str, str]):\n self._labels = labels", "def unpack_labels(self, labels,\n is_box = False):\n unpacked_labels = {}\n count = 0\n for level in range(self.min_level, self.max_level + 1):\n feat_size_y = tf.cast(self.image_size[0] / 2 ** level, tf.int32)\n feat_size_x = tf.cast(self.image_size[1] / 2 ** level, tf.int32)\n steps = feat_size_y * feat_size_x * self.anchors_per_location\n if is_box:\n unpacked_labels[level] = tf.reshape(labels[count:count + steps],\n [-1, 4])\n else:\n unpacked_labels[level] = tf.reshape(labels[count:count + steps],\n [feat_size_y, feat_size_x, -1])\n count += steps\n return unpacked_labels", "def extract_read_to_sample_mapping(labels):\r\n sample_id_mapping = {}\r\n\r\n re = compile(r'(\\S+) (\\S+)')\r\n for label in labels:\r\n tmatch = search(re, label)\r\n sample_id = tmatch.group(1)\r\n flowgram_id = tmatch.group(2)\r\n sample_id_mapping[flowgram_id] = sample_id\r\n\r\n return sample_id_mapping", "def _metadata_map():\n return {\n 'date_added': 'dateAdded',\n 'dns_active': 'dnsActive',\n 'last_modified': 'lastModified',\n 'private_flag': 'privateFlag',\n 'whois_active': 'whoisActive',\n 'key_name': 'Key Name',\n 'value_type': 'Value Type',\n 'value_name': 'Value Name',\n 'block': 'Block',\n 'mutex': 'Mutex',\n 'as_number': 'AS Number',\n 'hostname': 'hostName',\n }", "def tag_mapping(data_path, data_type):\n with open(data_path+data_type+\"_labels.txt\", \"r\") as file1:\n tags = [line.split(\" \")[:-1] for line in file1.readlines()]\n dico = create_dico(tags)\n dico[model.START_TAG] = -1\n dico[model.STOP_TAG] = -2\n tag_to_id, id_to_tag = create_mapping(dico)\n print(\"Found %i unique named entity tags\" % len(dico))\n return dico, tag_to_id, id_to_tag", "def field_labels(label_row, datum_row):\n return dict(zip(label_row, datum_row))", "def 
label_map_gen(df_main):\n # Function to flatten a list of list\n flatten = lambda l: [item for sublist in l for item in sublist]\n labels = list(set(flatten([l.split(' ') for l in df_main['tags'].values])))\n\n # Create list of labels\n label_map = {l: i for i, l in enumerate(labels)}\n return label_map", "def add_labels(data_lists, table_labels):\n labeled_dictionary_collection = {}\n \n\n for symbol, data_list in data_lists.iteritems():\n if len(data_list) > 1:\n labeled_dictionary_collection[symbol] = dict(zip(table_labels,data_list))\n return labeled_dictionary_collection", "def _get_type_mapping():\n return {\n Box.SPACE_NAME: Box,\n Dict.SPACE_NAME: Dict,\n Discrete.SPACE_NAME: Discrete\n }", "def makeMapping(globalMap):\n \n from memops.xml.Implementation import bool2str, str2bool\n\n # Set up top level dictionaries\n loadMaps = globalMap.get('loadMaps')\n mapsByGuid = globalMap.get('mapsByGuid')\n\n abstractTypes = globalMap.get('CCLB').get('abstractTypes')\n exolinks = globalMap.get('CCLB').get('exolinks')\n\n # Class AtomLabel\n currentMap = {}\n abstractTypes['AtomLabel'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00018'] = currentMap\n loadMaps['CCLB.AtomLabel'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00018'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'atomLabels'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.AtomLabel\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute AtomLabel.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute AtomLabel.isotopeCode\n currentMap = {}\n contentMap['isotopeCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00092'] = currentMap\n loadMaps['CCLB.AtomLabel.isotopeCode'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.isotopeCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00092'\n currentMap['name'] = 'isotopeCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute AtomLabel.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00003'] = currentMap\n loadMaps['CCLB.AtomLabel.name'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00003'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Attribute AtomLabel.subType\n currentMap = {}\n contentMap['subType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00004'] = currentMap\n loadMaps['CCLB.AtomLabel.subType'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.subType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00004'\n currentMap['name'] = 'subType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['proc'] = 'direct'\n currentMap['default'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute AtomLabel.weight\n currentMap = {}\n contentMap['weight'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00093'] = currentMap\n 
loadMaps['CCLB.AtomLabel.weight'] = currentMap\n currentMap['tag'] = 'CCLB.AtomLabel.weight'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00093'\n currentMap['name'] = 'weight'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 1.0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00009')\n\n # Role AtomLabel.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n # End of AtomLabel\n\n currentMap = abstractTypes.get('AtomLabel')\n aList = ['isotopeCode', 'name', 'subType', 'weight']\n currentMap['headerAttrs'] = aList\n aList = ['access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n\n # Class ChemCompLabel\n currentMap = {}\n abstractTypes['ChemCompLabel'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00014'] = currentMap\n loadMaps['CCLB.ChemCompLabel'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00014'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'chemCompLabels'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.ChemCompLabel\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute ChemCompLabel.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute ChemCompLabel.ccpCode\n currentMap = {}\n contentMap['ccpCode'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00073'] = currentMap\n loadMaps['CCLB.ChemCompLabel.ccpCode'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel.ccpCode'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00073'\n currentMap['name'] = 'ccpCode'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003')\n\n # Attribute ChemCompLabel.molType\n currentMap = {}\n contentMap['molType'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00072'] = currentMap\n loadMaps['CCLB.ChemCompLabel.molType'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel.molType'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-18:23:37_00072'\n currentMap['name'] = 'molType'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024')\n\n # Role ChemCompLabel.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role ChemCompLabel.isotopomers\n currentMap = {}\n contentMap['isotopomers'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:13_00001'] = currentMap\n loadMaps['CCLB.ChemCompLabel.isotopomers'] = currentMap\n currentMap['tag'] = 'CCLB.ChemCompLabel.isotopomers'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:13_00001'\n currentMap['name'] = 'isotopomers'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('CCLB').get('abstractTypes')\n # End of ChemCompLabel\n\n currentMap = abstractTypes.get('ChemCompLabel')\n aList = ['ccpCode', 'molType']\n currentMap['headerAttrs'] = aList\n aList = ['isotopomers', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['isotopomers']\n currentMap['children'] = aList\n\n # Class 
Isotopomer\n currentMap = {}\n abstractTypes['Isotopomer'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:28:54_00001'] = currentMap\n loadMaps['CCLB.Isotopomer'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:28:54_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'isotopomers'\n currentMap['objkey'] = 'serial'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.Isotopomer\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute Isotopomer.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute Isotopomer.serial\n currentMap = {}\n contentMap['serial'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00005'] = currentMap\n loadMaps['CCLB.Isotopomer.serial'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer.serial'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00005'\n currentMap['name'] = 'serial'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032')\n\n # Attribute Isotopomer.weight\n currentMap = {}\n contentMap['weight'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00006'] = currentMap\n loadMaps['CCLB.Isotopomer.weight'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer.weight'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00006'\n currentMap['name'] = 'weight'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['default'] = 1.0\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:54_00009')\n\n # Role Isotopomer.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role Isotopomer.atomLabels\n currentMap = {}\n contentMap['atomLabels'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00001'] = currentMap\n loadMaps['CCLB.Isotopomer.atomLabels'] = currentMap\n currentMap['tag'] = 'CCLB.Isotopomer.atomLabels'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:30:14_00001'\n currentMap['name'] = 'atomLabels'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['content'] = globalMap.get('CCLB').get('abstractTypes')\n # End of Isotopomer\n\n currentMap = abstractTypes.get('Isotopomer')\n aList = ['serial', 'weight']\n currentMap['headerAttrs'] = aList\n aList = ['atomLabels', 'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['atomLabels']\n currentMap['children'] = aList\n\n # Class LabelingScheme\n currentMap = {}\n abstractTypes['LabelingScheme'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:14_00001'] = currentMap\n loadMaps['CCLB.LabelingScheme'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme'\n currentMap['type'] = 'class'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:14_00001'\n currentMap['eType'] = 'cplx'\n currentMap['fromParent'] = 'labelingSchemes'\n currentMap['isTop'] = True\n currentMap['objkey'] = 'name'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.LabelingScheme\n contentMap = {}\n currentMap['content'] = contentMap\n\n # Attribute LabelingScheme.applicationData\n contentMap['applicationData'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:27_00007')\n\n # Attribute 
LabelingScheme.createdBy\n contentMap['createdBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00002__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute LabelingScheme.details\n currentMap = {}\n contentMap['details'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00005'] = currentMap\n loadMaps['CCLB.LabelingScheme.details'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.details'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00005'\n currentMap['name'] = 'details'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00036')\n\n # Attribute LabelingScheme.guid\n contentMap['guid'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-09-14-18:48:26_00002')\n\n # Attribute LabelingScheme.isModifiable\n contentMap['isModifiable'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-17-14:16:26_00010__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute LabelingScheme.lastUnlockedBy\n contentMap['lastUnlockedBy'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:00:59_00003__www.ccpn.ac.uk_Fogh_2007-10-03-14:53:27_00001__www.ccpn.ac.uk_Fogh_2006-09-14-16:28:57_00002')\n\n # Attribute LabelingScheme.longName\n currentMap = {}\n contentMap['longName'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00004'] = currentMap\n loadMaps['CCLB.LabelingScheme.longName'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.longName'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00004'\n currentMap['name'] = 'longName'\n currentMap['hicard'] = 1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00033')\n\n # Attribute LabelingScheme.name\n currentMap = {}\n contentMap['name'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00003'] = currentMap\n loadMaps['CCLB.LabelingScheme.name'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.name'\n currentMap['type'] = 'attr'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00003'\n currentMap['name'] = 'name'\n currentMap['hicard'] = 1\n currentMap['locard'] = 1\n currentMap['data'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037')\n\n # Role LabelingScheme.access\n contentMap['access'] = mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-12-31-09:03:01_00014')\n\n # Role LabelingScheme.chemCompLabels\n currentMap = {}\n contentMap['chemCompLabels'] = currentMap\n mapsByGuid['www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00002'] = currentMap\n loadMaps['CCLB.LabelingScheme.chemCompLabels'] = currentMap\n currentMap['tag'] = 'CCLB.LabelingScheme.chemCompLabels'\n currentMap['type'] = 'child'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:55_00002'\n currentMap['name'] = 'chemCompLabels'\n currentMap['hicard'] = -1\n currentMap['locard'] = 0\n currentMap['eType'] = 'cplx'\n currentMap['implSkip'] = True\n currentMap['content'] = globalMap.get('CCLB').get('abstractTypes')\n # End of LabelingScheme\n\n currentMap = abstractTypes.get('LabelingScheme')\n aList = ['createdBy', 'guid', 'isModifiable', 'lastUnlockedBy', 'name']\n currentMap['headerAttrs'] = aList\n aList = ['details', 'longName']\n currentMap['simpleAttrs'] = aList\n aList = ['chemCompLabels', 
'access', 'applicationData']\n currentMap['cplxAttrs'] = aList\n aList = ['chemCompLabels']\n currentMap['children'] = aList\n\n # Out-of-package link to AtomLabel\n currentMap = {}\n exolinks['AtomLabel'] = currentMap\n loadMaps['CCLB.exo-AtomLabel'] = currentMap\n currentMap['tag'] = 'CCLB.exo-AtomLabel'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00018'\n currentMap['name'] = 'AtomLabel'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.AtomLabel\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00037'))\n\n # Out-of-package link to ChemCompLabel\n currentMap = {}\n exolinks['ChemCompLabel'] = currentMap\n loadMaps['CCLB.exo-ChemCompLabel'] = currentMap\n currentMap['tag'] = 'CCLB.exo-ChemCompLabel'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-08-16-14:25:09_00014'\n currentMap['name'] = 'ChemCompLabel'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.ChemCompLabel\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))\n\n # Out-of-package link to Isotopomer\n currentMap = {}\n exolinks['Isotopomer'] = currentMap\n loadMaps['CCLB.exo-Isotopomer'] = currentMap\n currentMap['tag'] = 'CCLB.exo-Isotopomer'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2006-10-24-11:28:54_00001'\n currentMap['name'] = 'Isotopomer'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.Isotopomer\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:52_00024'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2007-09-12-18:31:28_00003'))\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2006-08-16-14:22:53_00032'))\n\n # Out-of-package link to LabelingScheme\n currentMap = {}\n exolinks['LabelingScheme'] = currentMap\n loadMaps['CCLB.exo-LabelingScheme'] = currentMap\n currentMap['tag'] = 'CCLB.exo-LabelingScheme'\n currentMap['type'] = 'exo'\n currentMap['guid'] = 'www.ccpn.ac.uk_Fogh_2007-01-24-12:23:14_00001'\n currentMap['name'] = 'LabelingScheme'\n currentMap['eType'] = 'cplx'\n currentMap['class'] = ccp.api.molecule.ChemCompLabel.LabelingScheme\n aList = list()\n currentMap['keyMaps'] = aList\n aList.append(mapsByGuid.get('www.ccpn.ac.uk_Fogh_2008-06-30-16:30:50_00001'))", "def get_label_scores_mapping(labels, scores):\n return {label: scores[i] for i, label in enumerate(labels)}" ]
[ "0.6138519", "0.5933744", "0.5866758", "0.5866758", "0.5786512", "0.5786512", "0.57188714", "0.57151353", "0.55421776", "0.5512994", "0.54907334", "0.54420584", "0.54413205", "0.5440561", "0.5437112", "0.5422614", "0.54225236", "0.54204667", "0.54132175", "0.54111546", "0.54094636", "0.53827584", "0.53436357", "0.5328785", "0.5321688", "0.53156227", "0.5305538", "0.5281639", "0.52711546", "0.5270233" ]
0.66578543
0
The style of the input text field.
def style(self) -> InputTextStyle: return self._underlying.style
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _update_color(self, txt):\n color = self.valid_color\n if not self.hasAcceptableInput():\n color = self.invalid_color\n self.setStyleSheet(\"background-color: %s\" % color)", "def style(self):\n return self['style']", "def match_style(self, input_style: str) -> str:\r\n try: # Try to get from the dictionary\r\n return self.get_style_from_styles(input_style)\r\n except KeyError: # If you get a key error, it is not in the dictionary\r\n new_style = input(input_style + '\\nWhat style is this?') # Ask the user what style it is\r\n self.add_style_to_styles(input_style, new_style) # Add this style to the dictionary\r\n return new_style # Return the more readable style\r", "def style(self):\n return self._style", "def style(self):\n return self._style", "def on_lineEdit_temperature_textChanged(self, p0):\n try :\n float(self.lineEdit_temperature.text())\n self.lineEdit_temperature.setStyleSheet(\"background-color: white;\")\n \n except ValueError: \n self.lineEdit_temperature.setStyleSheet(\"background-color: red;\")", "def check_state(self, *args, **kwargs):\n\n # TODO: Implement from\n # http://stackoverflow.com/questions/27159575/pyside-modifying-widget-colour-at-runtime-without-overwriting-stylesheet\n\n sender = self.sender()\n validator = sender.validator()\n state = validator.validate(sender.text(), 0)[0]\n if state == QtGui.QValidator.Acceptable:\n color = 'none' # normal background color\n elif state == QtGui.QValidator.Intermediate:\n color = '#fff79a' # yellow\n else:\n color = '#f6989d' # red\n sender.setStyleSheet('QLineEdit { background-color: %s }' % color)", "def setValue(self,value):\n rgb = QtGui.QColor(value).getRgb()\n self.input.setStyleSheet(\"* { background-color: rgb(%s,%s,%s) }\" % rgb[:3])\n self.input.setText(str(value))", "def style(self):\n return self.container['style']", "def test__TextInputStyle__value():\n for instance in TextInputStyle.INSTANCES.values():\n vampytest.assert_instance(instance.value, TextInputStyle.VALUE_TYPE)", "def text_field(self):\n return self.properties.get('TextField', None)", "def get_style ( self, object ):\n return self.style", "def TextWidget(*args, **kw):\n kw['value'] = str(kw['value'])\n kw.pop('options', None)\n return TextInput(*args,**kw)", "def validateInput(self):\n palette = QPalette()\n validInput = self.sender().hasAcceptableInput()\n if validInput:\n palette.setColor(QPalette.Text, Qt.black)\n else:\n palette.setColor(QPalette.Text, Qt.blue)\n self.sender().setPalette(palette)\n self.hasValidInput.emit(validInput)", "def set_style(self):", "def set_style(line: QLineEdit) -> None:\n line.setStyleSheet(\"QLineEdit{padding: 5px 10px}\")", "def text_style(self) -> TextStyle:\r\n if self.player_profile.is_level_completed(self.level_num):\r\n return self._get_text_style(GuiColor.LEVEL_COMPLETED_COLOR)\r\n if self.player_profile.is_level_unlocked(self.level_num):\r\n return self._get_text_style(GuiColor.LEVEL_UNLOCKED_COLOR)\r\n\r\n return self._get_text_style(GuiColor.LEVEL_LOCKED_COLOR)", "def test__TextInputStyle__name():\n for instance in TextInputStyle.INSTANCES.values():\n vampytest.assert_instance(instance.name, str)", "def __inputBox(gwin,height,word): \r\n prompt=Text(Point(155,height),word)\r\n prompt.setSize(10)\r\n prompt.draw(gwin)\r\n inputBox=Entry(Point(450,height),2)\r\n inputBox.setSize(60)\r\n inputBox.setFill(\"white\")\r\n inputBox.draw(gwin)\r\n return inputBox", "def input_class(field):\r\n return field.field.widget.__class__.__name__.lower()", "def _format (color, style=''):\n _format = 
QtGui.QTextCharFormat()\n if color != '':\n _format.setForeground(getattr(QtCore.Qt, color))\n if 'bold' in style:\n _format.setFontWeight(QtGui.QFont.Bold)\n if 'italic' in style:\n _format.setFontItalic(True)\n return _format", "def setStyle(self):\r\n self.statusBar.setStyleSheet(\"font:20px\")\r\n self.setStyleSheet(\"font:25px;\")\r\n self.text_area.setStyleSheet(\"\"\"\r\n selection-background-color: rgba(255,211,67,255);\r\n border-color:black;\r\n selection-color:black;\r\n font:20px;\r\n \"\"\")\r\n self.menu_bar.setStyleSheet(\"padding:5px\")\r\n self.file_toolbar.setStyleSheet(\"padding:5px;padding-right:8px;\")\r\n self.edit_toolbar.setStyleSheet(\"padding:5px;padding-right:8px;\")\r\n self.text_area.setAttribute(Qt.WA_StyledBackground)", "def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style='', border_visible=True, border_colour='black',\n border_style='solid', border_width=1, border_radius=0,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=True,\n border_colour= map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')", "def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style='', border_visible=True, border_color='black',\n border_style='solid', border_width=1, border_radius=0,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=True,\n border_color= _map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')", "def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style=None, border_visible=True,\n border_color='black', border_style='solid',\n border_width=1, border_radius=0, padding='0.1cm',\n margin='0.3cm', font_family='', font_size=None,\n font_style='', font_weight='')\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=True,\n border_color=_map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.1cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')", "def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style=None, border_visible=True,\n border_colour='black', border_style='solid',\n border_width=1, border_radius=0, padding='0.1cm',\n margin='0.3cm', font_family='', font_size=None,\n font_style='', font_weight='')\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=True,\n border_colour=map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.1cm', 
margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')", "def line_style(self):\n return self.container['line_style']", "def field_style(field_name, bushfire=None):\r\n if bushfire:\r\n try:\r\n value = getattr(bushfire, field_name)\r\n if field_name == \"dfes_incident_no\":\r\n return \"\" if value else \"color:red;\"\r\n else:\r\n return \"\"\r\n except:\r\n return \"\"\r\n else:\r\n return \"\"", "def predefined_style(self, style):\n if style == 'minimal':\n self.style(box_style='', border_visible=True, border_color='black',\n border_style='solid', border_width=1, border_radius=0,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n self.save_button.button_style = ''\n self.save_button.font_weight = 'normal'\n elif (style == 'info' or style == 'success' or style == 'danger' or\n style == 'warning'):\n self.style(box_style=style, border_visible=True,\n border_color= _map_styles_to_hex_colours(style),\n border_style='solid', border_width=1, border_radius=10,\n padding='0.2cm', margin='0.3cm', font_family='',\n font_size=None, font_style='', font_weight='')\n self.save_button.button_style = 'primary'\n self.save_button.font_weight = 'bold'\n else:\n raise ValueError('style must be minimal or info or success or '\n 'danger or warning')", "def get_text_color ( self, object ):\n return self.text_color_" ]
[ "0.6297507", "0.6139241", "0.60388416", "0.592602", "0.592602", "0.58029115", "0.5770399", "0.5738665", "0.5722834", "0.5672692", "0.5660816", "0.5595364", "0.5564458", "0.549983", "0.5498407", "0.5497021", "0.5464898", "0.54125243", "0.5386978", "0.5375206", "0.5355189", "0.53251314", "0.5314926", "0.5313844", "0.52912545", "0.5261442", "0.5231371", "0.523005", "0.52253443", "0.5223263" ]
0.78181124
0
Function to determine whether we are allowed to call `get_mpi_pool`.
def can_use_mpi_pool(): return ALLOW_SPAWN or ALREADY_RUNNING_AS_MPI
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def check_pool(self):\n if self.conn.queue_len() < MAX_PROXIES:\n return True\n return False", "def has_answerpool(self):\r\n return hasattr(self, '_has_answerpool')", "def has_mpi(self):\n return bool(self.mpi_runner)", "def has_mpi_peer_processes():\n return mpi4py_available and MPI.COMM_WORLD.Get_size() > 1", "def has_pool ( self ):\n return self._poolstack", "def _should_use_pool(self, kernel_name, kwargs):\n if \"kernel_id\" in kwargs:\n return False\n\n if self.strict_pool_names and kernel_name not in self.kernel_pools:\n raise ValueError(\"Cannot start kernel with name %r\" % (kernel_name,))\n if self.strict_pool_kwargs and (\n kernel_name not in self.pool_kwargs or kwargs != self.pool_kwargs[kernel_name]\n ):\n raise ValueError(\"Cannot start kernel with kwargs %r\" % (kwargs,))\n\n return len(self._pools.get(kernel_name, ())) > 0", "def is_mpi_env():\n try:\n import mpi4py\n except ImportError:\n return False\n\n try:\n import mpi4py.MPI\n except ImportError:\n return False\n\n if mpi4py.MPI.COMM_WORLD.size == 1 and mpi4py.MPI.COMM_WORLD.rank == 0:\n return False\n return True", "def pure_mpi(self):\n return self.has_mpi and not self.has_omp", "def check_not_in_mpiexec(self):\n \n if 'HYDRA_CONTROL_FD' in os.environ or 'PMI_FD' in os.environ:\n self.skip('cannot run the socket tests under mpi process manager')", "def _is_global_pooling(self, input_shape):\n output_shape = self.compute_output_shape(input_shape).as_list()\n return output_shape[1] == 1 and output_shape[2] == 1", "def has_nonempty_pool ( self ):\n return self._poolstack and not self._poolstack[-1].empty()", "def _floating_ip_pool_exists(self, context, name):\n pools = [pool.get('name') for pool in\n self.get_floating_ip_pools(context)]\n if name in pools:\n return True\n\n return False", "def discard_pool ( self ):\n return self.pop_pool() is not None", "def check_pool_exist(pool_name: str) -> bool:\n if not pool_name:\n return False\n return os.path.exists(constant.work_dir + \"/pool/\" + pool_name)", "def is_free(self) -> tuple:\n if self.running_procs >= self.procs_no:\n return (False, None)\n if self.gpus:\n for gpu in self.gpus:\n if self.gpu_running_procs[gpu] < self.per_gpu[gpu]:\n return (True, gpu)\n return (False, None)\n return (True, None)", "def backend_has_free_public_ip(backend):\n ip_pool_rows = IPPoolTable.objects.select_for_update()\\\n .filter(subnet__network__public=True)\\\n .filter(subnet__network__drained=False)\\\n .filter(subnet__deleted=False)\\\n .filter(subnet__network__backend_networks__backend=backend)\n for pool_row in ip_pool_rows:\n pool = pool_row.pool\n if pool.empty():\n continue\n else:\n return True", "def check_multiprocessing():\n\n try:\n import multiprocessing\n except ImportError:\n return False\n return True", "def is_multigpu_child_process():\n return (dist.is_initialized() or \"TORCHELASTIC_RUN_ID\" in os.environ) and os.environ[\"LOCAL_RANK\"] != \"0\"", "def _is_pool_owned(self, pdata):\n svc = '/api/system/v1/version'\n ret = self.rest_get(svc, restclient.Status.OK)\n vdata = jsonutils.loads(ret.data)\n return (vdata['version']['asn'] == pdata['pool']['asn'] and\n vdata['version']['nodename'] == pdata['pool']['owner'])", "def is_azureml_mpirun() -> bool:\n is_openmpi_image: bool = (\n \"OMPI_COMM_WORLD_RANK\" in os.environ\n and \"OMPI_COMM_WORLD_SIZE\" in os.environ\n and \"OMPI_COMM_WORLD_LOCAL_RANK\" in os.environ\n and \"OMPI_COMM_WORLD_LOCAL_SIZE\" in os.environ\n )\n\n is_azureml_mpirun_env: bool = (\n \"AZ_BATCH_MASTER_NODE\" in os.environ\n or 
\"AZ_BATCHAI_MPI_MASTER_NODE\" in os.environ\n )\n\n return bool(is_openmpi_image and is_azureml_mpirun_env)", "def _pool_exists(self, client_id, pool_name):\n pools = self.__pools.get(client_id, [])\n for pool in pools:\n if pool.name == pool_name:\n return True\n return False", "def check_security_group(self):\n return True", "def is_something_executing(self):\n return self._execution_pool", "def is_all_free(self):\n return self.pool_size == self.pool.qsize()", "def hybrid_mpi_omp(self):\n return self.has_omp and self.has_mpi", "def _workers_available(self) -> bool:\n total_compute_power = sum(self.client.nthreads().values())\n if len(self.futures) < total_compute_power:\n return True\n return False", "def has_permission(self, request, view):\n authenticated = super(IsRpcRacker, self).has_permission(request, view)\n user_groups = getattr(request.user, 'roles', set())\n if not isinstance(user_groups, set):\n user_groups = set(user_groups)\n return authenticated and bool(self.rpc_groups & user_groups)", "def poolCritical(self):\n idle = len(self.pool) - self.__working\n return idle <= 0", "def check_cpu_constrained():\n return psutil.cpu_percent(1) > 75", "def not_required(self, gp: GriddedPerm) -> bool:\n return all(\n any(gp not in req for req in req_list)\n for req_list in self._tiling.requirements\n )" ]
[ "0.666223", "0.65785354", "0.64785296", "0.64087653", "0.63978124", "0.6303463", "0.6174961", "0.6139839", "0.61166644", "0.60749656", "0.58839196", "0.584911", "0.5793402", "0.5737615", "0.57042027", "0.5693275", "0.5663002", "0.5648575", "0.5628458", "0.5582699", "0.55820614", "0.5568921", "0.5545072", "0.55370426", "0.55273587", "0.5506477", "0.54929954", "0.54588395", "0.54470897", "0.5434382" ]
0.8271317
0
Broadcast a result to all workers, dispatching to proper MPI (rather than pickled) communication if the result is a numpy array.
def bcast(result, comm, result_rank): rank = comm.Get_rank() # make sure all workers know if result is an array or not if rank == result_rank: is_ndarray = isinstance(result, np.ndarray) else: is_ndarray = None is_ndarray = comm.bcast(is_ndarray, root=result_rank) # standard (pickle) bcast if not array if not is_ndarray: return comm.bcast(result, root=result_rank) # make sure all workers have shape and dtype if rank == result_rank: shape_dtype = result.shape, str(result.dtype) else: shape_dtype = None shape_dtype = comm.bcast(shape_dtype, root=result_rank) shape, dtype = shape_dtype # allocate data space if rank != result_rank: result = np.empty(shape, dtype=dtype) # use fast communication for main array comm.Bcast(result, root=result_rank) return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def Allreduce(\n self,\n sendbuf: Union[DNDarray, torch.Tensor, Any],\n recvbuf: Union[DNDarray, torch.Tensor, Any],\n op: MPI.Op = MPI.SUM,\n ):\n ret, sbuf, rbuf, buf = self.__reduce_like(self.handle.Allreduce, sendbuf, recvbuf, op)\n if buf is not None and isinstance(buf, torch.Tensor) and buf.is_cuda and not CUDA_AWARE_MPI:\n buf.copy_(rbuf)\n return ret", "def ol_mpi_allreduce(data, operator: Union[int, str, None] = None):\n import numba_mpi\n\n if operator is None or isinstance(operator, nb.types.NoneType):\n op_id = -1 # value will not be used\n elif isinstance(operator, nb.types.misc.StringLiteral):\n op_id = Operator.id(operator.literal_value)\n elif isinstance(operator, nb.types.misc.Literal):\n op_id = int(operator)\n else:\n raise RuntimeError(\"`operator` must be a literal type\")\n\n @register_jitable\n def _allreduce(sendobj, recvobj, operator: Union[int, str, None] = None) -> int:\n \"\"\"helper function that calls `numba_mpi.allreduce`\"\"\"\n if operator is None:\n return numba_mpi.allreduce(sendobj, recvobj) # type: ignore\n else:\n return numba_mpi.allreduce(sendobj, recvobj, op_id) # type: ignore\n\n if isinstance(data, types.Number):\n\n def impl(data, operator: Union[int, str, None] = None):\n \"\"\"reduce a single number across all cores\"\"\"\n sendobj = np.array([data])\n recvobj = np.empty((1,), sendobj.dtype)\n status = _allreduce(sendobj, recvobj, operator)\n assert status == 0\n return recvobj[0]\n\n elif isinstance(data, types.Array):\n\n def impl(data, operator: Union[int, str, None] = None):\n \"\"\"reduce an array across all cores\"\"\"\n recvobj = np.empty(data.shape, data.dtype)\n status = _allreduce(data, recvobj, operator)\n assert status == 0\n return recvobj\n\n else:\n raise TypeError(f\"Unsupported type {data.__class__.__name__}\")\n\n return impl", "def imap(self, iterable):\n def get_results():\n \"\"\"Get a result from the worker output queue and try to yield\n results back to the caller.\n\n This yields results back in the order of their associated tasks.\n \"\"\"\n self._recv_result() # blocks\n tasks = self._tasks_in_progress\n results = self._task_results_waiting\n\n for task_id in tasks.keys():\n if task_id not in results:\n break\n\n del tasks[task_id]\n result = results.pop(task_id)\n yield result.value\n\n for result in self._map_to_workers(iterable, get_results):\n yield result", "def runSlaveRun():\n\n np.set_printoptions(linewidth=1000)\n function = None\n options = None\n\n # print(\"Process {}/{} reporting for duty!\".format(rank, size))\n\n function = comm.bcast(function, root=0)\n arguments = comm.scatter(options, root=0)\n\n results = function(*arguments)\n\n comm.Barrier()\n comm.gather(results, root=0)\n comm.Disconnect()", "def _data_parallel_master(self, intermediates):\n\n # Always using same \"device order\" makes the ReduceAdd operation faster.\n # Thanks to:: Tete Xiao (http://tetexiao.com/)\n intermediates = sorted(intermediates, key=lambda i: i[1].sum.get_device())\n\n to_reduce = [i[1][:2] for i in intermediates]\n to_reduce = [j for i in to_reduce for j in i] # flatten\n target_gpus = [i[1].sum.get_device() for i in intermediates]\n\n sum_size = sum([i[1].sum_size for i in intermediates])\n sum_, ssum = ReduceAddCoalesced.apply(target_gpus[0], 2, *to_reduce)\n mean, inv_std = self._compute_mean_std(sum_, ssum, sum_size)\n\n broadcasted = Broadcast.apply(target_gpus, mean, inv_std)\n\n outputs = []\n for i, rec in enumerate(intermediates):\n outputs.append((rec[0], _MasterMessage(*broadcasted[i * 2:i * 2 + 2])))\n\n 
return outputs", "def mpi_schedule_job_array(csvstore, job_array, mpi_service=MPIService()):\n param_array = job_array.param_array\n job = job_array.job\n try:\n if mpi_service.rank == 0:\n # master\n results = []\n nb_completed_tasks = 0\n nb_tasks = len(param_array)\n for i in range(1, mpi_service.size):\n if len(param_array) > 0:\n task_param = param_array.pop(0)\n mpi_service.comm.send([job, task_param], dest=i, tag=0)\n while nb_completed_tasks < nb_tasks:\n [slave_rank, [start, end, result]] = mpi_service.comm.recv(source=MPI.ANY_SOURCE, tag=0)\n results += result\n nb_completed_tasks += 1\n if len(param_array) > 0:\n task_param = param_array.pop(0)\n mpi_service.comm.send([job, task_param], dest=slave_rank, tag=0)\n print \"All tasks sent\"\n try:\n kill_slaves(mpi_service)\n except Exception as inst:\n print inst\n print \"All tasks completed\"\n return results\n else:\n # slave\n mpi_status = MPI.Status()\n while 1:\n # waiting sending works by master\n print 'Slave ' + str(mpi_service.rank) + ' is ready...'\n [task_job, task_param] = mpi_service.comm.recv(source=0, tag=MPI.ANY_TAG, status=mpi_status)\n if mpi_status.Get_tag() == 1:\n print 'Closed rank ' + str(mpi_service.rank)\n break\n start_date = datetime.datetime.now()\n result = task_job(task_param)\n end_date = datetime.datetime.now()\n print mpi_service.rank, task_param, \"ended\"\n mpi_service.comm.send([mpi_service.rank, [start_date, end_date, result]], dest=0, tag=0)\n\n except:\n if mpi_service.rank == 0:\n print \"Something went wrong, we should log errors.\"\n traceback.print_exc()\n kill_slaves(mpi_service)\n sys.exit(1)", "def getResults(workers):\n results = []\n for worker in workers:\n results += worker.getResults()\n \n return results", "def broadcast(self):\n comm.Barrier()\n\n if rank == 0:\n dim = np.array([self.nspin, self.nkpt, self.nband], dtype=np.int)\n else:\n dim = np.empty(3, dtype=np.int)\n\n comm.Bcast([dim, MPI.INT])\n\n if rank != 0:\n self.EIG = np.empty(dim, dtype=np.float64)\n self.Kptns = np.empty((dim[1],3), dtype=np.float64)\n\n comm.Bcast([self.EIG, MPI.DOUBLE])\n comm.Bcast([self.Kptns, MPI.DOUBLE])", "def test_run_simulator_with_processes_and_numpy_array():\n cluster = LocalCluster(n_workers=2, processes=True, threads_per_worker=1)\n simulator = Simulator(model, sim_shapes=dict(x=(10,)), cluster=cluster)\n\n pars = np.random.random((100, 2))\n sims = dict(x=np.zeros((100, 10)))\n sim_status = np.full(100, SimulationStatus.RUNNING, dtype=np.int)\n\n simulator.run(\n pars=pars,\n sims=sims,\n sim_status=sim_status,\n indices=np.arange(100, dtype=np.int),\n collect_in_memory=True,\n batch_size=20,\n )\n\n assert np.all(sim_status == SimulationStatus.FINISHED)\n assert not np.all(np.isclose(sims[\"x\"].sum(axis=1), 0.0))\n simulator.client.close()\n cluster.close()", "def __call__(self, results):\n\n results = self._mixup_transform(results)\n return results", "def process(self, results):\n raise NotImplementedError", "def _recv(self):\n\n self.had_recv_error = []\n self.recv_exc = {}\n results = []\n import sys;\n #only listen on workers involved in calculation.\n for worker in self.workers[:self.Nsent]:\n if worker in self.had_send_error:\n results.append(None)\n else:\n try:\n sys.stdout.flush()\n results.append(worker.recv())\n except sync_cluster.RemoteError:\n import sys\n err = sys.exc_info()[1]\n # Force the err msg (err[1]) to be a string.\n # This dimishes info content, but makes sure\n # that the sames errors are hashed correctly\n # in the dictionary. 
(does it?)\n err_type,err_msg, err_traceback = err\n err = err_type,str(err_msg), err_traceback\n self.had_recv_error.append(worker)\n try: self.recv_exc[err].append(worker.id)\n except: self.recv_exc[err] = [worker.id]\n results.append(None)\n except sync_cluster.RemoteCrashError:\n # Gotta be more intelligent here...\n msg = 'Error! Remote worker %d appears to have crashed.' \\\n % worker.id\n raise sync_cluster.RemoteCrashError,msg\n # else handle other errors\n #print\n return tuple(results)", "def gather_qpt_function(self, func_name, *args, **kwargs):\n partial = self.gather_qpt_function_me(func_name, *args, **kwargs)\n\n if i_am_master:\n\n # Contruct an array with the shape of partial,\n # adding a dimension of length nqpt.\n total = np.zeros([self.nqpt] + list(partial.shape[1:]),\n dtype=partial.dtype)\n\n for i, arr in enumerate(partial):\n total[i,...] = arr[...]\n\n active_ranks = self.get_active_ranks()\n if len(active_ranks) > 1:\n for irank in active_ranks[1:]:\n partial = comm.recv(source=irank, tag=irank)\n for arr in partial:\n i += 1\n total[i,...] = arr[...]\n\n elif self.active_worker:\n comm.send(partial, dest=0, tag=rank)\n return\n\n else:\n return\n\n # Now I could broadcast the total result to all workers\n # but right now there is no need to.\n\n return total", "def mpi_allreduce(data, operator: Union[int, str, None] = None):\n if operator:\n return MPI.COMM_WORLD.allreduce(data, op=Operator.operator(operator))\n else:\n return MPI.COMM_WORLD.allreduce(data)", "def Reduce(\n self,\n sendbuf: Union[DNDarray, torch.Tensor, Any],\n recvbuf: Union[DNDarray, torch.Tensor, Any],\n op: MPI.Op = MPI.SUM,\n root: int = 0,\n ):\n ret, sbuf, rbuf, buf = self.__reduce_like(self.handle.Reduce, sendbuf, recvbuf, op, root)\n if buf is not None and isinstance(buf, torch.Tensor) and buf.is_cuda and not CUDA_AWARE_MPI:\n buf.copy_(rbuf)\n return ret", "def all_reduce_data(grads, comm, rank, size):\r\n\r\n sendbuf_grads = []\r\n shapes = []\r\n\r\n # flatten each gradient from ndarray to list\r\n # store the shapes\r\n for grad in grads:\r\n shapes.append(grad.shape)\r\n sendbuf_grads += grad.flatten().tolist()\r\n\r\n # list is immutable and thus cannot be changed inplace by allreduce\r\n # need to convert lists to buffer-like ndarrays\r\n sendbuf_grads = np.array(sendbuf_grads)\r\n recvbuf_grads = np.zeros(len(sendbuf_grads))\r\n comm.Allreduce(sendbuf_grads, recvbuf_grads)\r\n\r\n # recover to a list of correctly shaped ndarray\r\n reduced_grads = []\r\n start = 0\r\n for shape in shapes:\r\n # NOTE: np.prod returns random result when overflow!\r\n num_elems = np.prod(shape)\r\n curr_elems = recvbuf_grads[start:start+num_elems]\r\n reduced_grads.append(np.array(curr_elems).reshape(shape))\r\n start += num_elems\r\n\r\n return reduced_grads\r\n\r\n sendbuf_grads = []\r\n shapes = []\r\n\r\n # flatten each gradient from ndarray to list\r\n # store the shapes\r\n for grad in grads:\r\n shapes.append(grad.shape)\r\n sendbuf_grads += grad.flatten().tolist()\r\n\r\n # list is immutable and thus cannot be changed inplace by allreduce\r\n # need to convert lists to buffer-like ndarrays\r\n sendbuf_grads = np.array(sendbuf_grads)\r\n recvbuf_grads = np.zeros(len(sendbuf_grads))\r\n comm.Allreduce(sendbuf_grads, recvbuf_grads)\r\n\r\n # recover to a list of correctly shaped ndarray\r\n reduced_grads = []\r\n start = 0\r\n for shape in shapes:\r\n # NOTE: np.prod returns random result when overflow!\r\n num_elems = np.prod(shape)\r\n curr_elems = recvbuf_grads[start:start + num_elems]\r\n 
reduced_grads.append(np.array(curr_elems).reshape(shape))\r\n start += num_elems\r\n\r\n return reduced_grads", "def _allreduce(sendobj, recvobj, operator: Union[int, str, None] = None) -> int:\n if operator is None:\n return numba_mpi.allreduce(sendobj, recvobj) # type: ignore\n else:\n return numba_mpi.allreduce(sendobj, recvobj, op_id) # type: ignore", "def test_distributed_compute(local_registry, loop, dask_array, numpy_array):\n q = local_registry.Quantity(dask_array, units_)\n\n with cluster() as (s, [a, b]):\n with Client(s[\"address\"], loop=loop):\n comps = add_five(local_registry, q)\n res = comps.compute()\n\n assert np.all(res.m == numpy_array)\n assert not dask.is_dask_collection(res)\n assert res.units == units_\n\n assert q.magnitude is dask_array", "def broadcast(data: T, root: int) -> T:\n return collective.broadcast(data, root)", "def master(group_name: str, sum_worker_number: int, multiply_worker_number: int, is_immediate: bool = False):\n proxy = Proxy(\n group_name=group_name,\n component_type=\"master\",\n expected_peers={\n \"sum_worker\": sum_worker_number,\n \"multiply_worker\": multiply_worker_number,\n },\n )\n\n sum_list = np.random.randint(0, 10, 100)\n multiple_list = np.random.randint(1, 10, 20)\n print(\"Generate random sum/multiple list with length 100.\")\n\n # Assign sum tasks for summation workers.\n destination_payload_list = []\n for idx, peer in enumerate(proxy.peers[\"sum_worker\"]):\n data_length_per_peer = int(len(sum_list) / len(proxy.peers[\"sum_worker\"]))\n destination_payload_list.append((peer, sum_list[idx * data_length_per_peer : (idx + 1) * data_length_per_peer]))\n\n # Assign multiply tasks for multiplication workers.\n for idx, peer in enumerate(proxy.peers[\"multiply_worker\"]):\n data_length_per_peer = int(len(multiple_list) / len(proxy.peers[\"multiply_worker\"]))\n destination_payload_list.append(\n (peer, multiple_list[idx * data_length_per_peer : (idx + 1) * data_length_per_peer]),\n )\n\n if is_immediate:\n session_ids = proxy.iscatter(\n tag=\"job\",\n session_type=SessionType.TASK,\n destination_payload_list=destination_payload_list,\n )\n # Do some tasks with higher priority here.\n replied_msgs = proxy.receive_by_id(session_ids, timeout=-1)\n else:\n replied_msgs = proxy.scatter(\n tag=\"job\",\n session_type=SessionType.TASK,\n destination_payload_list=destination_payload_list,\n timeout=-1,\n )\n\n sum_result, multiply_result = 0, 1\n for msg in replied_msgs:\n if msg.tag == \"sum\":\n print(f\"{proxy.name} receive message from {msg.source} with the sum result {msg.body}.\")\n sum_result += msg.body\n elif msg.tag == \"multiply\":\n print(f\"{proxy.name} receive message from {msg.source} with the multiply result {msg.body}.\")\n multiply_result *= msg.body\n\n # Check task result correction.\n assert sum(sum_list) == sum_result\n assert np.prod(multiple_list) == multiply_result", "def scatter_work(array, mpi_rank, mpi_size, root=0, dtype=np.int32):\n if mpi_rank == root:\n print(f\"Scattering array to {mpi_size} ranks\")\n scatter_total = array.size\n mod = scatter_total % mpi_size\n if mod != 0:\n print(\"Padding array for scattering...\")\n pad = -1 * np.ones(mpi_size - mod, dtype=dtype)\n array = np.concatenate((array, pad))\n scatter_total += mpi_size - mod\n assert scatter_total % mpi_size == 0\n assert scatter_total == array.size\n else:\n scatter_total = None\n\n scatter_total = comm.bcast(scatter_total, root=root)\n subset = np.empty(scatter_total//mpi_size, dtype=dtype)\n comm.Scatter(array, subset, root=root)\n\n 
return subset", "def worker(self, q, return_dict):\n pid = os.getpid()\n while True:\n qqq = q.get()\n if qqq == 'DONE':\n # print('proc =', os.getpid())\n break\n\n (idx, d) = qqq\n mol_id = d[0]\n smi = d[1]\n # print screening processing in every pout step\n if self.pout != 0:\n if idx % self.pout == self.pout-1:\n print(\"processing: \", idx+1, flush=True)\n result_dict = self.simulation_process(idx, mol_id, smi, pid)\n return_dict[idx] = result_dict", "def mpisync(func, comm=MPI.COMM_WORLD):\n def mpifunc(*args, **kwargs):\n if comm.Get_rank() == 0:\n res = func(*args, **kwargs)\n else:\n res = None\n res = comm.bcast(res, root=0)\n return res\n return mpifunc", "def broadcast(self, new_par):\n for client in self.clients:\n client.recv(new_par.copy())", "def alltoall_sendbuffer(\n self, obj: torch.Tensor\n ) -> List[Union[MPI.memory, Tuple[int, int], MPI.Datatype]]:\n mpi_type = self.__mpi_type_mappings[obj.dtype]\n\n nproc = self.size\n shape = obj.shape\n strides = [1] * len(shape)\n strides[-1] = obj.stride()[-1]\n offsets = [0] * len(shape)\n offsets[1:] = [obj.element_size() * stride for stride in obj.stride()[:-1]]\n\n # Step 1: Wrap along axes > 1 (all axes except send_axis and recv_axis\n for i in range(len(shape) - 1, 1, -1):\n mpi_type = mpi_type.Create_vector(shape[i], 1, strides[i]).Create_resized(0, offsets[i])\n mpi_type.Commit()\n\n # Step 2: Create Custom sized vector datatypes, according to rank-specific size along send_axis\n # send_elements has nproc entries, defining how many vectors of mpi_type are stacked together for each process to receive along the send_axis\n send_elements = np.full((nproc,), obj.shape[1] // nproc)\n send_elements[: obj.shape[1] % nproc] += 1\n\n # Create short_Type from the last entry of send_elements\n mpi_short_type = mpi_type.Create_vector(send_elements[-1], 1, strides[1]).Create_resized(\n 0, offsets[1]\n )\n mpi_short_type.Commit()\n # Create long_Type from the first entry of send_elements (wraps one more mpi_type vector than short_Type\n mpi_long_type = mpi_type.Create_vector(send_elements[0], 1, strides[1]).Create_resized(\n 0, offsets[1]\n )\n mpi_long_type.Commit()\n\n # Step 3: Pack short_type and long_type along the recv_axis\n mpi_short_type = mpi_short_type.Create_vector(shape[0], 1, strides[0]).Create_resized(\n 0, send_elements[-1] * obj.stride()[1] * obj.element_size()\n )\n mpi_short_type.Commit()\n mpi_long_type = mpi_long_type.Create_vector(shape[0], 1, strides[0]).Create_resized(\n 0, send_elements[0] * obj.stride()[1] * obj.element_size()\n )\n mpi_long_type.Commit()\n\n # Step 4: Prepare sencounts, senddispls and sendtypes for alltoallw\n # to each process 1 element (=sendcount) of the custom prepared long or short type will be send\n sendcount = [1] * nproc\n tmp_displs = [0] * nproc\n tmp_displs[1:] = np.cumsum(send_elements[:-1])\n element_size = obj.element_size()\n senddispls = [element_size * obj.stride()[1] * d for d in tmp_displs]\n sendtypes = [mpi_short_type] * nproc\n for i in range(obj.shape[1] % nproc):\n sendtypes[i] = mpi_long_type\n\n return self.as_mpi_memory(obj), (sendcount, senddispls), sendtypes", "def __broadcast_like(\n self, func: Callable, buf: Union[DNDarray, torch.Tensor, Any], root: int\n ) -> Tuple[Optional[DNDarray, torch.Tensor]]:\n # unpack the buffer if it is a HeAT tensor\n if isinstance(buf, DNDarray):\n buf = buf.larray\n # convert torch tensors to MPI memory buffers\n if not isinstance(buf, torch.Tensor):\n return func(buf, root), None, None, None\n\n srbuf = buf if CUDA_AWARE_MPI 
else buf.cpu()\n\n return func(self.as_buffer(srbuf), root), srbuf, srbuf, buf", "def _map_to_workers(self, iterable, result_getter):\n if not self.is_started:\n raise RuntimeError(\"Cannot process inputs: must call start() first.\")\n\n tasks = TaskIterator(iterable)\n task = next(tasks)\n\n while True:\n try:\n self._send_task(task)\n task = next(tasks)\n except Queue.Full:\n for result in result_getter(): # I wish I had `yield from` :(\n yield result\n except StopIteration:\n break\n\n while not self.is_completed:\n for result in result_getter():\n yield result", "def test_compute(local_registry, dask_array, numpy_array):\n q = local_registry.Quantity(dask_array, units_)\n\n comps = add_five(local_registry, q)\n res = comps.compute()\n\n assert np.all(res.m == numpy_array)\n assert not dask.is_dask_collection(res)\n assert res.units == units_\n assert q.magnitude is dask_array", "def _recv(self) -> List[np.ndarray]:", "def get_external_result(self):\n while True:\n if len(self.result_queue) > 0:\n result = copy.deepcopy(self.result_queue[0])\n del self.result_queue[0]\n return result" ]
[ "0.58564526", "0.5782116", "0.5733982", "0.5675335", "0.55820996", "0.55295515", "0.5480815", "0.5480762", "0.543126", "0.542469", "0.54245704", "0.5397666", "0.5393652", "0.53869706", "0.5362338", "0.52945507", "0.52782714", "0.5240479", "0.52378017", "0.52356863", "0.52222115", "0.5201218", "0.5196614", "0.5173303", "0.5160297", "0.51338917", "0.5120369", "0.50927925", "0.50922084", "0.5087859" ]
0.7116765
0
Get the MPI executor pool, with the specified number of processes and threads per process.
def get_mpi_pool(num_workers=None, num_threads=1): if (num_workers == 1) and (num_threads == _NUM_THREAD_WORKERS): from concurrent.futures import ProcessPoolExecutor return ProcessPoolExecutor(1) if not QUIMB_MPI_LAUNCHED: raise RuntimeError( "For the moment, quimb programs using `get_mpi_pool` need to be " "explicitly launched using `quimb-mpi-python`.") if USE_SYNCRO: return SynchroMPIPool() if not can_use_mpi_pool(): raise RuntimeError( "`get_mpi_pool()` cannot be explicitly called unless already " "running under MPI, or you set the environment variable " "`QUIMB_MPI_SPAWN=True`.") from mpi4py.futures import MPIPoolExecutor return MPIPoolExecutor(num_workers, main=False, env={'OMP_NUM_THREADS': str(num_threads), 'QUIMB_NUM_MPI_WORKERS': str(num_workers), '_QUIMB_MPI_LAUNCHED': 'SPAWNED'})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_pool(b_dummy=True, num=4):\n if b_dummy:\n pool = ThreadPool(num)\n else:\n pool = ProcessPool(num)\n\n return pool", "def get_executor(max_workers: int) -> Executor:\n return (\n DummyExecutor()\n if max_workers == 1\n else ProcessPoolExecutor(max_workers or None)\n )", "def _get_executor_init(self, workers):\n def pool_fn(seqs):\n pool = get_pool_class(True)(\n workers, initializer=init_pool_generator,\n initargs=(seqs, self.random_seed, get_worker_id_queue()))\n _DATA_POOLS.add(pool)\n return pool\n return pool_fn", "def _get_executor_init(self, workers):\n def pool_fn(seqs):\n pool = get_pool_class(True)(\n workers, initializer=init_pool_generator,\n initargs=(seqs, None, get_worker_id_queue()))\n _DATA_POOLS.add(pool)\n return pool\n\n return pool_fn", "def VariableExecutor(N=None, executor='process', **kwargs):\n \n N = multiprocessing.cpu_count() if N is None else N\n \n if N == 0:\n executor = 'serial'\n\n executors = {\n 'loky': LokyPoolExecutor,\n 'serial': SerialExecutor,\n 'process': ProcessPoolExecutor,\n 'thread': ThreadPoolExecutor,\n 'cuda': CUDAPoolExecutor}\n executor = executors[executor]\n \n log.debug('Launching a {} with {} processes'.format(executor.__name__, N)) \n with executor(N, **kwargs) as pool:\n yield pool", "def __init__(self, parallel_num=4):\n from concurrent.futures import ThreadPoolExecutor\n self.executor = ThreadPoolExecutor(max_workers=parallel_num)", "def new_thread_executor(nthreads=None):\n if nthreads is None:\n nthreads = os.cpu_count() or 4\n return concurrent.futures.ThreadPoolExecutor(nthreads)", "def pool(self) -> asyncpg.pool.Pool:\n return self.bot.pool", "def make_pool(self) -> pool.SimpleConnectionPool:\n\n return pool.SimpleConnectionPool(\n minconn=1, maxconn=self.pool_size, **self._kwargs\n )", "def _get_executor_init(self, workers):\n raise NotImplementedError", "def determine_jobs_per_pool(numpools, totaljobs):\n cluster = os.environ['CC_CLUSTER']\n if cluster in ['graham', 'beluga']:\n jobs_per_pool = math.floor(totaljobs / numpools)\n else:\n jobs_per_pool = totaljobs\n return jobs_per_pool", "def get_pool():\n app = get_app()\n return app['pool']", "def __init__(self, pool_size: float = 10):\n self.pool_size = pool_size", "def pool(self) -> NodePool:\n\n return self._pool", "def current_worker_pool():\n try:\n return worker_thread_data.pool\n except AttributeError:\n return None", "def get_num_parallel_workers():\n return _config.get_num_parallel_workers()", "def pool(self) -> Pool:\n assert self._pool is not None\n return self._pool", "def create(cls, host, port, num_workers):\n event_loop = asyncio.get_event_loop()\n pool_executor = executor.ProcessPoolExecutorWithInit(\n max_workers=num_workers, initializer=_silence_sigint)\n return cls(host, port, event_loop, pool_executor)", "def init_processes(rank, size, backend='gloo'):\n os.environ['MASTER_ADDR'] = '12.12.10.13'\n os.environ['MASTER_PORT'] = '29500'\n dist.init_process_group(backend, rank=rank, world_size=size)", "def remote_executor_factory(\n channels: List[grpc.Channel],\n thread_pool_executor: Optional[futures.Executor] = None,\n dispose_batch_size: int = 20,\n max_fanout: int = 100,\n default_num_clients: int = 0,\n) -> executor_factory.ExecutorFactory:\n py_typecheck.check_type(channels, list)\n if not channels:\n raise ValueError('The list of channels cannot be empty.')\n if thread_pool_executor is not None:\n py_typecheck.check_type(thread_pool_executor, futures.Executor)\n py_typecheck.check_type(dispose_batch_size, int)\n 
py_typecheck.check_type(max_fanout, int)\n py_typecheck.check_type(default_num_clients, int)\n\n remote_executors = []\n for channel in channels:\n remote_executors.append(\n remote_executor.RemoteExecutor(\n channel=channel,\n thread_pool_executor=thread_pool_executor,\n dispose_batch_size=dispose_batch_size))\n\n def _flat_stack_fn(cardinalities):\n num_clients = cardinalities.get(placements.CLIENTS, default_num_clients)\n return _configure_remote_workers(num_clients, remote_executors)\n\n unplaced_ex_factory = UnplacedExecutorFactory(use_caching=False)\n composing_executor_factory = ComposingExecutorFactory(\n max_fanout=max_fanout,\n unplaced_ex_factory=unplaced_ex_factory,\n flat_stack_fn=_flat_stack_fn,\n )\n\n return ReconstructOnChangeExecutorFactory(\n underlying_stack=composing_executor_factory,\n ensure_closed=remote_executors,\n change_query=_CardinalitiesOrReadyListChanged(\n maybe_ready_list=remote_executors))", "def pool(self):\n return self._properties.get('pool')", "def init_processes(rank, run_id, hosts, backend='gloo'):\n hosts = hosts.split(',')\n os.environ['MASTER_ADDR'] = hosts[0] # first worker is the master worker\n os.environ['MASTER_PORT'] = '29500'\n world_size = len(hosts)\n os.environ['WORLD_SIZE'] = str(world_size)\n os.environ['RANK'] = str(rank)\n dist.init_process_group(backend, rank=rank, world_size=world_size)\n run(rank, world_size, run_id)", "def get_worker_processes(f, args, nproc=None, allow_scalar=False):\n\n import multiprocessing\n num_procs = get_num_processors(nproc)\n\n workers = [\n multiprocessing.Process(target=f, args=args) for _ in range(num_procs)\n ]\n if allow_scalar and len(workers) == 1:\n return workers[0]\n else:\n return workers", "def get_pool(self):\n try:\n return self._pool\n except AttributeError:\n db_url = getattr(settings, self.name)\n self._pool = PostgresConnectionPool.for_url(db_url)\n return self._pool", "def get_pool ( self ):\n if self._poolstack:\n return self._poolstack[-1]\n else:\n return self.get_new_pool ( force=True )", "def _repopulate_pool(self):\n for i in range(self._processes - len(self._pool)):\n w = self.Process(target=worker,\n args=(self._inqueue, self._outqueue,\n self._initializer,\n self._initargs, self._maxtasksperchild,\n self._wrap_exception,\n self._finalizer,\n self._finalargs)\n )\n self._pool.append(w)\n w.name = w.name.replace('Process', 'PoolWorker')\n w.daemon = True\n w.start()\n util.debug('added worker')", "def get_connection_pool(self, params):\r\n cp_params = dict(params)\r\n cp_params.update(self.pool_cls_kwargs)\r\n return self.pool_cls(**cp_params)", "def __init__(self, num_workers, eval_function, timeout=None, maxtasksperchild=None):\n self.eval_function = eval_function\n self.timeout = timeout\n self.pool = Pool(processes=num_workers, maxtasksperchild=maxtasksperchild)", "def __init__(self, pool_size):\n \n self.pool_size=pool_size;", "def make_pool(num_snp):\r\n\tc=0\r\n\tpool=[]\r\n\tfor i in xrange(0,num_snp+1):\r\n\t\ts=make_str(i, num_snp)\r\n\t\tpool+=map(\"\".join, itertools.permutations(s, num_snp))\r\n\treturn list(set(pool))" ]
[ "0.71476984", "0.6638191", "0.6473073", "0.6456225", "0.6444362", "0.61933714", "0.6124338", "0.61074543", "0.6093102", "0.5918116", "0.5846336", "0.5798279", "0.5750463", "0.5717205", "0.56760335", "0.5673281", "0.5671824", "0.5667239", "0.5662297", "0.56489134", "0.5647965", "0.56012124", "0.55950314", "0.5589793", "0.55832016", "0.5566255", "0.5563028", "0.5555959", "0.55491996", "0.5540755" ]
0.7311589
0
Add a new path to look for primitives. The new path will be inserted at the front of the list, so any primitive found in this new folder will take precedence over any other primitive with the same name that existed in the system before.
def add_primitives_path(path): if path not in _PRIMITIVES_PATHS: if not os.path.isdir(path): raise ValueError('Invalid path: {}'.format(path)) LOGGER.debug('Adding new primitives path %s', path) _PRIMITIVES_PATHS.insert(0, os.path.abspath(path))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_primitives_path(path):\n added = _add_lookup_path(path, _PRIMITIVES_PATHS)\n if added:\n LOGGER.debug('New primitives path added: %s', path)", "def AddPath(self, path):\n self.paths.append(str(path))\n self.paths.sort()", "def _add_one(self, path):\n\n if not type(path).__name__ == \"Path\":\n path = Path(path)\n self._entries.append(path)\n self._clean = False\n self._current = 0", "def add(self, path):\r\n return self.paths.append(path)", "def add(path):\n print(uc.add(path))", "def path(self, new_path):\n if new_path == self.path:\n return\n\n self._path.append(new_path)", "def check_path(self, primitives):\n new_list = []\n mypath = self.get_directory()\n\n for primitive in primitives:\n mypath = mypath + '/'\n primitive_file_name = primitive + '.json'\n if(mypath in primitive and os.path.exists(primitive_file_name)):\n new_list.append(primitive)\n elif(os.path.exists(mypath + primitive_file_name)):\n new_list.append(mypath + primitive)\n if new_list == []:\n raise ValueError(primitives, 'is not found in MLprimitives.')\n return new_list", "def add_path(self, path_name):\n path = PathInfo()\n path._path = path_name\n self._paths.append(path)\n return path", "def test_addPath(self):\n g = Garden()\n g.addPath('foo', 'v1', [\n ('bar', 'a'),\n ('cow', 'b'),\n ])\n self.assertEqual(list(g.pathsRequiring('bar', 'a')), [('foo', 'v1')])\n self.assertEqual(list(g.pathsRequiring('cow', 'b')), [('foo', 'v1')])\n self.assertEqual(list(g.inputsFor('foo', 'v1')), [\n [('bar', 'a'), ('cow', 'b')]\n ])", "def padd(self, path, pos=0):\n path = os.path.normpath(path) # remove double slashes and stuff\n if path in self.path_list:\n print(path, \"already exists. Not appending to \", self.name)\n elif os.path.exists(path):\n self.path_list.insert(pos, path)\n else:\n print(path, \"does not exist! 
Not appending to \", self.name)\n return\n self.pupdate()", "def append(self, path):\n self.paths.append(path)\n self.time += path.time", "def add_path(self, path, path_item):\n if path not in self._swagger:\n self._swagger[path] = path_item\n else:\n for method, definition in path_item.items():\n if definition is not None:\n setattr(self._swagger[path], method, definition)", "def add(self,path):\n path = os.path.abspath(path)\n self.files[path] = None\n return True", "def add_path(self, path):\n _dir = self._add_dir()\n _special_mkdir('%s/%s' % (self.path, _dir), self.umask)\n return self._add_path(path, _dir)", "def insertNewPath(self, newPath):\n\n indice = self.index\n for state in newPath:\n self.path.insert(indice, state)\n indice += 1", "def add_dir(self, path):\n assert self._root_dir == path or self._root_dir.is_parent_of(path)\n self._dirs.append(path)", "def __add__ (self, path):\n if path:\n return self.__class__ (self.moniker + sep + path)\n else:\n #\n # path is unlikely to be empty for a specific call,\n # but this functionality is used by the create and\n # delete methods which refer to the existing class\n # by default.\n #\n return self", "def add_to_preset_path(self, path):\n if os.path.isfile(path):\n path = os.path.dirname(path)\n self.preset_path.append(path)", "def AddPath(self, path):\n\n if not self.db.GetOneRow('select * from paths where path=\"%s\";'\n % path):\n self.db.ExecuteSql('insert into paths(path, track_id) values(\"%s\", %d);'\n %(path, self.persistant['id']))\n self.db.ExecuteSql('commit;')\n return True\n\n elif FLAGS.verbose:\n print 'Path already stored'\n\n return False", "def putPath(self, path, pathname):\n self.paths[path] = pathname", "def add_path(self):\n\n filename = filedialog.askdirectory(initialdir='/', title='Select Directory')\n self.path_listbox.insert(tk.END, filename)\n self.set_check_paths()", "def main(\n path: str,\n folder: str,\n head: bool,\n separator: str,\n remove_duplicates: bool,\n remove_non_folders: bool,\n remove_non_abs: bool,\n) -> None:\n new_path = common.add(\n path=path,\n folder=folder,\n head=head,\n separator=separator,\n remove_duplicates=remove_duplicates,\n remove_non_folders=remove_non_folders,\n remove_non_abs=remove_non_abs,\n )\n print(new_path)", "def AddPath(*args, **kwargs):\n return _gdi_.GraphicsPath_AddPath(*args, **kwargs)", "def add(self,path):\n out, err, code = self.command( [\"git\", \"add\", path], self.directory )", "def add_to_path(self, additional_path):\n\n return self.__class__(f\"{additional_path}/{self.full_path}\")", "def set(self, new_path):\n\n for i in range(self.depth):\n self.path[i] = new_path[self.max_input*i:self.max_input*(i + 1)]", "def addPath(self, path=[]):\n if path:\n path = [\n self.transform_reverse.transformPoints(pts) for pts in path\n ]\n if self.trace:\n self.path.append(path)\n else:\n self.drawPath(path)", "def addItem(self, path):\n self._editor.addItem(path)", "def add_folder(self, path):\n import os\n\n if path not in self.folders:\n self.folders.append(path)\n if not os.path.exists(path):\n os.makedirs(path)", "def add(self, path, value):\n self.frontier_index += 1 # get a new unique index\n heapq.heappush(self.frontierpq,(value, -self.frontier_index, path))" ]
[ "0.7619783", "0.70127213", "0.69399655", "0.6817209", "0.6505161", "0.63751376", "0.6185189", "0.6095576", "0.60904014", "0.60371226", "0.59809875", "0.5979583", "0.5973543", "0.5948789", "0.5947534", "0.5808803", "0.5796138", "0.57915574", "0.5779705", "0.5742069", "0.5641845", "0.5620303", "0.56124854", "0.56050897", "0.5582306", "0.55747646", "0.5570092", "0.5568409", "0.55661607", "0.5561706" ]
0.79477745
0
Get the list of folders where the primitives will be looked for. This list will include the value of any `entry_point` named `jsons_path` published under the name `mlprimitives`.
def get_primitives_paths(): primitives_paths = list() entry_points = pkg_resources.iter_entry_points('mlprimitives') for entry_point in entry_points: if entry_point.name == 'jsons_path': path = entry_point.load() primitives_paths.append(path) return _PRIMITIVES_PATHS + primitives_paths
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_primitives_paths():\n paths = _load_entry_points('primitives') + _load_entry_points('jsons_path', 'mlprimitives')\n return _PRIMITIVES_PATHS + paths", "def shapes():\n # -- Define a list of locations to search for, starting by\n # -- adding in our builtin shape locations\n paths = [\n os.path.join(\n os.path.dirname(os.path.dirname(__file__)),\n 'shapes',\n ),\n ]\n\n # -- If we have any paths defined by environment\n # -- variables we should add them here\n if constants.PLUGIN_ENVIRONMENT_VARIABLE in os.environ:\n paths.extend(\n os.environ[constants.PLUGIN_ENVIRONMENT_VARIABLE].split(';'),\n )\n\n shape_list = list()\n\n for path in paths:\n for root, _, files in os.walk(path):\n for filename in files:\n if filename.endswith('.json'):\n shape_list.append(\n os.path.join(\n root,\n filename,\n ),\n )\n\n return shape_list", "def check_path(self, primitives):\n new_list = []\n mypath = self.get_directory()\n\n for primitive in primitives:\n mypath = mypath + '/'\n primitive_file_name = primitive + '.json'\n if(mypath in primitive and os.path.exists(primitive_file_name)):\n new_list.append(primitive)\n elif(os.path.exists(mypath + primitive_file_name)):\n new_list.append(mypath + primitive)\n if new_list == []:\n raise ValueError(primitives, 'is not found in MLprimitives.')\n return new_list", "def get_primitives_used(self, pipeline_run):\n primitives = []\n for step in pipeline_run['steps']:\n primitives.append(step['primitive']['python_path'])\n return primitives", "def owncloud_folder_list(node_addon, user_addon, **kwargs):\n path = request.args.get('path')\n return node_addon.get_folders(path=path)", "def get_folder_list():\n if exists_key_store('folders:list'):\n return get_key_store('folders:list')\n else:\n # initialize folder list with root (All)\n set_key_store('folders:counter', 0)\n rpush_key_store('folders:list', {'id': 0, 'parent': -1, 'name': 'All'})\n return get_key_store('folders:list')", "def get_paths(input_folder):\n list_files = []\n conll_folder = glob.glob(input_folder + '/*.json')\n \n for filename in conll_folder:\n list_files.append(filename)\n\n return list_files", "def folder_type(self):\n types = []\n for type in self.folders_type:\n types.append(type)\n return types", "def getFolders(self):\n\n folders = []\n\n for folder in self.metaData.jsonObj['folders']:\n f = HyperLinkResource(folder)\n folders.append(Folder(self._client, f.selfLink))\n\n return folders", "def lib_dirs(self):\r\n ret = []\r\n for x in [y.type for y in self.variables] + [\r\n y.op for y in self.node_order]:\r\n try:\r\n ret += x.c_lib_dirs()\r\n except utils.MethodNotDefined:\r\n pass\r\n return utils.uniq(ret)", "def list_vm_folders(cls, container, datacenter):\n obj = Query.get_obj(container, datacenter)\n folders = []\n\n if hasattr(obj, 'vmFolder'):\n for folder in obj.vmFolder.childEntity:\n if hasattr(folder, 'childType'):\n folders.append(folder.name)\n if hasattr(folder, 'childEntity'):\n for item in folder.childEntity:\n if hasattr(item, 'childType'):\n folders.append(item.parent.name+' -> '+item.name)\n return folders", "def get_folder_list(args):\n\tif not args.folders:\n\t\treturn None\n\n\tif os.path.isfile(args.folders):\n\t\treturn [x.strip() for x in list(open(args.folders, 'r'))]\n\n\telse:\n\t\treturn [x.strip() for x in args.folders.split(',')]", "def _include_dir_list_yaml(\n loader: SafeLineLoader, node: yaml.nodes.Node\n) -> List[JSON_TYPE]:\n loc = os.path.join(os.path.dirname(loader.name), node.value)\n return [load_yaml(f) for f in _find_files(loc, 
\"*.yaml\")]", "def get_all_paths(why = 'train'):\r\n if why == 'train':\r\n parent_folder = train_parent_folder\r\n if why == 'test':\r\n parent_folder = test_test_folder\r\n sub_folders = glob.glob(parent_folder) # Directories of all languages\r\n image_paths = [glob.glob(sub_folder + '\\*') for sub_folder in sub_folders] # Directories of all characters\r\n image_paths = sum(image_paths, []) # Flatten out the 2D list to a 1D list \r\n return image_paths", "def _get_implicit_folder_imports(self) -> list:\r\n implicit_paths: list = []\r\n\r\n if self.folders_node is None:\r\n return []\r\n\r\n def try_append_path(path: str) -> None:\r\n if os.path.isdir(path) and path not in self.import_paths:\r\n implicit_paths.append(path)\r\n\r\n for folder_node in filter(is_folder_node, self.folders_node):\r\n folder_path: str = os.path.normpath(folder_node.text)\r\n try_append_path(folder_path if os.path.isabs(folder_path) else os.path.join(self.project_path, folder_path))\r\n\r\n return PathHelper.uniqify(implicit_paths)", "def get_run_folders():\n return [os.path.join(f, sf) for f in get_date_folders() for sf in os.listdir(f)]", "def get_model_folders(main_folder):\n folders = []\n for d, sub_ds, files in os.walk(main_folder):\n for sub_d in sub_ds:\n contents = os.listdir(os.path.join(d,sub_d))\n for content in contents:\n if \".pt\" in content:\n folders.append(sub_d)\n break\n return sorted(folders,key=lambda x: int(x.split(\"/\")[-1].split(\"_\")[1]))", "def listFolders(folderRoot):\n return os.listdir(folderRoot)", "def libraryFolders() -> list:\n\tpaths = [steamDir() + '/steamapps/'] # create a list for library paths\n\ttry:\n\t\t# open the file that contains the library paths\n\t\twith open(steamDir() + '/steamapps/libraryfolders.vdf', 'r') as file:\n\t\t\tlibrary = Property.parse(file, 'libraryfolders.vdf').as_dict()\n\t\t\t# remove useless stuff\n\t\t\tlibrary['libraryfolders'].pop('timenextstatsreport')\n\t\t\tlibrary['libraryfolders'].pop('contentstatsid')\n\texcept Exception as e:\n\t\traise ConfigError(f'Error while reading steam library file: {e}')\n\n\t# check for other library paths, if the dict is empty, there's no one\n\tif len( library['libraryfolders'] ) != 0:\n\t\tfor i in range( len( library['libraryfolders'] ) ):\n\t\t\tpaths.append( library['libraryfolders'][ i ] + '/steamapps/' ) # append the path\n\n\t# return the \"compiled\" list of libraries\n\treturn paths", "def demo_paths(self):\n base_path = os.path.join(self.module.__path__[0], 'demo')\n paths = []\n if os.path.isdir(base_path):\n for item in os.listdir(base_path):\n # TODO: support examples which is not auto-loaded\n if not os.path.isdir(os.path.join(base_path, 'examples')):\n paths.append(os.path.join(base_path, item))\n return paths", "def list_dirs(self):\n return self.list_groups()", "def get_skins_and_extensions(base_dir):\n ext_paths = []\n for subdir in ['extensions', 'skins']:\n for name in os.listdir(os.path.join(base_dir, subdir)):\n if os.path.isdir(os.path.join(base_dir, subdir, name)):\n ext_paths.append(os.path.join(subdir, name))\n return ext_paths", "def _GetResourceLoaders():\n loaders = []\n\n # Add all paths to list if they are specified on the command line (will warn\n # if any are invalid).\n # Otherwise add members of the default list iff they exist.\n if FLAGS['data_search_paths'].present:\n for path in FLAGS.data_search_paths:\n loaders.append(FileResourceLoader(path))\n else:\n for path in FLAGS.data_search_paths:\n if os.path.isdir(path):\n loaders.append(FileResourceLoader(path))\n 
loaders.extend(DEFAULT_RESOURCE_LOADERS)\n return loaders", "def folders(self):\n from office365.sharepoint.folders.folder_collection import FolderCollection\n return self.properties.get(\"Folders\",\n FolderCollection(self.context, ResourcePath(\"Folders\", self.resource_path)))", "def paths(self):\n rc = []\n for pg in self.path_groups:\n rc.extend(pg.paths)\n return rc", "def syspaths(self):\n res = []\n for path, jsmodule in self.jsmodules.items():\n if jsmodule.js_lib_path != \"\":\n js_lib_path = os.path.dirname(jsmodule.js_lib_path.rstrip(\"/\")) # get parent\n if not js_lib_path in res:\n res.append(js_lib_path)\n return res", "def compute_theme_directories(self) -> list[str]:\n lm = g.app.loadManager\n table = lm.computeThemeDirectories()[:]\n directory = g.os_path_normslashes(g.app.theme_directory)\n if directory and directory not in table:\n table.insert(0, directory)\n # All entries are known to exist and have normalized slashes.\n return table", "def toolpaths_list(self) -> List[dict]:\n self.__logger.debug('Eva.toolpaths_list called')\n return self.__http_client.toolpaths_list()", "def listdirs(self):\n return self.list_groups()", "def dir_list(load):\n if \"env\" in load:\n # \"env\" is not supported; Use \"saltenv\".\n load.pop(\"env\")\n\n ret = []\n\n if \"saltenv\" not in load:\n return ret\n\n saltenv = load[\"saltenv\"]\n metadata = _init()\n\n if not metadata or saltenv not in metadata:\n return ret\n\n # grab all the dirs from the buckets cache file\n for bucket in _find_dirs(metadata[saltenv]):\n for dirs in bucket.values():\n # trim env and trailing slash\n dirs = _trim_env_off_path(dirs, saltenv, trim_slash=True)\n # remove empty string left by the base env dir in single bucket mode\n ret += [_f for _f in dirs if _f]\n\n return ret" ]
[ "0.7141229", "0.6743173", "0.6618696", "0.615481", "0.6012607", "0.59987366", "0.5982601", "0.5922148", "0.5847463", "0.582099", "0.5768875", "0.57664824", "0.5757282", "0.56904095", "0.5656275", "0.56554615", "0.56318593", "0.5618304", "0.5606496", "0.55484414", "0.554087", "0.5521658", "0.55205685", "0.5510712", "0.5505972", "0.54784435", "0.5468451", "0.5466911", "0.5450803", "0.5438875" ]
0.76567644
0
Locate and load the JSON annotation of the given primitive. All the paths found in PRIMITIVES_PATHS will be scanned to find a JSON file with the given name, and as soon as a matching JSON file is found it is returned.
def load_primitive(name): for base_path in get_primitives_paths(): parts = name.split('.') number_of_parts = len(parts) for folder_parts in range(number_of_parts): folder = os.path.join(base_path, *parts[:folder_parts]) filename = '.'.join(parts[folder_parts:]) + '.json' json_path = os.path.join(folder, filename) if os.path.isfile(json_path): with open(json_path, 'r') as json_file: LOGGER.debug('Loading primitive %s from %s', name, json_path) return json.load(json_file) raise ValueError("Unknown primitive: {}".format(name))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _load_annotation(json_path):\n # Open the file containing the annotation\n with open(json_path) as annotation_file:\n\n # Parse the AI2D annotation from the JSON file into a dictionary\n annotation = json.load(annotation_file)\n\n # Return the annotation\n return annotation", "def load_json(path, prop_name):\n data_file= open(path)\n data = json.load(data_file)\n prop = data['features'][0]['properties'][prop_name]\n return prop", "def _localloadjson(path: str) -> JSONType:\n with open(path, encoding=\"utf-8\") as fh:\n return json.load(fh)", "def load_primitive(name):\n primitive = _load(name, get_primitives_paths())\n if not primitive:\n raise ValueError(\"Unknown primitive: {}\".format(name))\n\n return primitive", "def load(path):\n \n with codecs.open(path, 'r', **rparams) as f:\n print ' > loading... {}'.format(path)\n if '.json' in path:\n obj = json.load(f, object_hook=json_numpy_obj_hook)\n elif '.pkl' in path:\n obj = pickle.load(file=f)\n else:\n # check the file referenced is sensible\n obj_id = [k for k in flocs.keys() if k in path]\n if obj_id is None or len(obj_id) != 1: raise ValueError(\n '{} not found in the path: \\n {}'.format(flocs.keys(), path))\n return obj", "def load_json(path: Path) -> Any:\n with path.open() as f:\n return json.load(f)", "def load_json(path, name):\n if 'txt' not in name:\n name += '.json'\n with open(os.path.join(path, name), 'r') as json_file:\n return json.load(json_file)", "def read_json(self, json_name):\r\n with open(json_name, 'r') as infile:\r\n self.pet_file = json.load(infile) # load existing json file\r\n self.pet_file_name = json_name # set name to passed name\r", "def fromJSON(self, path='') -> dict:\n try:\n return(importJSON(path))\n except Exception as error:\n print(f\"Error: self.fromJSON({path}) -> {error}\")", "def _load(self, json_str, filepath):\n # pylint: disable=protected-access\n return self.json_o._load(json_str, filepath)", "def pose_json(json_path: str):\n raise NotImplementedError", "def init_panoptic_json(self, json_path: str):\n with open(json_path) as json_file:\n json_data = json.load(json_file)\n for entry in json_data['annotations']:\n self.panoptic_json[entry['image_id']] = entry['segments_info']", "def json_get(name, key):\n cmpd_file = name + '.json'\n data_dirs = [dir for dir in os.listdir('.') if dir.endswith('_data')]\n dir = data_dirs[0]\n for fname in os.listdir(dir):\n if fname.endswith(name + '.json'):\n with open(os.path.join(dir,fname)) as f:\n data = json.load(f)\n return data[key]", "def get_primitives_paths():\n paths = _load_entry_points('primitives') + _load_entry_points('jsons_path', 'mlprimitives')\n return _PRIMITIVES_PATHS + paths", "def get_primitives_paths():\n primitives_paths = list()\n entry_points = pkg_resources.iter_entry_points('mlprimitives')\n for entry_point in entry_points:\n if entry_point.name == 'jsons_path':\n path = entry_point.load()\n primitives_paths.append(path)\n\n return _PRIMITIVES_PATHS + primitives_paths", "def load_json_obj(path: str) -> RAW_CFG:\n with fsspec.open(path) as json_file:\n return json.load(json_file)", "def _load(name, paths):\n for base_path in paths:\n parts = name.split('.')\n number_of_parts = len(parts)\n\n for folder_parts in range(number_of_parts):\n folder = os.path.join(base_path, *parts[:folder_parts])\n filename = '.'.join(parts[folder_parts:]) + '.json'\n json_path = os.path.join(folder, filename)\n\n if os.path.isfile(json_path):\n with open(json_path, 'r') as json_file:\n LOGGER.debug('Loading %s from %s', name, json_path)\n 
return json.load(json_file)", "def load_file(self):\n self._check_setup()\n json_str = self.get_json_file()\n if json_str is None:\n return\n\n if not self._is_json_str():\n with open(json_str, 'r') as f:\n jf = json.load(f)\n else:\n jf = json.loads(json_str)\n\n\n self.jf = jf\n\n target = jf['target']\n if isinstance(target, str):\n target = eval(target)\n\n goal = jf['goal']\n if isinstance(goal, str):\n goal = eval(goal)\n\n self.gen_target_pos = np.array(target)\n self.gen_goal_pos = np.array(goal)\n\n if 'place_walls' in jf:\n self.place_walls = jf['place_walls']\n\n if self.get_is_rnd():\n self.rnd_map = jf['rnd']\n self.env_jf = jf['env']", "def load_contains(path: str) -> Dict[str, List[str]]:\n with open(path, 'r', encoding='utf8') as f:\n return json.load(f)", "def test_load_path(parser):\n doc = parser.load(pathlib.Path('jsonexamples') / 'small' / 'demo.json')\n doc.at_pointer('/Image/Width')", "def find_shape(name):\n for path in shapes():\n shape_name = os.path.basename(path).replace('.json', '')\n\n if shape_name == name:\n return path\n\n return None", "def open_json(json_path):\n with open('label_map.json', 'r') as f:\n json_dict = json.load(f)\n return json_dict", "def from_path(cls, path: str):\n with open(path) as f:\n return json.load(f)", "def load_json_fixture(filename: str) -> Any:\n return json.loads(load_fixture(f\"jellyfin/{filename}\"))", "def check_path(self, primitives):\n new_list = []\n mypath = self.get_directory()\n\n for primitive in primitives:\n mypath = mypath + '/'\n primitive_file_name = primitive + '.json'\n if(mypath in primitive and os.path.exists(primitive_file_name)):\n new_list.append(primitive)\n elif(os.path.exists(mypath + primitive_file_name)):\n new_list.append(mypath + primitive)\n if new_list == []:\n raise ValueError(primitives, 'is not found in MLprimitives.')\n return new_list", "def __load_json(self, path):\n try:\n with Path(path).open('r') as f:\n return json.load(f)\n except ValueError as ve:\n six.raise_from(ValueError(\"error while loading the fixture %s\" % path), ve)", "def load_json(path):\n import json\n\n def _load_json(*args, **kwargs):\n with open(path) as data_file:\n return json.load(data_file)\n\n return _load_json()", "def load_from_geojson(self, filename_or_url):", "def _load_annotations(self):\n annotation_file = self._filepath(self.ANNOTATION_FILE)\n with open(annotation_file) as f:\n json_data = json.load(f)\n\n return json_data", "def _get_annotation(cls, name):\n return cls.__annotations__.get(name)" ]
[ "0.5557436", "0.5461464", "0.53854406", "0.531877", "0.51922894", "0.5177836", "0.5138669", "0.51056015", "0.51037264", "0.504644", "0.50388736", "0.50041217", "0.49636927", "0.49334192", "0.49191824", "0.4907879", "0.48960394", "0.48271298", "0.48257434", "0.47950384", "0.47693214", "0.47669145", "0.47412547", "0.4738723", "0.4731714", "0.46990013", "0.4691904", "0.46852666", "0.4666567", "0.46399024" ]
0.70710224
0
Progressive widening beam search to find a node. The progressive widening beam search involves a repeated beam search, starting with a small beam width and then extending to progressively larger beam widths if the target node is not found. This implementation simply returns the first node found that matches the termination condition. `G` is a NetworkX graph. `source` is a node in the graph. The search for the node of interest begins here and extends only to those nodes in the (weakly) connected component of this node. `value` is a function that returns a real number indicating how good a potential neighbor node is when deciding which neighbor nodes to enqueue in the breadth-first search. Only the best nodes within the current beam width will be enqueued at each step. `condition` is the termination condition for the search. This is a function that takes a node as input and returns a Boolean indicating whether the node is the target. If no node matches the termination condition, this function raises `NodeNotFound`.
def progressive_widening_search(G, source, value, condition, initial_width=1): # Check for the special case in which the source node satisfies the # termination condition. if condition(source): return source # The largest possible value of `i` in this range yields a width at # least the number of nodes in the graph, so the final invocation of # `bfs_beam_edges` is equivalent to a plain old breadth-first # search. Therefore, all nodes will eventually be visited. # # TODO In Python 3.3+, this should be `math.log2(len(G))`. log_m = math.ceil(math.log(len(G), 2)) for i in range(log_m): width = initial_width * pow(2, i) # Since we are always starting from the same source node, this # search may visit the same nodes many times (depending on the # implementation of the `value` function). for u, v in nx.bfs_beam_edges(G, source, value, width): if condition(v): return v # At this point, since all nodes have been visited, we know that # none of the nodes satisfied the termination condition. raise nx.NodeNotFound('no node satisfied the termination condition')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def BreadthFirstSearch(graph, source):\r\n \r\n # Dictionary dataInfo will be used to store the information about each vertex. (Ancestors, descendants, distance from source, and color)\r\n dataInfo = {} \r\n \r\n # List queue will be used to store the vertices currently in the queue, these vertices will all be gray.\r\n queue = []\r\n \r\n # Loops through the vertices in the graph, creates a key in the dictionary for each vertice, with default values.\r\n for vertex in graph[\"V\"]:\r\n dataInfo[str(vertex)] = {\"ancestor\": \"\", \"descendants\": [], \"distance\": \"\", \"color\": \"white\"}\r\n \r\n # At key source (variable) in dataInfo dictionary, key ancestor is set to have no value other than \"NA\" (as it is the starting point), and distance to 0 (as it will always be zero as it is the source).\r\n dataInfo[str(source)][\"ancestor\"] = \"NA\"\r\n dataInfo[str(source)][\"distance\"] = 0\r\n\r\n def symmetricVertex(edge, otherVertex):\r\n \r\n \"\"\"\r\n Function symmetricVertex takes arguments edge, a list of an edge from the graph dictionary, and otherVertex, an integer that is the other vertex in the edge with the sourceVertex. The function will return the point other than the otherVertex, and will be used to find adjacent vertices relative to the current vertex in the queue. Example: edge ([1, 2]), otherVertex (1), the function will return 2.\r\n \"\"\"\r\n \r\n for num in edge:\r\n if num != otherVertex:\r\n return num\r\n \r\n\r\n def pathFinder(graph, sourceVertex):\r\n \r\n \"\"\"\r\n Function pathFinder takes arguments graph, a dictionary, with the same keys for the edges and the vertices and sourceVertex, an integer. The function will loop through all of the edges in the graph and find adjacent vertices relative to the current sourceVertex. sourceVertex values will be in the queue. The function will edit dictionaries and lists, not return any value.\r\n \"\"\"\r\n \r\n # List removeEdges will be used to store the edges that will be removed from the graph dictionary after the loop ends. Makes the code more efficient, as you don't want to loop through a million vertices every time, now do you?\r\n removeEdges = []\r\n \r\n # Loop through edges in the graph, will be used to find adjacent vertices.\r\n for edge in graph[\"E\"]:\r\n \r\n # If the sourceVertex is in the edge and the edge is not discovered yet, then edit and change values in the main dictionary, dataInfo.\r\n if (sourceVertex in edge) and (dataInfo[str(symmetricVertex(edge, sourceVertex))] != \"gray\"):\r\n otherVertex = symmetricVertex(edge, sourceVertex)\r\n \r\n # Adds variable otherVertex to the descendants of the sourceVertex.\r\n dataInfo[str(sourceVertex)][\"descendants\"].append(otherVertex)\r\n \r\n # Updates key(otherVertex) to correct values. Ancestor is always the sourceVertex, the distance is always the distance of sourceVertex incremented by one, and the color is updated to gray as it is added to the queue.\r\n dataInfo[str(otherVertex)] = {\"ancestor\": sourceVertex, \"descendants\": [], \"distance\": (dataInfo[str(sourceVertex)][\"distance\"] + 1), \"color\": \"gray\"}\r\n \r\n # Edge includes two discovered edges, so it will be removed to stop redundancy. 
It is added to the removeEdges list.\r\n removeEdges.append(edge)\r\n \r\n # Appends the discovered vertex to the queue.\r\n queue.append(otherVertex)\r\n \r\n # After the loop ends, the edges that contain the source vertex have been exhausted, so the color is updated to black.\r\n dataInfo[str(sourceVertex)][\"color\"] = \"black\" \r\n \r\n # If the sourceVertex is in the queue, it is removed, as all of the edges containing it have been exhausted.\r\n if sourceVertex in queue:\r\n queue.remove(sourceVertex)\r\n \r\n # Loop through the edges in the removeEdges list, each edge will be removed.\r\n for edge in removeEdges:\r\n graph[\"E\"].remove(edge)\r\n \r\n # The function pathFinder is called on the graph and the source vertex, which sets up the queue.\r\n pathFinder(graph, source)\r\n \r\n # While the list queue contains values, the pathFinder function is called on the graph, and the queue value at index 0.\r\n while len(queue) != 0:\r\n pathFinder(graph, queue[0])\r\n \r\n # Loop below is for formatting of the data, makes it easier to read.\r\n for key in dataInfo:\r\n print \"Vertex: \" + key + \", Distance: \" + str(dataInfo[key][\"distance\"]) + \", Ancestor: \" + str(dataInfo[key][\"ancestor\"]) + \", Descendants: \" + str(dataInfo[key][\"descendants\"]) + \", Color: \" + str(dataInfo[key][\"color\"]) + \".\" \r\n \r\n # Returns dictionary dataInfo.\r\n return dataInfo", "def search(state, cut_value, game, prune=True):\n print (game.step)\n f = open(\"search_tree_\" + str(game.step) + \".txt\", 'w')\n\n def max_value(state, alpha, beta, depth, node):\n start = time.time()\n if game.cutoff(state, depth):\n return game.evaluate(state), None\n val = -inf\n action = None\n pre_val = game.evaluate(state)\n print (\"pre \" + str(pre_val))\n for a, s in game.successors(state):\n #print (str(a))\n cur_val = game.evaluate(s)\n #print (str(a) + ':' + str(cur_val))\n node_child = Search_node(node, a, cur_val)\n node.add_child(node_child)\n if cur_val > pre_val + cut_value:\n v, _ = min_value(s, alpha, beta, depth + 1, node_child)\n f.write(\"a: \" + str(a) + \"; v: \" + str(v) + \"; depth:\" + \\\n str(depth) + \"; alpha:\" + str(alpha) + \"; beta:\" + str(beta) \\\n + \" \\n\")\n else:\n v = cur_val\n if v > val:\n val = v\n action = a\n if prune:\n if v >= beta:\n return v, a\n alpha = max(alpha, v)\n end = time.time()\n print(\"max t:\" + str(end - start))\n return val, action\n\n def min_value(state, alpha, beta, depth, node):\n if game.cutoff(state, depth):\n return game.evaluate(state), None\n val = inf\n action = None\n pre_val = game.evaluate(state)\n print (\"min pre \" + str(pre_val))\n for a, s in game.successors(state):\n cur_val = game.evaluate(s)\n node_child = Search_node(node, a, cur_val)\n node.add_child(node_child)\n if cur_val < pre_val - cut_value:\n v, _ = max_value(s, alpha, beta, depth + 1, node_child)\n # f.write(\"a: \" + str(a) + \"; v: \" + str(v) + \"; depth:\" + \\\n # str(depth) + \"; alpha:\" + str(alpha) + \"; beta:\" + str(beta) + \" \\n\")\n else:\n v = cur_val\n if v < val:\n val = v\n action = a\n if prune:\n if v <= alpha:\n return v, a\n beta = min(beta, v)\n return val, action\n\n root_node = Search_node(None, None, 0)\n\n _, action = max_value(state, -inf, inf, 0, root_node)\n root_node.print_tree()\n f.close()\n return action", "def ss_breadth_first_search_tree(\n graph: ScipyGraph, source_node: NodeID, depth_limit: int\n ) -> Tuple[NumpyNodeMap, NumpyNodeMap]:\n\n is_directed = ScipyGraph.Type.compute_abstract_properties(\n graph, {\"is_directed\"}\n 
)[\"is_directed\"]\n node_list: np.ndarray = graph.node_list\n depth_limit = len(node_list) - 1 if depth_limit == -1 else depth_limit\n source_node_position = np.flatnonzero(node_list == source_node).item()\n bfs_tree_csr = ss.csgraph.breadth_first_tree( # depth_limit is not used here!\n graph.value, source_node_position, directed=is_directed\n ).astype(bool)\n\n # Calcuate Depths\n depths = np.full(len(node_list), depth_limit + 1, dtype=int)\n depths[source_node_position] = 0\n current_node_positions = np.array([source_node_position], dtype=int)\n for depth in range(1, depth_limit + 1):\n selector = np.zeros(len(node_list), dtype=bool)\n selector[current_node_positions] = True\n current_node_positions = selector @ bfs_tree_csr\n if not current_node_positions.any():\n break\n depths[current_node_positions] = depth\n\n # Calculate Parents\n parents = np.empty(len(node_list), dtype=int)\n bfs_tree_coo = bfs_tree_csr.tocoo()\n parents[source_node_position] = source_node\n parents[bfs_tree_coo.col] = bfs_tree_coo.row\n\n # Ensure depth_limit\n valid_nodes = graph.node_list\n valid_depths_selector = depths <= depth_limit\n depths = depths[valid_depths_selector]\n parents = parents[valid_depths_selector]\n valid_nodes = valid_nodes[valid_depths_selector]\n depths_nodes = valid_nodes.copy()\n parents_nodes = valid_nodes.copy()\n\n node2depth = NumpyNodeMap(depths, depths_nodes)\n node2parent = NumpyNodeMap(parents, parents_nodes)\n\n return node2depth, node2parent", "def concrete_search(self, limit):\n frontier = Frontier_SortedList.Frontier_SortedList()\n closed = {}\n initial_node = TreeNode.TreeNode(\n id=0,\n state=self.problem.initial_state,\n cost=0,\n node_depth=0,\n f=None,\n parent=None,\n last_action=None,\n )\n initial_node.f = self.__f_strategy(initial_node)\n id = 1\n frontier.insert(initial_node)\n solution = False\n while not solution and not frontier.is_empty():\n actual_node = frontier.remove()\n pruned = False\n if self.problem.is_goal(actual_node.state):\n solution = True\n else:\n if self.pruning == 1:\n pruned = self.check_node_pruning_1st_prune(actual_node, closed)\n if not pruned:\n closed[actual_node.state.create_md5()] = abs(actual_node.f)\n\n if self.pruning in [0, 1]:\n if not pruned:\n if actual_node.node_depth < limit:\n frontier, id = self.expand_node(id, actual_node, frontier)\n\n if self.pruning == 2:\n if actual_node.node_depth < limit:\n list_nodes, id = self.expand_node_2nd_prune(id, actual_node)\n for node in list_nodes:\n md5 = node.state.create_md5()\n if md5 not in closed or closed[md5] > abs(node.f):\n closed[md5] = abs(node.f)\n frontier.insert(node)\n if solution:\n return self.solution(actual_node)\n else:\n return None", "def beam_search(graph, start, goal, beam_width):\n\n final = []\n next_level = [[start]]\n\n # Process node list\n while next_level:\n # Sort nodes by heuristic and take the best ones\n curr_level = sorted(\n next_level,\n reverse=True,\n key=lambda p: graph.get_heuristic(p[-1], goal)\n )[-beam_width:]\n next_level = []\n\n # Process node list\n while curr_level:\n path = curr_level.pop()\n\n # Exit if a path is found which reaches the goal\n if path[-1] == goal:\n final = path\n break\n\n # Push the new paths onto the list\n connected = graph.get_connected_nodes(path[-1])\n for node in connected:\n # Ignore previously visited nodes\n if node not in path:\n next_level.append(path + [node])\n else:\n # If a path hasn't been found at the current level, search the next\n # level\n continue\n break\n\n # Return the final path or initial 
empty list\n return final", "def uninformed_search(start, end, graph):\n\n class SearchNode():\n def __init__(self, step_cost, name, predecessor):\n self.path_cost = predecessor.path_cost + step_cost if predecessor is not None else 0\n self.step_cost = step_cost\n self.name = name\n self.predecessor = predecessor\n def __repr__(self):\n return self.predecessor.name + \"->\" + self.name + \"=\" + self.path_cost\n\n class Problem():\n def __init__(self, start, end, graph, goal_predicate):\n self.start = start\n self.end = end\n self.graph = graph\n self.is_goal = goal_predicate\n self.visited_nodes = []\n\n nodes_expanded = 0\n nodes_generated = 0\n max_nodes_in_memory = 0\n\n def tree_search(problem, fringe):\n nonlocal nodes_generated\n nonlocal nodes_expanded\n nonlocal max_nodes_in_memory\n\n # create the initial node\n nodes_generated = 1\n fringe = [SearchNode(0, problem.start, None)]\n\n while len(fringe) > 0:\n # keep track of some metrics\n max_nodes_in_memory = max(max_nodes_in_memory, len(fringe))\n nodes_expanded += 1\n\n node = fringe.pop(0)\n while node.name in problem.visited_nodes:\n # ran out of nodes in the fringe\n if len(fringe) == 0:\n return None\n\n node = fringe.pop(0)\n\n if problem.is_goal(node):\n return node\n \n # make sure we never visit this node again, since we'll be expanding it\n problem.visited_nodes.append(node.name)\n\n # keep the fringe sorted by the path cost\n fringe.extend(expand(node, problem))\n fringe = sorted(\n fringe, \n key=lambda node: node.path_cost\n )\n\n return None\n\n def expand(node, problem):\n nonlocal nodes_generated\n nodes = []\n for edge in problem.graph.edges(node.name):\n nodes.append(SearchNode(edge.weight, edge.destination, node))\n \n nodes_generated += len(nodes)\n return nodes\n\n initial_problem = Problem(start, end, graph, lambda x: x.name == end)\n result = tree_search(initial_problem, [])\n\n # convert the resulting nested structure into an actual path of (start, end, cost)\n def walk(node):\n pred = node.predecessor\n if pred is None:\n return []\n \n path = walk(pred)\n path.append((pred.name, node.name, node.step_cost))\n return path\n\n path = walk(result) if result is not None else None\n return (path, nodes_expanded, nodes_generated, max_nodes_in_memory)", "def bfs(graph, initial_node, dest_node):\n return queue_search(graph, initial_node, dest_node, queue.Queue())", "def find_BFS(self, value):\n to_visit = [self]\n while to_visit:\n curr = to_visit.pop(0) # BFS -> .pop(0) -> queue \n if curr.value == value:\n return curr\n to_visit.extend(curr.children)", "def search(self, value):\r\n node = self.head\r\n while node:\r\n if node.value == value:\r\n return node\r\n node = node.next\r\n raise ValueError('Value not found')", "def bfs(self, start, target):\n \n for u in range(self.V):\n self.color[u] = WHITE\n self.depth[u] = -1\n\n q = deque()\n q.append(start)\n self.depth[start] = 0\n\n self.pred[start] = -1\n\n while q:\n u = q.popleft()\n self.color[u] = BLACK\n\n for v in range(self.V):\n if self.color[v] == WHITE and self.capacity[u,\n v] - self.flow[u, v] > 0:\n self.color[v] = GRAY\n q.append(v)\n self.pred[v] = u\n self.depth[v] = self.depth[u] + 1\n\n return self.color[target] == BLACK", "def bfs(self, source, target):\n source.color = TriColor.WHITE\n target.color = TriColor.WHITE\n\n Q = deque()\n Q.append(source)\n\n while len(Q) > 0:\n v = Q.popleft()\n if v.color == TriColor.BLACK:\n # a previously finished vertex\n # used when graph vertices (e.g. 
`self.neighbors_of()` is calculated dynamically)\n continue\n else:\n v.color = TriColor.BLACK # mark finished\n if v == target:\n # re-assign `target` in case `Vertex.__eq__` has been overridden\n target = v\n break\n\n for w, _ in self.neighbors_of(v, color=TriColor.WHITE):\n w.color = TriColor.GRAY # mark discovered\n w.bfs_parent = v\n Q.append(w)\n\n S = [] # holds the shortest path, or empty if None\n u = target\n if u.color == TriColor.BLACK:\n while u is not None:\n S.append(u)\n u = u.bfs_parent\n\n if len(S) > 0:\n path = S[::-1]\n distance = len(path)\n else:\n path = None\n distance = None\n return path, distance", "def breadth_first_search(self, target: Dict) -> Optional[Node]:\n assist_queue = deque()\n assist_queue.append(self.root_node)\n while assist_queue:\n current_node: Node = assist_queue.popleft()\n flag = True\n for k, v in target.items():\n flag = flag and getattr(current_node, k) == v\n if not flag:\n break\n if flag:\n return current_node\n if current_node.children:\n for child in current_node.children:\n assist_queue.append(child)\n return None", "def shortest_path(source, target):\n #although lecture checks for goal when a node is popped off the frontier, efficiency of search can be improved\n #by checking for a goal as nodes are ADDED. If goal detected, don't add it to frontier, just return the solution\n #immediately\n\n #create start point\n start = Node(state = source, parent = None, action = None)\n frontier = QueueFrontier()\n frontier.add(start)\n\n #create explored set\n explored = set()\n\n while True:\n #if nothing left in frontier, no path exists\n if frontier.empty():\n return None\n\n #choose a node from the frontier\n node = frontier.remove()\n #if node is goal, we have solution\n\n #add neighbors 2 frontier using function THATS ALR THERE DUMMY\n for (movie, star) in neighbors_for_person(node.state):\n newNode = Node(state = star, parent = node, action=movie)\n if not frontier.contains_state(newNode) and newNode.state not in explored:\n if newNode.state == target:\n #reverse the solution\n solution = []\n while newNode.parent is not None:\n actionTuple = (newNode.action, newNode.state)\n solution.append(actionTuple)\n newNode = newNode.parent\n solution.reverse()\n return solution\n else: frontier.add(newNode)\n\n #mark state as explored\n explored.add(node.state)", "def bellman_fords_shortest_path(graph: Graph[T], source_vertex_data: T) -> \\\n Tuple[bool, Dict[Vertex[T], int], Dict[Vertex[T], Vertex[T]]]:\n\n vertex_distance_mapping: Dict[Vertex[T], int] = defaultdict(lambda: maxsize) # vertex_weight_mapping\n vertex_parent_mapping: Dict[Vertex[T], Vertex[T]] = dict()\n source_vertex: Vertex[T] = graph.get_vertex(source_vertex_data)\n\n vertex_distance_mapping[source_vertex] = 0\n vertex_parent_mapping[source_vertex] = None\n\n # Relax all the edges (V-1)th time.\n # Why (V-1) times? - https://www.youtube.com/watch?v=-mOEd_3gTK0&feature=youtu.be&list=PLrmLmBdmIlpu2f2g8ltqaaCZiq6GJvl1j&t=785\n for i in range(0, len(graph.vertices)-1): # run it (V-1) times... 
for i=0: i<(V-1); i++\n relax_edges(graph.edges, vertex_distance_mapping, vertex_parent_mapping)\n\n # Relax all the edges for one more time(Vth time) to check if there is any -ve weight cycle present.\n has_negative_weight_cycle: bool = relax_edges(graph.edges, vertex_distance_mapping, vertex_parent_mapping,\n check_negative_weight_cycle=True)\n if has_negative_weight_cycle:\n return has_negative_weight_cycle, dict(), dict()\n\n return has_negative_weight_cycle, vertex_distance_mapping, vertex_parent_mapping", "def binary_search(model, target):\n # target = 0.70\n # threshold = model.get_model().args.threshold\n step = 0.01\n # step_min = 0.0001\n status = 1.0\n stop = 0.001\n counter = 1\n max_iter = 100\n flops = get_flops(model)\n params = get_parameters(model)\n\n while abs(status - target) > stop and counter <= max_iter:\n status_old = status\n # calculate flops and status\n model.set_parameters()\n flops_prune = get_flops(model)\n status = flops_prune / flops\n params_prune = get_parameters(model)\n params_compression_ratio = params_prune / params\n\n string = 'Iter {:<3}: current step={:1.8f}, current threshold={:2.8f}, status (FLOPs ratio) = {:2.4f}, ' \\\n 'params ratio = {:2.4f}.\\n'\\\n .format(counter, step, model.pt, status, params_compression_ratio)\n print(string)\n\n if abs(status - target) > stop:\n # calculate the next step\n flag = False if counter == 1 else (status_old >= target) == (status < target)\n if flag:\n step /= 2\n # calculate the next threshold\n if status > target:\n model.pt += step\n elif status < target:\n model.pt -= step\n model.pt = max(model.pt, 0)\n\n counter += 1\n # deal with the unexpected status\n if model.pt < 0 or status <= 0:\n print('Status {} or threshold {} is out of range'.format(status, model.pt))\n break\n else:\n print('The target compression ratio is achieved. 
The loop is stopped')", "def bfs(board, start_position, start_actions, predicate):\n queue = Queue()\n visited = set()\n start_node = PositionNode(None, start_position, None)\n visited.add(start_position)\n # start actions are actions that have not been pruned\n for action in start_actions:\n next_pos = start_node.next(action)\n visited.add(next_pos)\n node = PositionNode(start_node, next_pos, action)\n queue.put(node)\n\n while not queue.empty():\n node = queue.get()\n if predicate.test(board, node.position):\n return node\n for action in [Action.Up.value, Action.Down.value, Action.Left.value, Action.Right.value]:\n next_pos = node.next(action)\n if valid_agent_position(board, next_pos) and next_pos not in visited:\n queue.put(PositionNode(node, next_pos, action))\n visited.add(next_pos)\n return None # no goal node found", "def graph_search(problem, fringe):\n counter = 0\n closed = {}\n fringe.append(Node(problem.initial))\n max_depth=0\n while fringe:\n node = fringe.pop()\n # Print some information about search progress\n if node.depth>max_depth:\n max_depth=node.depth\n if max_depth<50 or max_depth % 1000 == 0:\n pid = os.getpid()\n py = psutil.Process(pid)\n memoryUse = py.memory_info()[0]/1024/1024\n print('Reached depth',max_depth,\n 'Open len', len(fringe),\n 'Node expanse', counter,\n 'Memory used (MBytes)', memoryUse)\n\n if problem.goal_test(node.state):\n return node, counter\n serial = node.state.__str__()\n if serial not in closed:\n counter += 1\n closed[serial] = True\n fringe.extend(node.expand(problem))\n return None", "def _bfs_nodes(cls, graph, source, size, **kwargs):\n if size < 1:\n return iter(())\n\n return itertools.chain(\n (source,),\n itertools.islice((v for u, v in nx.bfs_edges(graph, source)), size-1)\n )", "def graph_search(state, goal_state, height_limit):\n frontier = []\n heapq.heappush(frontier, problem_search_node.SearchNode(state, None, None, 0, 0))\n explored_set = set()\n while frontier:\n current_node = heapq.heappop(frontier)\n if utils.is_goal_state(current_node.state, goal_state):\n return utils.create_path_to_goal(current_node)\n explored_set.add(current_node.state.key())\n actions = current_node.state.possible_actions(explored_set, height_limit)\n for node in [current_node.child_node(action, height_limit, cons_heuristic, goal_state) for action in actions]:\n heapq.heappush(frontier, node)\n return (-1, [])", "def best_first_graph_search(self, problem, f):\n f = memoize(f, 'f')\n # Set starting node\n node = SearchNode(problem.initial)\n # If the goal is reached, return the resulting node\n if problem.goal_test(node.state):\n return node\n\n # Set priority queue to organize nodes\n # in order of lowest f\n frontier = PriorityQueue(min, f)\n # Append the first node\n frontier.append(node)\n # Initialize empty set\n explored = set()\n # While the frontier is not empty\n while frontier:\n # Get the first node with lowest f\n node = frontier.pop()\n # Check if node is goal\n if problem.goal_test(node.state):\n return node\n # Add the state to the explored set\n explored.add(tuple(node.state))\n # For every child in the expanded node\n for child in node.expand(problem):\n # If the child is not a repeat child append it\n if child.state not in explored and child not in frontier:\n frontier.append(child)\n # If the child is in the frontier\n # This statement basically just filters out children that\n # have the same state but lower path costs\n elif child in frontier:\n # Select that child\n incumbent = frontier[child]\n # If one child is has a lower 
path cost\n if f(child) < f(incumbent):\n # Remove the child that is farther\n del frontier[incumbent]\n frontier.append(child)\n return None", "def dijkstra_search(graph, initial_node, dest_node):\n return queue_search(graph, initial_node, dest_node, queue.PriorityQueue(), True)", "def depth_limited_search(problem, limit):\n\n # in this case, we simply use a list to keep track of nodes we\n # traversed, instead of the data structure, Stack.\n path = list()\n visited = set() # as before, to prevent duplicated nodes\n root = problem.get_initial_state()\n\n def rec_dls(state, action, depth):\n\n visited.add(state)\n\n # if it is a goal\n if problem.goal_test(state):\n path.append((state, action))\n return path\n\n # or if it reaches a certain depth, but not a goal\n elif depth == 0:\n visited.remove(state)\n return None\n\n else:\n path.append([state, action])\n for successor, action, cost in problem.get_successors(state):\n if successor not in visited:\n # recursively expands the deepest node\n res = rec_dls(successor, action, depth-1)\n if res is not None:\n return res\n path.pop()\n visited.remove(state)\n\n # \"Stared From the Bottom\" (root)\n result = rec_dls(root, 'None', limit)\n # return the path if the we DID have achieved something\n if result is not None:\n return path", "def tree_search(problem, frontier):\n compteur = 0\n stop = False\n frontier.append(Node(problem.initial))\n while frontier and not stop:\n compteur+=1\n node = frontier.pop()\n if problem.goal_test(node.state):\n return node\n if(compteur <= limit):\n frontier.extend(node.expand(problem))\n else:\n stop = True\n \n return None", "def bfs(graph, source):\n visited = [False] * len(graph.graph)\n print(visited)\n\n result = \"\"\n queue = []\n\n queue.append(source)\n visited[source] = True\n\n while queue:\n source = queue.pop(0)\n result += str(source)\n\n while graph.graph[source] is not None:\n data = graph.graph[source].vertex\n if not visited[data]:\n queue.append(data)\n visited[data] = True\n graph.graph[source] = graph.graph[source].next\n return result", "def general_search(fringe, visited, limiting_depth):\n node_to_be_explored = fringe[0]\n node_state = node_to_be_explored['state']\n visited[node_state] = node_to_be_explored\n if goal_test(node_to_be_explored['state']):\n return generate_path(node_to_be_explored, visited)\n current_depth = node_to_be_explored['depth']\n if current_depth == limiting_depth:\n return False\n children = [\n {\n 'state': child_state,\n 'parent': node_state,\n 'depth': current_depth + 1,\n }\n for child_state in operator(node_state)]\n for child in children:\n if child['state'] in visited:\n continue\n fringe_copy = [child] + fringe[1:]\n visited_copy = visited.copy()\n solution = general_search(fringe_copy, visited_copy, limiting_depth)\n if solution:\n return solution\n else:\n continue\n return False", "def action(self):\n\n self.start_timer()\n\n minimax_probability = self.norm.cdf(self.root.branching)\n use_minimax = boolean_from_probability(minimax_probability)\n if self.time_consumed > 53:\n # Time is starting to run low, use the faster option\n use_minimax=True\n\n if self.time_consumed < 59:\n if self.root.turn < 4:\n result = book_first_four_moves(self.root)\n elif use_minimax:\n result = minimax_paranoid_reduction(self.root)\n else:\n result = monte_carlo_tree_search(\n self.root,\n playout_amount=3,\n node_cutoff=4,\n outer_cutoff=4,\n num_iterations=1200,\n turn_time=0.75,\n exploration_constant=1.7,\n use_slow_culling=False,\n verbosity=0,\n use_prior=True,\n 
num_priors=4,\n use_fast_prune_eval=False,\n use_fast_rollout_eval=False,\n )\n else:\n result = greedy_choose(self.root)\n\n self.end_timer()\n\n return result", "def bfs_path(G, source, destination):\n vertex_dict = dict(nx.bfs_predecessors(G, source))\n queue = deque()\n queue.append(destination)\n while queue[-1] != source:\n try:\n queue.append(vertex_dict[queue[-1]])\n except KeyError:\n print(f\"Source: {source}, Dest: {destination}\")\n print(f\"Key {queue[-1]} not found in\")\n print_dict(\"bfs\", vertex_dict)\n break\n queue.reverse()\n return queue", "def getPath(\n self,\n source,\n dest,\n as_nodes=False,\n ):\n\n self.dist = {} # A map from nodes to their labels (float)\n self.predecessor = {} # A map from a node to a node\n\n # Initialize the distance labels to \"infinity\"\n\n vertices = self.g.nodes()\n for vertex in vertices:\n self.dist[vertex] = self.inf\n self.predecessor[vertex] = source\n\n # Further set up the distance from the source to itself and\n # to all one hops away.\n\n self.dist[source] = 0.0\n if self.g.is_directed():\n outEdges = self.g.out_edges([source])\n else:\n outEdges = self.g.edges([source])\n for edge in outEdges:\n self.dist[edge[1]] = self.g[edge[0]][edge[1]][self.wt]\n\n s = set(vertices)\n s.remove(source)\n currentMin = self._findMinNode(s)\n if currentMin == None:\n return None\n s.remove(currentMin)\n while currentMin != dest and len(s) != 0 and currentMin != None:\n if self.g.is_directed():\n outEdges = self.g.out_edges([currentMin])\n else:\n outEdges = self.g.edges([currentMin])\n for edge in outEdges:\n opposite = edge[1]\n if self.dist[currentMin] + self.g[edge[0]][edge[1]][self.wt] \\\n < self.dist[opposite]:\n self.dist[opposite] = self.dist[currentMin] \\\n + self.g[edge[0]][edge[1]][self.wt]\n self.predecessor[opposite] = currentMin\n s.add(opposite)\n\n currentMin = self._findMinNode(s)\n\n # print \"Current min node {}, s = {}\".format(currentMin, s)\n\n if currentMin == None:\n return None\n s.remove(currentMin)\n\n # Compute the path as a list of edges\n\n currentNode = dest\n predNode = self.predecessor.get(dest)\n node_list = [dest]\n done = False\n path = []\n while not done:\n path.append((predNode, currentNode))\n currentNode = predNode\n predNode = self.predecessor[predNode]\n node_list.append(currentNode)\n done = currentNode == source\n node_list.reverse()\n if as_nodes:\n return node_list\n else:\n return path", "def search(self, val):\n search_through = self.head\n while search_through:\n if val == search_through.data:\n return search_through\n else:\n search_through = search_through.next\n return search_through", "def breadth_first_graph_search(problem):\n node = Node(problem.initial)\n if problem.goal_test(node.state):\n return node\n frontier = collections.deque([node])\n explored = set()\n while frontier:\n node = frontier.popleft()\n explored.add(node.state)\n for child in node.expand(problem):\n if child.state not in explored and child not in frontier:\n if problem.goal_test(child.state):\n return child\n frontier.append(child)\n return None" ]
[ "0.589111", "0.55604035", "0.5548636", "0.5295215", "0.5288581", "0.52520144", "0.52117807", "0.5199523", "0.51886535", "0.514051", "0.51387787", "0.5101167", "0.50885487", "0.50710326", "0.5070418", "0.5047442", "0.50232387", "0.50172395", "0.50024635", "0.49968472", "0.49940723", "0.49922055", "0.49819648", "0.49622446", "0.49611008", "0.4949222", "0.49233875", "0.48833016", "0.48832554", "0.4878826" ]
0.83419585
0
Search for a node with high centrality. In this example, we generate a random graph, compute the centrality of each node, then perform the progressive widening search in order to find a node of high centrality.
def main():
    G = nx.gnp_random_graph(100, 0.5)
    centrality = nx.eigenvector_centrality(G)
    avg_centrality = sum(centrality.values()) / len(G)

    def has_high_centrality(v):
        return centrality[v] >= avg_centrality

    source = 0
    value = centrality.get
    condition = has_high_centrality

    found_node = progressive_widening_search(G, source, value, condition)
    c = centrality[found_node]
    print('found node {0} with centrality {1}'.format(found_node, c))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def node_edge_centrality(\n H,\n f=lambda x: np.power(x, 2),\n g=lambda x: np.power(x, 0.5),\n phi=lambda x: np.power(x, 2),\n psi=lambda x: np.power(x, 0.5),\n max_iter=100,\n tol=1e-6,\n):\n from ..algorithms import is_connected\n\n # if there aren't any nodes or edges, return an empty dict\n if H.num_nodes == 0 or H.num_edges == 0 or not is_connected(H):\n return {n: np.nan for n in H.nodes}, {e: np.nan for e in H.edges}\n # if the hypergraph is not connected,\n # this metric doesn't make sense and should return nan.\n # if not is_connected(H):\n # return {n: np.nan for n in H.nodes}, {e: np.nan for e in H.edges}\n\n n = H.num_nodes\n m = H.num_edges\n x = np.ones(n) / n\n y = np.ones(m) / m\n\n I, node_dict, edge_dict = incidence_matrix(H, index=True)\n\n check = np.inf\n\n for iter in range(max_iter):\n u = np.multiply(x, g(I @ f(y)))\n v = np.multiply(y, psi(I.T @ phi(x)))\n # multiply by the sign to try and enforce positivity\n new_x = np.sign(u[0]) * u / norm(u, 1)\n new_y = np.sign(v[0]) * v / norm(v, 1)\n\n check = norm(new_x - x) + norm(new_y - y)\n if check < tol:\n break\n x = new_x.copy()\n y = new_y.copy()\n else:\n warn(\"Iteration did not converge!\")\n return {node_dict[n]: new_x[n] for n in node_dict}, {\n edge_dict[e]: new_y[e] for e in edge_dict\n }", "def closenessCentrality(graph, numberOfPoints):\n c_closeness = nx.closeness_centrality(graph)\n c_closeness = heapq.nlargest(numberOfPoints, list(c_closeness.values()))\n return c_closeness", "def find_local_community(G, seed_node, weight, debug_log=False):\n nodes_in_community = seed_node if isinstance(seed_node, list) else [seed_node]\n modularity = edge_modularity(G, nodes_in_community=nodes_in_community, weight=weight)\n neighbor_edges = get_neighbor_edges(G, nodes_in_community=nodes_in_community)\n if debug_log:\n print('==========\\nInitial community has nodes:', nodes_in_community)\n print('Neighbor edges:', neighbor_edges)\n print('Modularity = %f' % modularity)\n while neighbor_edges:\n # Compute the edge_modularity for each neighbor edge,\n # suppose the neighbor edge is added to the community\n mod_max, c_max, e_max = 0, None, None\n for e in neighbor_edges:\n # edges in the current community\n edges_in_temp_community = list(G.subgraph(nodes_in_community).edges)\n # append the candidate edge\n edges_in_temp_community.append(e)\n nodes_in_temp_community = list(G.edge_subgraph(edges_in_temp_community).nodes)\n mod_temp = edge_modularity(G, nodes_in_community=nodes_in_temp_community, weight=weight)\n if mod_temp > mod_max:\n mod_max, c_max, e_max = mod_temp, nodes_in_temp_community, e\n if mod_max > modularity:\n if debug_log:\n print('==========\\nEdge', e_max, 'and node', set(e_max).difference(nodes_in_community), 'are added to the community')\n\n # Update the community and the corresponding neighbor edges\n nodes_in_community = c_max\n modularity = mod_max\n neighbor_edges = get_neighbor_edges(G, nodes_in_community=nodes_in_community)\n\n if debug_log:\n print('The community has nodes:', nodes_in_community)\n print('Modularity = %f' % mod_max)\n print('Neighbor edges:', neighbor_edges)\n else:\n break\n return nodes_in_community, modularity", "def findWeakNode(self):\r\n weak = [0, 1] #LIste point le plus faible et nombre éléments connexes si l'on retire ce point\r\n for sommet in range(self.n):\r\n print(f\"Try NODE={sommet}\")\r\n newGraph = self.retireSommet(sommet)\r\n nGpesConnexes = newGraph.countConnexe()\r\n if weak[1] < nGpesConnexes:\r\n weak = [sommet, nGpesConnexes]\r\n return weak[0], 
weak[1]", "def best_node(self):\n nodes = self._all_nodes()\n sorted_nodes, _ = self.scorer.sort(nodes)\n return sorted_nodes[0]", "def betweennessCentrality(graph, numberOfPoints):\n c_betweenness = nx.betweenness_centrality(graph)\n c_betweenness = heapq.nlargest(\n numberOfPoints, list(\n c_betweenness.values()))\n return c_betweenness", "def betweenness_centrality(self, node):\n l=[]\n b=0\n for i in vertices:\n if i!=node:\n l.append(i)\n comb=list(itertools.combinations(l,2))\n \n for c in comb:\n count=0\n l=self.all_shortest_paths(c[0],c[1])\n if l==None:\n print(c)\n for i in range(len(l)):\n if node in l[i]:\n count+=1\n b+=count/len(l)\n\n return b", "def __generate_central_nodes(self,k=3):\n if k < 3:\n k = 3\n \n self.__logger.info(\"CENTRAL_NODES: Try to seek {} nodes which are currently central\".format(k)) \n res = [n for n,_ in sorted(nx.betweenness_centrality(self.G).items(),key=itemgetter(1),reverse=True)[:4*k]]\n self.__logger.info(\"CENTRAL_NODES: Generated top {} central nodes (according to betweeness centrality)\".format(len(res)))\n \n self.__logger.info(\"CENTRAL_NODES: Sample {} items from the candidates as was requested\".format(k))\n tmp = list(res)\n random.shuffle(tmp)\n return tmp[0:k]", "def harmonicCentrality(graph, numberOfPoints):\n c_harmonic = nx.harmonic_centrality(graph)\n c_harmonic = heapq.nlargest(numberOfPoints, list(c_harmonic.values()))\n return c_harmonic", "def findNode(self, target: hash.hash.Hash):\n for bucket in self.buckets:\n if bucket.inRange(nodeID):\n for node in bucket:\n if node.hash == target:\n return node\n \n return None\n return None", "def top_k_betweenness_centrality(self):\n d={}\n l=[]\n for v in vertices:\n a=self.betweenness_centrality(v)\n d[v]=a\n l.append(a)\n m=max(l)\n l1=[]\n for key in d:\n if d[key]==m:\n l1.append(key)\n\n return l1", "def search_cluster_by_node(self, target):\n for i in range(len(self.result)):\n cluster = self.result[i]\n for node in cluster.get_nodes():\n if target == node:\n return i\n return None", "def test_expected_growth(self):\r\n\r\n graph = nx.lollipop_graph(4, 1)\r\n graph.add_edge(4, 2)\r\n\r\n c = [3, 4]\r\n result = clique.search(c, graph, iterations=100)\r\n assert result == [0, 1, 2, 3]", "def select(self):\n best_qsa_star_add = -99999\n best_node = None\n for a, c in self.children.items():\n qsa = c.wins / c.visits\n if c.visits_amaf == 0:\n qsa_tilde = 0\n else:\n qsa_tilde = c.wins_amaf / c.visits_amaf\n bsa = sqrt(self.k / (self.visits + self.k))\n qsa_star = (1 - bsa) * qsa + bsa * qsa_tilde\n qsa_star_add = qsa_star + 0.2 * self.c * sqrt(log(self.visits) / c.visits)\n if qsa_star_add > best_qsa_star_add:\n best_qsa_star_add = qsa_star_add\n best_node = c\n return best_node", "def nearest_neighbor(self, xRand):\n # TODO: Make this more efficient?\n #within a neighborhood of XRand, determine the lowest cost to go\n minCost = np.inf\n minNode = None\n\n for node in self.Tree:\n\n cost = self.compute_dist(node.state_time[0:6], xRand)\n\n if cost < minCost:\n minNode = node\n minCost = cost\n\n return minNode", "def nearest_neighbor(self, xRand):\n # TODO: Make this more efficient?\n #within a neighborhood of XRand, determine the lowest cost to go\n minCost = np.inf\n minNode = None\n\n for node in self.Tree:\n\n cost = self.compute_dist(node.state_time[0:6], xRand)\n\n if cost < minCost:\n minNode = node\n minCost = cost\n\n return minNode", "def search_best_goal_node(self):\n\n dist_to_goal_list = [self.calc_dist_to_goal(n.x, n.y) for n in self.node_list]\n goal_indexes = [\n 
dist_to_goal_list.index(i)\n for i in dist_to_goal_list\n if i <= self.expand_dis\n ]\n\n safe_goal_indexes = []\n for goal_index in goal_indexes:\n t_node = self.steer(self.node_list[goal_index], self.goal_node)\n if self.check_collision(t_node, self.obstacle_list):\n safe_goal_indexes.append(goal_index)\n\n if not safe_goal_indexes:\n return None\n\n min_cost = min([self.node_list[i].cost for i in safe_goal_indexes])\n for i in safe_goal_indexes:\n if self.node_list[i].cost == min_cost:\n return i\n\n return None", "def _find_lowest_cost_node(self) -> str:\n lowest_cost = float(\"inf\")\n lowest_cost_node = None\n for node in self.costs:\n cost = self.costs[node]\n if cost < lowest_cost and node not in self.closed_nodes:\n lowest_cost = cost\n lowest_cost_node = node\n return lowest_cost_node", "def calculate_best_way(self) -> int:\n node = self._find_lowest_cost_node()\n while node:\n cost = self.costs[node]\n neighbors = self.graph[node]\n for neighbor in neighbors.keys():\n node_cost = cost + neighbors[neighbor]\n if self.costs[neighbor] > node_cost:\n self.costs[neighbor] = node_cost\n self.parents[neighbor] = node\n self.closed_nodes.append(node)\n node = self._find_lowest_cost_node()\n\n return self.costs[\"fin\"]", "def degreeCentrality(graph, numberOfPoints):\n c_degree = nx.degree_centrality(graph)\n c_degree = heapq.nlargest(numberOfPoints, list(c_degree.values()))\n return c_degree", "def compute_centrality_for_nodes(graph: nx.Graph):\n nodes_centralities = {}\n degree_centralities = nx.degree_centrality(graph)\n betweeness_centralities = nx.betweenness_centrality(graph, normalized=True)\n closeness_centralities = nx.closeness_centrality(graph)\n for node in graph.nodes:\n closeness = closeness_centralities[node]\n degree = degree_centralities[node]\n betweeness = betweeness_centralities[node]\n nodes_centralities[node] = {\n \"degree\": degree,\n \"closeness\": closeness,\n \"betweeness\": betweeness\n }\n\n return nodes_centralities", "def closeness_centrality(self):\n try:\n self.logger.info('正在计算网络的接近中心性 ...')\n return self.order_dict(nx.closeness_centrality(self.G), index=1)\n except Exception as e:\n self.logger.error(\"计算失败,原因:{0}\".format(e))", "def find_significant_children(tree, node):\n if node not in tree.children:\n return None\n smax = 1\n c1, c2 = tree.children[node]\n sch = c1, c2\n while tree.population[c1] > 1 or tree.population[c2] > 1:\n if tree.population[c1] >= tree.population[c2]:\n small, big = c2, c1\n else:\n small, big = c1, c2\n if tree.population[small] >= smax:\n smax = tree.population[small]\n sch = small, big\n c1, c2 = tree.children[big]\n return sch", "def centrality(self):\n\n raise NotImplementedError", "def getBestCluster():\r\n global bestCluster\r\n return bestCluster", "def kaltzCentrality(graph, numberOfPoints):\n c_eigenvector = nx.katz_centrality(graph)\n c_eigenvector = heapq.nlargest(\n numberOfPoints, list(\n c_eigenvector.values()))\n return c_eigenvector", "def test_DRGEP_GraphSearchHalve(self):\n graph = nx.DiGraph()\n graph.add_nodes_from(['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', \n 'L', 'M', 'N', 'O'\n ])\n\n graph.add_weighted_edges_from([('A','B', .5), ('B','C',.5), ('C','D',.5), \n ('D','E',.5), ('E','F',.5)\n ])\n\n\n subgraph = nx.DiGraph([(u,v,d) for u,v,d in graph.edges(data=True) if d['weight'] > 0])\n\n\n #temporary solution\n max_dic = graph_search_drgep(subgraph, 'A')\n\n assert 'A' in max_dic\n assert max_dic['A']==1\n assert max_dic['B']==.5\n assert max_dic['C']==.25\n assert max_dic['D']==.125\n 
assert max_dic['E']==.0625\n assert max_dic['F']==.03125", "def find_near_nodes(self, new_node):\n number_nodes = len(self.node_list) + 1\n r = self.connect_circle_dist * math.sqrt(\n (math.log(number_nodes) / number_nodes)\n )\n\n # if expand_dist exists, search vertices in a range no more than expand_dist\n if hasattr(self, \"expand_dis\"):\n r = min(r, self.expand_dis)\n dist_list = [\n (node.x - new_node.x) ** 2 + (node.y - new_node.y) ** 2\n for node in self.node_list\n ]\n near_indexes = [dist_list.index(i) for i in dist_list if i <= r ** 2]\n return near_indexes", "def find_min_node(self):\n min_energy = 10 ** 10\n min_id = -1\n for node in self.node:\n if node.energy < min_energy:\n min_energy = node.energy\n min_id = node.id\n return min_id", "def get_rendezvous_node(nodes, key):\n # TODO\n \n highest_node = None\n weights = []\n for node in nodes:\n x = node + key\n x = x.encode()\n w = hashlib.md5(x).hexdigest()\n weights.append((w, node))\n\n _, highest_node = max(weights)\n return highest_node" ]
[ "0.6254127", "0.59585714", "0.5949099", "0.58462274", "0.583967", "0.58372754", "0.5818489", "0.5792716", "0.5791913", "0.5777286", "0.5737924", "0.5728161", "0.57143867", "0.5713368", "0.5711858", "0.5711858", "0.56890774", "0.5639402", "0.56360054", "0.56196046", "0.56116855", "0.56033397", "0.5591637", "0.55657136", "0.55643725", "0.55642027", "0.55583984", "0.5540426", "0.5535528", "0.5516003" ]
0.8351035
0
Don't emit for subclasses of dict with __reversed__ implemented.
def test_dict_ancestor_and_reversed():
    from collections import OrderedDict

    class Child(dict):
        def __reversed__(self):
            return reversed(range(10))

    seq = reversed(OrderedDict())
    return reversed(Child()), seq
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __reversed__(self):\n\t\treturn reversed(self.__dict__.values())", "def reverse_dicts(self):\n\t\tself.rev_worddict = {self.worddict[word]: word for word in self.worddict}\n\t\tself.rev_classdict = {self.classdict[cl]: cl for cl in self.classdict}", "def clear(self):\n super(ReadOnlyDict, self).clear() # pragma: no cover", "def _freeze_mapping(d):\r\n d = dict(d)\r\n for k in d.keys():\r\n if hasattr(d[k], '__getitem__') and hasattr(d[k], 'keys'):\r\n d[k] = _freeze_mapping(d[k])\r\n d = types.MappingProxyType(d)\r\n return d", "def is_dictionary_subclass(obj):\n return (hasattr(obj, '__class__') and\n issubclass(obj.__class__, dict) and not is_dictionary(obj))", "def _rev_dict(d):\n return {v: k for k, v in d.items()}", "def dfilter(d: dict, *keys: Iterable, reverse=False) -> dict:\n return {k: v for k, v in d.items() if k in keys and not reverse or k not in keys and reverse}", "def __delitem__(self, key):\n super(ReadOnlyDict, self).__delitem__(key)", "def pop(self, key, *args):\n return super(ReadOnlyDict, self).pop(key, *args) # pragma: no cover", "def nonull_dict(self):\n return {k: v for k, v in self.dict.items() if v and k != '_codes'}", "def handle_dict(self, object, name, old, new):\n if old is not Uninitialized:\n unregister = self.next.unregister\n for obj in old.values():\n unregister(obj)\n\n register = self.next.register\n for obj in new.values():\n register(obj)", "def __methodDict(cls, _dict):\n baseList = list(cls.__bases__)\n baseList.reverse()\n for _super in baseList:\n __methodDict(_super, _dict)\n for key, value in cls.__dict__.items():\n if type(value) == types.FunctionType:\n _dict[key] = value", "def popitem(self):\n return super(ReadOnlyDict, self).popitem()", "def test_invert_dict(self):\r\n self.assertEqual(invert_dict({}), {})\r\n self.assertEqual(invert_dict({'3':4}), {4:['3']})\r\n self.assertEqual(invert_dict(\\\r\n {'a':'x','b':1,'c':None,'d':('a','b')}), \\\r\n {'x':['a'],1:['b'],None:['c'],('a','b'):['d']})\r\n self.assertRaises(TypeError, invert_dict, {'a':['a','b','c']})\r\n d = invert_dict({'a':3, 'b':3, 'c':3, 'd':'3', 'e':'3'})\r\n self.assertEqual(len(d), 2)\r\n assert 3 in d\r\n d3_items = d[3][:]\r\n self.assertEqual(len(d3_items), 3)\r\n d3_items.sort()\r\n self.assertEqual(''.join(d3_items), 'abc')\r\n assert '3' in d\r\n d3_items = d['3'][:]\r\n self.assertEqual(len(d3_items), 2)\r\n d3_items.sort()\r\n self.assertEqual(''.join(d3_items), 'de')", "def __reversed__(self): # real signature unknown; restored from __doc__\n pass", "def inverse_update(self, data):\n if not isinstance(data, dict) or not isinstance(self, transforms.MapTransform):\n return data\n d = dict(data)\n for k in self.key_iterator(data):\n transform_key = transforms.TraceableTransform.trace_key(k)\n if transform_key not in data or not data[transform_key]:\n continue\n d = transforms.sync_meta_info(k, data, t=False)\n return d", "def test_dict_unsorted(self):\n xmlns = {\n '_': utils.NETCONF_NAMESPACE,\n 'nm': 's'\n }\n\n xml_node = utils.generate_xml_node(\n self.UNSORTED_DICT,\n xmlns,\n 'rpc'\n )\n\n xml_node_string = etree.tostring(\n xml_node, pretty_print=False\n )\n\n self.assertEqual(\n json.dumps(xmltodict.parse(xml_node_string.decode('utf-8')),\n indent=4, sort_keys=True),\n json.dumps(xmltodict.parse(\"\"\"<rpc xmlns:nm=\"s\" xmlns=\"urn\"\"\" +\n \"\"\":ietf:params:xml:ns:netconf\"\"\" +\n \"\"\":base:1.0\"><a>\"\"\" +\n \"\"\"<b><c>d</c></b></a></rpc>\"\"\"),\n indent=4, sort_keys=True)\n )", "def popitem(self):\r\n result = super(EmittingWeakKeyDefaultDict, 
self).popitem()\r\n if self.emitter:\r\n self.emitter.emit()\r\n return result", "def handle_dict_items(self, object, name, old, new):\n raise NotImplementedError", "def inverse(dict_):\n idict = dict([(value,key) for key,value in dict_.iteritems()])\n if len(idict)!=len(dict_):\n raise ValueError(\"Dictionary has no inverse (values not unique).\")\n return idict", "def reverse(dictionary):\n return {b: a for a, b in dictionary.items()}", "def __iter__(self) -> 'Dictionary':\n return copy.deepcopy(self)", "def quacks_like_dict(object):\n return isinstance(object, collections.Mapping)", "def clear(self):\r\n result = super(EmittingWeakKeyDefaultDict, self).clear()\r\n if self.emitter:\r\n self.emitter.emit()\r\n return result", "def pop(self, key, *args):\r\n result = super(EmittingWeakKeyDefaultDict, self).pop(key, *args)\r\n if self.emitter:\r\n self.emitter.emit()\r\n return result", "def override(self, **meta: Any):\n to_restore: JsonDict = {}\n to_remove = set()\n\n for key, value in meta.items():\n if key in self.meta:\n to_restore[key] = self.meta[key]\n else:\n to_remove.add(key)\n self.meta[key] = value\n\n try:\n yield self\n finally:\n for key in to_remove:\n del self.meta[key]\n self.meta.update(to_restore)", "def to_representation(self, data):\n items = super(DictSerializer, self).to_representation(data)\n return {item[self.dict_key]: item for item in items}", "def _handle_dump_unknown(self, data, original):\n for key, val in original.items():\n if key not in self.fields:\n data[key] = val\n return data", "def quacks_like_dict(object):\n return isinstance(object, Mapping)", "def handle_dict_items(self, object, name, old, new):\n self.handle_dict(object, name, new.removed, new.added)\n\n if len(new.changed) > 0:\n # If 'name' refers to the '_items' trait, then remove the '_items'\n # suffix to get the actual dictionary trait.\n #\n # fixme: Is there ever a case where 'name' *won't* refer to the\n # '_items' trait?\n if name.endswith(\"_items\"):\n name = name[: -len(\"_items\")]\n\n dict = getattr(object, name)\n unregister = self.next.unregister\n register = self.next.register\n for key, obj in new.changed.items():\n unregister(obj)\n register(dict[key])" ]
[ "0.64410263", "0.6156265", "0.5848026", "0.5798487", "0.56366265", "0.55165565", "0.54997116", "0.54988617", "0.5477811", "0.54397434", "0.5428426", "0.5412256", "0.53976214", "0.5380523", "0.5374235", "0.53430724", "0.52987653", "0.5297441", "0.5287265", "0.5274791", "0.52694094", "0.5251096", "0.5234997", "0.52339655", "0.5233749", "0.523106", "0.52162194", "0.5178635", "0.5157446", "0.5153035" ]
0.70229346
0
Don't emit when reversing enum classes
def test_dont_emit_for_reversing_enums():
    from enum import IntEnum

    class Color(IntEnum):
        RED = 1
        GREEN = 2
        BLUE = 3

    for color in reversed(Color):
        yield color
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enum(**enums):\n reverse = dict((value, key) for key, value in iteritems(enums))\n enums['reverse_mapping'] = reverse\n return type('Enum', (), enums)", "def reverse_enum(\n enum_to_reverse: Union[\n Type[SMOOTHIE_G_CODE],\n Type[MAGDECK_G_CODE],\n Type[TEMPDECK_G_CODE],\n Type[THERMOCYCLER_G_CODE],\n Type[HEATER_SHAKER_G_CODE],\n ]\n) -> Dict:\n # I don't know what is going on with mypy, it is complaining\n # about keys not existing as an attribute. I am not calling it\n # as an attribute. I am calling it as a function.\n members = enum_to_reverse.__members__.keys()\n values = [enum_to_reverse[member] for member in members]\n return dict(zip(values, members))", "def __invert__(self):\n new = self._value\n for _, val in self._nameToValue.items():\n if val & new:\n new -= val\n else:\n new += val\n return type(self)(new)", "def enum_command(ctx, field, old, new):\n try:\n with ctx.obj[\"reader\"] as reader, ctx.obj[\"writer\"] as writer:\n writer.copy_schema(reader)\n writer.prepare_encode_cache()\n writer.rename_enum(field, old.encode(\"utf-8\"), new.encode(\"utf-8\"))\n writer.write(reader)\n except Exception:\n click.secho(\"Failed!\", fg=\"red\", bold=True, err=True)\n raise\n else:\n click.secho(\"Done!\", fg=\"green\", err=True, bold=True)", "def enum(self):\r\n raise NotImplementedError", "def __invert__(self):\n return self.reverse()", "def test_enum_aliases():\n class RedundantStatus(OrderedStrEnum):\n\n draft = 'draft'\n unpublished = 'draft'\n published = 'published'\n archived = 'archived'\n\n __order__ = 'draft, unpublished, published, archived'\n\n yield (tools.eq_, RedundantStatus.draft.ordinal, RedundantStatus.unpublished.ordinal)\n yield (tools.eq_, RedundantStatus.draft, RedundantStatus.unpublished)\n yield (tools.assert_less, RedundantStatus.unpublished, RedundantStatus.archived)", "def class1_reversed():\n return Class1(is_reversed=True)", "def __reversed__(self):\n\t\treturn reversed(self.__dict__.values())", "def to_python(self, value):\n if isinstance(value, self.enum_class):\n return value\n value = super(self.__class__, self).to_python(value)\n if isinstance(value, int):\n return self.enum_class(value)\n assert value is None\n return None", "def test_direction(self):\n self.check_validation_error(\"value is not a valid enumeration member; permitted: '<', '>'\", direction=\"<>\")", "def __invert__(self):\r\n if self.field.characteristic == 2:\r\n return runtime.invert(self)\r\n\r\n return super().__invert__()", "def class1_not_reversed():\n return Class1(is_reversed=False)", "def __reversed__(self): # real signature unknown; restored from __doc__\n pass", "def enums(self) -> Mapping[str, wrappers.EnumType]:\n return collections.OrderedDict([\n (k, v) for k, v in self.all_enums.items()\n if not v.meta.address.parent\n ])", "def traverse_enum(type_context, enum_proto, visitor):\n return visitor.visit_enum(enum_proto, type_context)", "def invert(self):\n raise NotImplementedError()", "def __iter__(self):\n return iter(self._enums)", "def test_enum(self):\n i = Organism(state='LIVING')\n print(i)\n print(i.state)\n print(i.state.code)\n print(i.state.code.text)\n print(type(i.state))\n print(StateEnum.LIVING)\n assert str(i.state) == 'LIVING'\n assert i.state.code == StateEnum.LIVING\n obj = json.loads(json_dumper.dumps(i))\n assert obj['state'] == 'LIVING'\n obj = yaml.safe_load(yaml_dumper.dumps(i))\n assert obj['state'] == 'LIVING'\n reconstituted = json_loader.loads(json_dumper.dumps(i), target_class=Organism)\n print(f'RECONSTITUTED = {reconstituted}')\n 
assert reconstituted.state.code == StateEnum.LIVING", "def test_type_builder_handles_enumerations_with_uppercase_values():\n schema = [\n SchemaEnum(\n name=\"UppercaseEnum\",\n value_type=\"string\",\n values=[\"HELLO_WORLD\", \"UPPERCASE_VALUE\", \"SOME_VALUE\"],\n ),\n ]\n\n build_result = build_types(schema)\n\n assert len(build_result) == 1\n assert build_result[0] == EnumDefinition(\n name=\"UppercaseEnum\",\n values=[\n (\"HELLO_WORLD\", \"HELLO_WORLD\"),\n (\"UPPERCASE_VALUE\", \"UPPERCASE_VALUE\"),\n (\"SOME_VALUE\", \"SOME_VALUE\"),\n ],\n depends_on=set(),\n )", "def testEnumField_ForwardReference(self):\n global MyMessage\n global ForwardEnum\n global ForwardMessage\n try:\n class MyMessage(messages.Message):\n\n forward = messages.EnumField('ForwardEnum', 1)\n nested = messages.EnumField('ForwardMessage.NestedEnum', 2)\n inner = messages.EnumField('Inner', 3)\n\n class Inner(messages.Enum):\n pass\n\n class ForwardEnum(messages.Enum):\n pass\n\n class ForwardMessage(messages.Message):\n\n class NestedEnum(messages.Enum):\n pass\n\n self.assertEquals(ForwardEnum,\n MyMessage.field_by_name('forward').type)\n\n self.assertEquals(ForwardMessage.NestedEnum,\n MyMessage.field_by_name('nested').type)\n\n self.assertEquals(MyMessage.Inner,\n MyMessage.field_by_name('inner').type)\n finally:\n try:\n del MyMessage\n del ForwardEnum\n del ForwardMessage\n except: # pylint:disable=bare-except\n pass", "def enum(*sequential, **named):\n enums = dict(zip(sequential, range(len(sequential))), **named)\n reverse = dict((value, key) for key, value in list(enums.items()))\n enums['reverse_mapping'] = reverse\n return type(str('Enum'), (), enums)", "def test_inheritance_ordinals():\n # Subclassing non-empty (i.e. concrete) enum is already disallowed;\n # but, rather than disallow access to the concrete property on abstract\n # classes, instead mimic other enum properties, which just return their\n # empty results, and ensure that the getter works correctly.\n class SpinOrderedQuarks(OrderedStrEnum):\n\n UP = 'u'\n DOWN = 'd'\n CHARM = 'c'\n STRANGE = 's'\n TOP = 't'\n BOTTOM = 'b'\n\n # Test base first to challenge cache inheritance\n base_ordinals = OrderedStrEnum.__member_ordinals__\n tools.eq_(base_ordinals, {})\n\n quark_ordinals = SpinOrderedQuarks.__member_ordinals__\n tools.assert_true(quark_ordinals)\n tools.assert_is_instance(quark_ordinals, dict)\n tools.assert_not_equal(quark_ordinals, base_ordinals)", "def __invert__(self):\n return self.negated()", "def __invert__(cls):\n try:\n return cls.__inverse__\n except:\n # TODO: more descriptive\n raise err.VinoError('no inverse class was set')", "def list_reverse_enum(string):\n for index in reversed(xrange(len(string))):\n yield index, string[index]", "def opposite(self):\n if self.direction == 8: return Direction(8)\n n = self.direction + 4\n if n >= 8: n -= 8\n return Direction(n)", "def reverse(self): # real signature unknown; restored from __doc__\n pass", "def __invert__(self) -> Seq:\n return self.reverse_complement()", "def on_enum_parse(self, ctx):\n return None" ]
[ "0.6425592", "0.6408745", "0.585184", "0.58211386", "0.5819746", "0.5771788", "0.5760759", "0.5676922", "0.56572753", "0.56236005", "0.5623336", "0.56230915", "0.5613023", "0.55931896", "0.5510386", "0.5510211", "0.55055887", "0.54664016", "0.54496217", "0.54444015", "0.5390334", "0.5369443", "0.53691167", "0.5356327", "0.5356006", "0.5354796", "0.5349598", "0.5346305", "0.531686", "0.5304957" ]
0.79641455
0
Count the number of occurrences of "A", "C", "G" and "T" in dna_string.
def n_count(dna_string):
    a_count = 0
    c_count = 0
    g_count = 0
    t_count = 0
    for nuc in dna_string:
        if nuc.upper() == 'A':
            a_count += 1
        elif nuc.upper() == 'C':
            c_count += 1
        elif nuc.upper() == 'G':
            g_count += 1
        elif nuc.upper() == 'T':
            t_count += 1
        else:
            continue
    print(a_count, c_count, g_count, t_count)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def count_nucleotides(dna, nucleotide):\n count = 0\n for char in dna:\n if char == nucleotide:\n count += 1\n return count", "def count_nucleotides(dna, nucleotide):\n return dna.count(nucleotide)", "def test_counts(self):\n # test DNA seq\n orig = \"AACCGGTTAN-T\"\n seq = self.DNA(orig)\n # no gaps, no ambiguities\n got = seq.counts()\n expect = dict(A=3, C=2, G=2, T=3)\n self.assertEqual(dict(got), expect)\n # gaps allowed\n got = seq.counts(allow_gap=True)\n expect = dict(A=3, C=2, G=2, T=3)\n expect.update({\"-\": 1})\n self.assertEqual(dict(got), expect)\n # ambig allowed\n got = seq.counts(include_ambiguity=True)\n expect = dict(A=3, C=2, G=2, T=3, N=1)\n self.assertEqual(dict(got), expect)\n # ambig and gap allowed\n got = seq.counts(include_ambiguity=True, allow_gap=True)\n expect = dict(A=3, C=2, G=2, T=3, N=1)\n expect.update({\"-\": 1})\n self.assertEqual(dict(got), expect)\n\n # test DNA seq motif length of 2\n got = seq.counts(motif_length=2)\n expect = dict(AA=1, CC=1, GG=1, TT=1)\n self.assertEqual(dict(got), expect)\n # gap allowed\n got = seq.counts(motif_length=2, allow_gap=True)\n expect = dict(AA=1, CC=1, GG=1, TT=1)\n expect.update({\"-T\": 1})\n # ambig allowed\n got = seq.counts(motif_length=2, include_ambiguity=True)\n expect = dict(AA=1, CC=1, GG=1, TT=1, AN=1)\n self.assertEqual(dict(got), expect)\n # ambig and gap allowed\n got = seq.counts(motif_length=2, include_ambiguity=True, allow_gap=True)\n expect = dict(AA=1, CC=1, GG=1, TT=1, AN=1)\n expect.update({\"-T\": 1})\n self.assertEqual(dict(got), expect)\n\n # test base -- no concept of ambiguity, but understands gap\n orig = \"AACCGGTTAN-T\"\n seq = self.SEQ(orig)\n got = seq.counts()\n expect = dict(A=3, C=2, G=2, T=3, N=1)\n self.assertEqual(dict(got), expect)\n\n # handle '?'\n orig = \"AACCGGTTAN-T?\"\n seq = self.DNA(orig)\n got = seq.counts()\n expect = dict(A=3, C=2, G=2, T=3)\n self.assertEqual(dict(got), expect)\n got = seq.counts(allow_gap=True, include_ambiguity=True)\n expect.update({\"-\": 1, \"N\": 1, \"?\": 1})\n self.assertEqual(dict(got), expect)", "def test_count_gaps(self):\n self.assertEqual(self.RNA(\"\").count_gaps(), 0)\n self.assertEqual(self.RNA(\"ACUGUCAGUACGHSDKCUCDNNS\").count_gaps(), 0)\n self.assertEqual(self.RNA(\"GUACGUACAKDC-SDHDSK\").count_gaps(), 1)\n self.assertEqual(self.RNA(\"-DSHUHDS\").count_gaps(), 1)\n self.assertEqual(self.RNA(\"UACHASADS-\").count_gaps(), 1)\n self.assertEqual(self.RNA(\"---CGAUgCAU---ACGHc---ACGUCAGU---\").count_gaps(), 12)", "def is_valid_sequence(dna):\n num_char = 0\n \n for char in dna:\n if not char in 'ATCG':\n num_char += 1\n\n return num_char == 0", "def count_nucleobases(dnas, nucleobase):\n total_nucleobase = nucleobase + \": \"\n\n for index in range(len(dnas[0])):\n total = 0\n\n for dna in dnas:\n if dna[index] == nucleobase:\n total += 1\n total_nucleobase += str(total) + \" \"\n\n return total_nucleobase", "def count_ambig(curr_seq, valid_chars='ATCG'):\r\n up_seq = curr_seq.upper()\r\n total = 0\r\n for vchar in valid_chars:\r\n total += up_seq.count(vchar)\r\n return len(curr_seq) - total", "def custom_count(string1, search_string):\n count = 0\n for index in range(0, len(string1)):\n phrase = string1[index:index + len(search_string)]\n count += (phrase == search_string)\n return count", "def get_char_counts(string):\n counts = {}\n for char in iter(string):\n counts[char] = 1 if not char in counts.keys() else counts[char] + 1\n return counts", "def test_count_ambig(self):\r\n s = 'ACC'\r\n s2 = 'RNY'\r\n s3 = 'NA'\r\n 
self.assertEqual(count_ambig(s), 0)\r\n self.assertEqual(count_ambig(s2), 3)\r\n self.assertEqual(count_ambig(s3), 1)\r\n self.assertEqual(count_ambig(''), 0)", "def count_abbas(str):\r\n i = 0\r\n count = 0\r\n for i in range(0, len(str)):\r\n if str.startswith(\"abba\", i):\r\n count += 1\r\n return count", "def get_length(dna):\n return len (dna)", "def count(pattern, string, overlapping=True, sensitive=True, regexp=False):\n return len(SE.findall(pattern, string, overlapping, sensitive, regexp))", "def countingValleys(n, s):\n\n elevation = 0\n valleys = 0\n\n for char in s:\n if char == 'U':\n elevation +=1\n elif char == 'D':\n if elevation == 0:\n valleys += 1\n elevation -= 1\n\n return valleys", "def numberOfSubstrings(self, s: str) -> int:\n i = 0\n res = 0\n d = {c:0 for c in 'abc'}\n \n for j, val in enumerate(s):\n d[val] += 1\n while all(d.values()):\n d[s[i]] -= 1\n i += 1\n res += i\n \n return res", "def letterSpace(text):\n\n count = 0\n alphabet = string.ascii_lowercase + string.ascii_uppercase\n for char in text:\n if char in alphabet:\n count += 1\n return count", "def count_nucleotides(strand: str) -> dict:\n return dict(Counter(strand))", "def pattern_to_number(dna):\n assert (is_dna(dna))\n idx = 'ACGT'\n return sum(idx.index(dna_base) * 4 ** i for i, dna_base in enumerate(dna[::-1]))", "def count_nucleotides(mat):\n\n final_counts = np.ones((4, mat.shape[1]))\n\n for i in range(len(mat[0, :])):\n cur_nucleotides = np.ones((4, 1))\n a_count = 0\n c_count = 0\n g_count = 0\n t_count = 0\n for j in range(len(mat[:, 0])):\n if mat[j, i] == 'A':\n a_count = a_count + 1\n elif mat[j, i] == 'C':\n c_count = c_count + 1\n elif mat[j, i] == 'G':\n g_count = g_count + 1\n elif mat[j, i] == 'T':\n t_count = t_count + 1\n cur_nucleotides = np.array([a_count, c_count, g_count, t_count])\n final_counts[:, i] = cur_nucleotides\n return final_counts", "def getAlphaNumCharCount(sdata):\n\tacount = 0\n\tncount = 0\n\tscount = 0\n\tocount = 0\n\tassertEqual(type(sdata), str, \"input must be string\")\n\tfor c in sdata:\n\t\tif c.isnumeric():\n\t\t\tncount += 1\n\t\telif c.isalpha():\n\t\t\tacount += 1\n\t\telif c.isspace():\n\t\t\tscount += 1\n\t\telse:\n\t\t\tocount += 1\n\tr = (acount, ncount, ocount)\n\treturn r", "def get_length(dna):\n return len(dna)", "def count(self):\n string_count = 0\n string = ['abc', 'xyz', 'aba', '1221']\n for elements in string:\n length = len(elements) \n if length >= 2:\n if elements[0] == elements[-1]: \n string_count +=1\n print(\"String count :\", string_count)", "def n_content(dna):\n ncount = dna.count('N') + dna.count('n') + \\\n dna.count('X') + dna.count('x')\n if ncount == 0:\n return 0.0\n return float(ncount) / float(len(dna))", "def count_nucleic_acids(self):\n n = 0\n for chain in self.iter_chains():\n n += chain.count_nucleic_acids()\n return n", "def countLetters(inputtedString):\n\n counted = 0\n inBetween = list(inputtedString)\n i = 0\n alphabet = 'abcdefghijklmnopqrstuvwxyz'\n\n while i < len(inBetween):\n if inBetween[i] in alphabet:\n counted += 1\n i += 1\n\n return counted", "def count(seats: List[str]) -> int:\n # Map dimensions\n m = len(seats)\n n = len(seats[0]) if m else 0\n \n count = 0\n \n # Count locations filled with \"#\"\n for i in range(m):\n for j in range(n):\n if seats[i][j] == \"#\":\n count += 1\n\n return count", "def count(self, base):\n return self._dna.count(base)", "def n_neg(seq):\n\n # Convert to all upper case\n seq = seq.upper()\n\n # Check the valiality of sequence\n for aa in seq:\n if aa not in 
bioinfo_dicts.aa.keys():\n raise RuntimeError(aa + ' is not a valid amino acid.')\n # Count E and D and return Count\n return seq.count('D') + seq.count('E')", "def get_letter_counts(str_):\n return dict(Counter(str_))", "def tally_letters(string):\n output = dict()\n for char in list(string):\n freq = output.get(char, 0)\n output[char]= freq+1\n return output" ]
[ "0.7195303", "0.68514836", "0.6521141", "0.6516966", "0.64682543", "0.6450685", "0.63887256", "0.62657034", "0.62168676", "0.6204442", "0.6142755", "0.6090269", "0.60803914", "0.60583884", "0.6036575", "0.5997457", "0.5997136", "0.5991749", "0.5989443", "0.59876466", "0.5983888", "0.5962644", "0.59622985", "0.5960882", "0.5958983", "0.595663", "0.5950293", "0.5949216", "0.59397656", "0.59349614" ]
0.84504104
0
Loads all the spine meshes from the spines directory
def load_spine_meshes(self):
    # Load all the template spines and ignore the verbose messages of loading
    nmv.utilities.disable_std_output()
    self.spine_meshes = nmv.file.load_spines(
        nmv.consts.Paths.SPINES_MESHES_HQ_DIRECTORY)
    nmv.utilities.enable_std_output()

    # Create the material
    material = nmv.shading.create_material(
        name='%spine_material',
        color=self.options.mesh.spines_color,
        material_type=self.options.mesh.material)

    # Apply the shader
    for spine_object in self.spine_meshes:
        # Apply the shader to each spine mesh
        nmv.shading.set_material_to_object(spine_object, material)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_meshes(self):\n for meta_mesh in self.gltf.meshes:\n # Returns a list of meshes\n meshes = meta_mesh.load(self.materials)\n self.meshes.append(meshes)\n\n for mesh in meshes:\n self.scene.meshes.append(mesh)", "def add_spines_to_morphology(self):\n\n # A list of the data of all the spines that will be added to the neuron morphology\n spines_list = list()\n\n # Remove the internal samples, or the samples that intersect the soma at the first\n # section and each arbor\n nmv.skeleton.ops.apply_operation_to_morphology_partially(\n *[self.morphology,\n self.options.morphology.axon_branch_order,\n self.options.morphology.basal_dendrites_branch_order,\n self.options.morphology.apical_dendrite_branch_order,\n nmv.skeleton.ops.get_random_spines_on_section,\n self.options.mesh.random_spines_percentage,\n spines_list])\n\n # Keep a list of all the spines objects\n spines_objects = []\n\n # Load all the template spines and ignore the verbose messages of loading\n self.load_spine_meshes()\n\n nmv.logger.info('Cloning and integrating spines')\n building_timer = nmv.utilities.timer.Timer()\n building_timer.start()\n\n # Load the synapses from the file\n number_spines = len(spines_list)\n for i, spine in enumerate(spines_list):\n\n # Show progress\n nmv.utilities.time_line.show_iteration_progress('\\t* Spines', i, number_spines)\n\n # Emanate a spine\n spine_object = self.emanate_spine(spine, i)\n\n # Add the object to the list\n spines_objects.append(spine_object)\n\n # Done\n nmv.utilities.time_line.show_iteration_progress(\n '\\t* Spines', number_spines, number_spines, done=True)\n\n # Report the time\n building_timer.end()\n nmv.logger.info('Spines: [%f] seconds' % building_timer.duration())\n\n # Delete the template spines\n nmv.scene.ops.delete_list_objects(self.spine_meshes)\n\n # Return the spines objects list\n return spines_objects", "def importFolder(self, path, unify=True):\n self.fnames = [f for f in os.listdir(path) if f.endswith('.stl')]\n self.shapes = [AmpObject(os.path.join(path, f), 'limb', unify=unify) for f in self.fnames]\n for s in self.shapes:\n s.lp_smooth(3, brim=True)", "def load_meshes_from(self, med_fname):\n from salome import lcc\n from SMESH import SMESH_Gen\n sstd = self.sstd\n ceng = lcc.FindOrLoadComponent(\"FactoryServer\", \"SMESH\")\n eng = ceng._narrow(SMESH_Gen)\n eng.SetCurrentStudy(sstd)\n cmeshes = eng.CreateMeshesFromMED(med_fname)[0]\n meshes = []\n for cmesh in cmeshes:\n meshes.append(self.attach_mesh_from(cmesh))\n return meshes", "def load_data(self):\n\t\ti = 0\n\n\t\tpaths = glob.glob(self.file_path+'/rollout_*')\n\t\tself.rollouts = []\n\n\n\t\tfor path in paths:\n\t\t\tdata_point = np.load(path,encoding='latin1')\n\t\t\tself.rollouts.append(data_point)\n\n\t\treturn paths", "def _load_forks(self):\n self.forks = self.p_constants[\"NPHILOSOPHERS\"] * [0]\n for i in xrange(self.p_constants[\"NPHILOSOPHERS\"]):\n x, y, angle = self._get_fork_coord(i)\n self.forks[i] = self._load_model(\n \"fork\", scale=[3, 3, 3], pos=[x, y, self.fork_height], H=rad2deg(angle) + 90)", "def __load_geo(self):\n pass\n # process any splines? 
and turn them into arcs\n # http://www.mathopenref.com/constcirclecenter.html\n # find max dist between points\n # double it\n # select two segments\n # draw normal lines\n # find intersections, that is the center", "def read_shorelines(dir):\n shps = glob('{}/**/*.shp'.format(dir), recursive=True)\n df = gpd.GeoDataFrame(pd.concat([gpd.read_file(f) for f in shps]))\n return df", "def load_splines(fname):\n fname = str(fname)\n fname = abs_fname_(fname)\n\n sr = splinelibpy.Reader()\n\n ext = os.path.splitext(fname)[1]\n \n if ext == \".iges\":\n loaded_splines = sr.read_iges(fname)\n elif ext == \".xml\":\n loaded_splines = sr.read_xml(fname)\n elif ext == \".itd\":\n loaded_splines = sr.read_irit(fname)\n else:\n raise ImportError(\n \"We can only import < .iges | .xml | .itd > spline files\"\n )\n\n splines = []\n # Format s => [weights, degrees, knot_vectors, control_points]\n for s in loaded_splines:\n if s[0] is None:\n # Bbspline.\n tmp_spline = BSpline()\n tmp_spline.degrees = s[1]\n tmp_spline.knot_vectors = s[2]\n tmp_spline.control_points = s[3]\n\n splines.append(tmp_spline)\n \n else:\n # Make nurbs\n tmp_spline = NURBS()\n tmp_spline.weights = s[0]\n tmp_spline.degrees = s[1]\n tmp_spline.knot_vectors = s[2]\n tmp_spline.control_points = s[3]\n\n splines.append(tmp_spline)\n\n return splines", "def load_many_images(paths):\r\n \r\n lpop = __g.pop\r\n \r\n for k in __g.keys()[1:]:\r\n lpop(k)\r\n \r\n if type(paths) == str or type(paths) == tuple and len(paths) == 2 and type(paths[0]) == int:\r\n __g[1] = Surface(paths)\r\n elif type(paths) == list:\r\n for p in range(1, len(paths) + 1):\r\n __g[p] = Surface(paths[p-1])", "def loadMeltCurves(self, fluorescenceFilepath, contentsMapFilepath):\n #populates the relevant instance variables for the analysis\n self.name = fluorescenceFilepath\n self.plate = DSFPlate(fluorescenceFilepath, contentsMapFilepath)\n self.wells = self.plate.wells\n self.originalPlate = DSFPlate(fluorescenceFilepath, contentsMapFilepath)\n self.removeOutliers()\n self.findMeanCurves()\n return", "def load(self):\n self.index = nmslib.init(method='hnsw', space='cosinesimil')\n self.index.loadIndex(c.index_path('hnsw.index'))\n self.ys = joblib.load(\"%s.ys\" % self.index_file_prefix)", "def _load_all_cubes(self, files_to_load):\n if self.process_workers > 1:\n arguments = [[self, load_file] for load_file in files_to_load]\n pool = multiprocessing.Pool(processes=self.process_workers)\n try:\n all_cubelists = pool.map(run_load_file, arguments)\n pool.close()\n pool.join()\n except KeyboardInterrupt:\n pool.terminate()\n else:\n all_cubelists = []\n for load_file in files_to_load:\n cubelist = self._load_file(load_file)\n if cubelist:\n all_cubelists.append(cubelist)\n \n all_cubes = []\n for cubelist in all_cubelists:\n for cube in cubelist:\n all_cubes.append(cube)\n\n if len(all_cubes) == 0:\n raise UserWarning('No data loaded.')\n \n # Gather universal information from the first cube.\n if self.xy_coords is None:\n self.xy_coords = [coord.name() \n for coord in get_xy_coords(\n all_cubes[0])]\n if self._area_inst.bounds_range is None:\n self._area_inst.bounds_range = self._area_inst.\\\n get_cube_area_bounds(all_cubes[0],\n self.xy_coords)\n if self.area_bounds is None:\n self.area_bounds = self._area_inst.get_cube_area_bounds(\n all_cubes[0],\n self.xy_coords)\n self.time_unit = all_cubes[0].coord(self.time_coord).units\n \n return iris.cube.CubeList(all_cubes)", "def load_from_file(self):\n with open(self.filename) as infile:\n for x, line in 
enumerate(infile):\n for y, c in enumerate(line):\n if c == path_char:\n self.paths.append(Position(y * size_sprite, x * size_sprite))\n elif c == start_char:\n self.start = Position(y * size_sprite, x * size_sprite)\n self.paths.append(Position(y * size_sprite, x * size_sprite))\n elif c == end_char:\n self.end = Position(y * size_sprite, x * size_sprite)\n self.paths.append(Position(y * size_sprite, x * size_sprite))\n elif c == '0':\n self.wall0.append(Position(y * size_sprite, x * size_sprite))\n elif c == '1':\n self.wall1.append(Position(y * size_sprite, x * size_sprite))\n elif c == '2':\n self.wall2.append(Position(y * size_sprite, x * size_sprite))\n elif c == '3':\n self.wall3.append(Position(y * size_sprite, x * size_sprite))\n elif c == '4':\n self.wall4.append(Position(y * size_sprite, x * size_sprite))\n elif c == '5':\n self.wall5.append(Position(y * size_sprite, x * size_sprite))\n elif c == '6':\n self.wall6.append(Position(y * size_sprite, x * size_sprite))\n elif c == '7':\n self.wall7.append(Position(y * size_sprite, x * size_sprite))\n elif c == '8':\n self.wall8.append(Position(y * size_sprite, x * size_sprite))\n elif c == '9':\n self.wall9.append(Position(y * size_sprite, x * size_sprite))\n # -tc- Le placement aléatoire des objets se fait bien une seule fois,\n # -tc- je ne vois pas de soucis ici\n self.objects_to_find = sample(self.paths, 3)\n # -tc- Ne pas utiliser print pour débugger mais un debugger\n print(self.paths)\n\n # -tc- return inutile et pas utilisé. Ce n'est pas comme cela qu'on procède pour retourner \n # -tc- plusieurs valeurs.\n return self.paths and self.wall0 and self.wall1 and self.wall2 and self.wall3 and self.wall4 and self.wall5 and self.wall6 and self.wall7 and self.wall8 and self.wall9 and self.objects_to_find and self.start and self.end", "def load(gmshfile, scale, dx, dy, dz):\n\n # noinspection PyPep8Naming,PyShadowingNames\n def getAveNormals(nodes, elems):\n \"\"\"Calcula las normales promedio por cada vertice\"\"\"\n nodetrilist = []\n for nodenum in range(len(nodes)):\n nodetrilist.append([])\n for elemnum in range(len(elems)):\n if nodenum in elems[elemnum]:\n nodetrilist[nodenum].append(elemnum)\n avenorms = []\n for tri in nodetrilist:\n aveNi = 0.0\n aveNj = 0.0\n aveNk = 0.0\n denom = max(float(len(tri)), 1)\n for elem in tri:\n vert1 = [nodes[elems[elem][0]][0], nodes[elems[elem][0]][1],\n nodes[elems[elem][0]][2]]\n vert2 = [nodes[elems[elem][1]][0], nodes[elems[elem][1]][1],\n nodes[elems[elem][1]][2]]\n vert3 = [nodes[elems[elem][2]][0], nodes[elems[elem][2]][1],\n nodes[elems[elem][2]][2]]\n normals = getNormals(vert1, vert2, vert3)\n aveNi += normals[0]\n aveNj += normals[1]\n aveNk += normals[2]\n avenorms.append([aveNi / denom, aveNj / denom, aveNk / denom])\n return avenorms\n\n # noinspection PyPep8Naming\n def getNormals(vertA, vertB, vertC):\n \"\"\"Calcula las normales por cada 3 vertices\"\"\"\n xA = vertA[0]\n xB = vertB[0]\n xC = vertC[0]\n yA = vertA[1]\n yB = vertB[1]\n yC = vertC[1]\n zA = vertA[2]\n zB = vertB[2]\n zC = vertC[2]\n ABx = xB - xA\n ABy = yB - yA\n ABz = zB - zA\n BCx = xC - xB\n BCy = yC - yB\n BCz = zC - zB\n Nx = ABy * BCz - ABz * BCy\n Ny = ABz * BCx - ABx * BCz\n Nz = ABx * BCy - ABy * BCx\n VecMag = math.sqrt(Nx ** 2 + Ny ** 2 + Nz ** 2)\n Ni = Nx / VecMag\n Nj = Ny / VecMag\n Nk = Nz / VecMag\n return [Ni, Nj, Nk]\n\n # Lee el archivo\n try:\n infile = open(gmshfile)\n except:\n raise Exception(\"el archivo del modelo no existe\")\n\n # Crea el modeo\n try:\n gmshlines = 
infile.readlines()\n readnodes = False\n readelems = False\n skipline = 0\n elems = []\n lnum = 0\n nnodes = 0\n for line in gmshlines:\n if \"$Nodes\" in line:\n readnodes = True\n skipline = 2\n nnodes = int(gmshlines[lnum + 1].strip())\n nodes = []\n for i in range(nnodes):\n nodes.append(99999.9)\n elif \"$EndNodes\" in line:\n readnodes = False\n skipline = 1\n elif \"$Elements\" in line:\n readelems = True\n skipline = 2\n elif \"$EndElements\" in line:\n readelems = False\n skipline = 1\n if skipline < 1:\n if readnodes:\n nXYZ = line.strip().split()\n nodenum = int(nXYZ[0]) - 1\n nX = float(nXYZ[1]) * scale + dx\n nY = float(nXYZ[2]) * scale + dy\n nZ = float(nXYZ[3]) * scale + dz\n if neg_normal:\n nZ *= -1\n nodes[nodenum] = [nX, nY, nZ]\n elif readelems:\n n123 = line.split()\n if n123[1] == \"2\":\n n1 = int(n123[-3]) - 1\n n2 = int(n123[-1]) - 1\n n3 = int(n123[-2]) - 1\n elems.append([n1, n2, n3])\n else:\n skipline -= 1\n lnum += 1\n triarray = []\n normarray = []\n avenorms = []\n nodeavenorms = getAveNormals(nodes, elems)\n for elem in elems:\n vert1 = [nodes[elem[0]][0], nodes[elem[0]][1],\n nodes[elem[0]][2]]\n vert2 = [nodes[elem[1]][0], nodes[elem[1]][1],\n nodes[elem[1]][2]]\n vert3 = [nodes[elem[2]][0], nodes[elem[2]][1],\n nodes[elem[2]][2]]\n avenorm0 = nodeavenorms[elem[0]]\n avenorm1 = nodeavenorms[elem[1]]\n avenorm2 = nodeavenorms[elem[2]]\n normals = getNormals(vert1, vert2, vert3)\n triarray.append(vert1)\n triarray.append(vert2)\n triarray.append(vert3)\n normarray.append(normals)\n normarray.append(normals)\n normarray.append(normals)\n avenorms.append(avenorm0)\n avenorms.append(avenorm1)\n avenorms.append(avenorm2)\n return triarray, normarray, avenorms\n\n except:\n raise Exception(\"error al cargar el modelo\")", "def load_paths(self, paths):\n paths = list(str(p) for p in paths)\n\n # This is where more cleverness will go if/when needed.\n\n return SimpleFitsCollection(\n paths,\n hdu_index=self.hdu_index,\n blankval=self.blankval,\n )", "def load_gloves(self, dir):\n self.word2vec = {}\n glove_file = os.path.join(dir, 'glove.6B.'+str(self.dim_embed)+'d.txt')\n with open(glove_file, encoding=\"utf8\") as f:\n for line in f:\n l = line.split()\n self.word2vec[l[0]] = [float(x) for x in l[1:]]\n self.word2vec[\"<RARE>\"] = [0. for i in range(self.dim_embed)]\n self.word2vec[\"<EMPTY>\"] = [0. 
for i in range(self.dim_embed)]", "def load_poems(self):\n file = open(self.name, \"r\")\n content = file.readlines()\n for i in content:\n self.add_msg_and_index(i.strip())", "def load_slurm_folder(p):\n filter_function = lambda f: True if \".out\" in f else False\n slurm_dict = {\"runs\": []}\n for f in filter(filter_function, os.listdir(p)):\n slurm_dict[\"runs\"].append(load_slurm_data(os.path.join(p, f)))\n exit(\"Success!\")", "def get_meshes(path='../../../models', cutoff=None):\r\n\r\n bodies = collections.deque()\r\n for file_name in os.listdir(path):\r\n try:\r\n mesh = trimesh.load(os.path.join(path, file_name))\r\n split = mesh.split()\r\n bodies.extend(split)\r\n if len(split) > 1:\r\n bodies.append(mesh)\r\n except BaseException:\r\n continue\r\n\r\n if cutoff is not None and len(bodies) > cutoff:\r\n return np.array(bodies)\r\n\r\n for _i in range(100):\r\n cylinder = trimesh.creation.cylinder(\r\n radius=np.random.random() * 100,\r\n height=np.random.random() * 1000,\r\n sections=int(np.clip(np.random.random() * 720,\r\n 20,\r\n 720)))\r\n\r\n capsule = trimesh.creation.capsule(\r\n radius=np.random.random() * 100,\r\n height=np.random.random() * 1000,\r\n count=np.clip(np.random.random(2) * 720,\r\n 20,\r\n 720).astype(int))\r\n bodies.append(cylinder)\r\n bodies.append(capsule)\r\n for _i in range(10):\r\n bodies.append(trimesh.creation.random_soup(\r\n int(np.clip(np.random.random() * 1000,\r\n 20,\r\n 1000))))\r\n bodies.append(trimesh.creation.icosphere())\r\n bodies.append(trimesh.creation.uv_sphere())\r\n bodies.append(trimesh.creation.icosahedron())\r\n\r\n return np.array(bodies)", "def load_senzory_locations(file_name):\n check_file_existence(file_name)\n _, ext = os.path.splitext(file_name)\n if ext == '.mat':\n return load_senzory_locations_from_matlab(file_name)\n elif ext == '.csv':\n return load_senzory_locations_from_csv(file_name)\n else:\n raise ValueError(\"Unknown file type at {}. 
Expected .mat or .csv\".format(file_name))", "def _load_sources(self):\n self.point_sources= []\n if os.path.exists(os.path.join(self.folder,'pickle.zip')):\n pzip = zipfile.ZipFile(os.path.join(self.folder,'pickle.zip'))\n files = ['pickle/HP12_%04d.pickle' %i for i in range(1728)]\n assert all(f in pzip.namelist() for f in files), 'Improper model zip file'\n opener = pzip.open\n else:\n files = glob.glob(os.path.join(self.folder, 'pickle', '*.pickle'))\n files.sort()\n opener = open\n self.nside = int(np.sqrt(len(files)/12))\n if len(files) != 12*self.nside**2:\n msg = 'Number of pickled ROI files, %d, found in folder %s, not consistent with HEALpix' \\\n % (len(files),os.path.join(self.folder, 'pickle'))\n raise Exception(msg)\n \n ####self.global_sources = sources.GlobalSourceList() # allocate list to index parameters for global sources\n self.extended_sources=[] # list of unique extended sources\n self.changed=set() # to keep track of extended models that are different from catalog\n moved=0\n nfreed = 0\n self.tagged=set()\n source_names =[]\n for i,file in enumerate(files):\n p = pickle.load(opener(file))\n index = int(os.path.splitext(file)[0][-4:])\n assert i==index, 'logic error: file name %s inconsistent with expected index %d' % (file, i)\n roi_sources = p.get('sources', {}) # don't know why this needed\n extended_names = {} if (self.__dict__.get('extended_catalog') is None) else self.extended_catalog.names\n for key,item in roi_sources.items():\n if key in extended_names: continue\n if key in source_names:\n #if not self.quiet: print ('SkyModel warning: source with name %s in ROI %d duplicates previous entry: ignored'%(key, i))\n continue\n source_names.append(key)\n skydir = item['skydir']\n if self.update_positions is not None:\n ellipse = item.get('ellipse', None)\n ts = item['ts']\n if ellipse is not None and not np.any(np.isnan(ellipse)) :\n fit_ra, fit_dec, a, b, ang, qual, delta_ts = ellipse\n if qual<5 and a < 0.2 and \\\n ts>self.update_positions and delta_ts>0.1:\n skydir = SkyDir(float(fit_ra),float(fit_dec))\n moved +=1\n self.tagged.add(i)\n \n ps = sources.PointSource(name=key,\n skydir=skydir, model= sources.convert_model(item['model']),\n ts=item['ts'],band_ts=item['band_ts'], index=index)\n if sources.validate(ps,self.nside, self.filter):\n self._check_position(ps) # check that it is not coincident with previous source(warning for now?)\n self.point_sources.append( ps)\n # make a list of extended sources used in the model \n names = p.get('diffuse_names')\n for name, oldmodel in zip(names, p['diffuse']):\n model = sources.convert_model(oldmodel) # convert from old Model version if necessary \n key = name.split('_')[0]\n if key in self.diffuse_dict:\n self.diffuse_dict.add_model(index, name, model)\n elif self.extended_catalog_name=='ignore': \n continue\n else:\n try:\n es = self.extended_catalog.lookup(name) if self.extended_catalog is not None else None\n except Exception as msg:\n print ('Skymodel: Failed to create model for %s' %name)\n raise\n if es is None:\n #raise Exception( 'Extended source %s not found in extended catalog' %name)\n print ('SkyModel warning: Extended source %s not found in extended catalog, removing' %name)\n continue\n if self.hpindex(es.skydir)!=index: continue\n \n if es.model.name!=model.name:\n if name not in self.changed:\n if not self.quiet: print ('SkyModel warning: catalog model %s changed from %s for source %s: keeping change'%\\\n (es.model.name, model.name, name))\n self.changed.add(name)\n es.smodel=es.model=model #update 
with current fit values always\n if sources.validate(es,self.nside, self.filter): #lambda x: True): \n self.extended_sources.append(es)\n # check for new extended sources not yet in model\n self._check_for_extended()\n if self.update_positions and moved>0:\n print ('updated positions of %d sources, healpix ids in tagged' % moved)", "def load_poses(self):\n print('Loading poses for sequence ' + self.sequence + '...')\n\n pose_file = os.path.join(self.pose_path, self.sequence + '.txt')\n\n # Read and parse the poses\n try:\n self.T_w_cam0 = []\n with open(pose_file, 'r') as f:\n for line in f.readlines():\n T = np.fromstring(line, dtype=float, sep=' ')\n T = T.reshape(3, 4)\n T = np.vstack((T, [0, 0, 0, 1]))\n self.T_w_cam0.append(T)\n print('done.')\n\n except FileNotFoundError:\n print('Ground truth poses are not avaialble for sequence ' +\n self.sequence + '.')", "def load_vecs():\n global VECTORIZER\n global CECTORIZER\n\n v_file = os.path.join(TMP_DIR, 'vectorizer.pickle')\n d_file = os.path.join(TMP_DIR, 'dectorizer.pickle')\n f_file = os.path.join(TMP_DIR, 'freq.pickle')\n\n if os.path.isfile(v_file) and os.path.isfile(d_file):\n with open(v_file, 'rb') as f:\n VECTORIZER = pickle.load(f)\n with open(d_file, 'rb') as f:\n CECTORIZER = pickle.load(f)\n return True\n\n return False", "def test_load_system(self, name, file_name):\n json_file = os.path.join(self.geometry_dir, file_name)\n system = MultiBodySystem.from_json(json_file)", "def load_boxes(self, data):\r\n\r\n # worldbox represents the total map area\r\n self.worldbox = self.Box((0, 0), (len(data[0]) * self.cellwidth, len(data) * self.cellwidth))\r\n\r\n # create a box corresponding to each character/cell in the map file\r\n tl_x = 0\r\n tl_y = 0\r\n for row in data:\r\n for cell in row:\r\n if cell == \".\":\r\n self.wallboxes += [self.Box((tl_x, tl_y), (tl_x + self.cellwidth, tl_y + self.cellwidth))]\r\n elif cell == \"x\":\r\n self.targetboxes += [self.Box((tl_x, tl_y), (tl_x + self.cellwidth, tl_y + self.cellwidth))]\r\n tl_x += self.cellwidth\r\n tl_x = 0\r\n tl_y += self.cellwidth", "def load_all_from_path(self, path):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\t#111: handle expanded paths\n\t\tpath = os.path.abspath(path)\n\t\t#http://stackoverflow.com/questions/301134/dynamic-module-import-in-python\n\t\tif os.path.abspath(path) == self.shutit_main_dir:\n\t\t\treturn\n\t\tif not os.path.exists(path):\n\t\t\treturn\n\t\tif os.path.exists(path + '/STOPBUILD') and not self.build['ignorestop']:\n\t\t\tself.log('Ignoring directory: ' + path + ' as it has a STOPBUILD file in it. 
Pass --ignorestop to shutit run to override.',level=logging.DEBUG)\n\t\t\treturn\n\t\tfor sub in glob.glob(os.path.join(path, '*')):\n\t\t\tsubpath = os.path.join(path, sub)\n\t\t\tif os.path.isfile(subpath):\n\t\t\t\tself.load_mod_from_file(subpath)\n\t\t\telif os.path.isdir(subpath):\n\t\t\t\tself.load_all_from_path(subpath)", "def load_and_shape_data(self, path_to_load):\n\n # Initialize the dictionary for the loaded files\n loaded_file = {}\n if '.csv' in path_to_load:\n loaded_file[self.mode_name] = load_file(path_to_load)\n else:\n files_to_load = get_paths(path_to_load, ext='')\n # Load files and get names without file extension or directory\n for f in files_to_load:\n f_name = f.split('/')[-1].split('.')[0]\n if f_name in self.required_files or f_name in self.full_roi_list:\n loaded_file[f_name] = load_file(f)\n\n # Initialize matrices for features\n shaped_data = {}.fromkeys(self.required_files)\n for key in shaped_data:\n shaped_data[key] = np.zeros(self.required_files[key])\n\n # Populate matrices that were no initialized as []\n for key in shaped_data:\n if key == 'structure_masks':\n # Convert dictionary of masks into a tensor (necessary for tensorflow)\n for roi_idx, roi in enumerate(self.full_roi_list):\n if roi in loaded_file.keys():\n np.put(shaped_data[key][roi_idx], loaded_file[roi], int(1))#self.num_rois * loaded_file[roi] + roi_idx\n elif key == 'possible_dose_mask':\n np.put(shaped_data[key], loaded_file[key], int(1))\n elif key == 'voxel_dimensions':\n shaped_data[key] = loaded_file[key]\n else: # Files with shape\n np.put(shaped_data[key], loaded_file[key]['indices'], loaded_file[key]['data'])\n\n return shaped_data", "def populate_memes() -> None:\n\n for i in range(len(GD_MEMES)):\n GD_MEMES[i][0] = os.path.join(GD_PATH, GD_MEMES[i][0])", "def load_path(path: str) -> List[object]:\n if not os.path.isdir(path):\n raise ValueError(\"{} is not a directory\".format(path))\n\n objs: List[object] = list()\n for file_name in os.listdir(path):\n if os.path.splitext(file_name)[1].lower() in [\".yaml\", \".yml\"]:\n objs = objs + load_file(os.path.join(path, file_name))\n return objs" ]
[ "0.6232682", "0.61770344", "0.60120726", "0.5890612", "0.58509433", "0.5817586", "0.56453604", "0.5583689", "0.5582022", "0.5480866", "0.5473984", "0.54697216", "0.54214287", "0.54079974", "0.5405352", "0.5390358", "0.5379387", "0.53608745", "0.5338698", "0.5331834", "0.53287417", "0.53268695", "0.5312694", "0.5312554", "0.53122795", "0.5291602", "0.528979", "0.5280207", "0.52723175", "0.52444786" ]
0.8019577
0
Emanates a spine at a random position on the dendritic tree.
def emanate_spine(self, spine, id):

    # Select a random spine from the spines list
    spine_template = random.choice(self.spine_meshes)

    # Get a copy of the template and update it
    spine_object = nmv.scene.ops.duplicate_object(spine_template, id)

    # Rename the spine
    spine_object.name = '%s_spine_%d' % (self.options.morphology.label, id)

    # Scale the spine
    spine_scale = spine.size * random.uniform(1.25, 1.5)
    nmv.scene.ops.scale_object_uniformly(spine_object, spine_scale)

    # Translate the spine to the post synaptic position
    nmv.scene.ops.set_object_location(spine_object, spine.post_synaptic_position)

    # Rotate the spine towards the pre-synaptic point
    nmv.scene.ops.rotate_object_towards_target(
        spine_object, Vector((0, 0, -1)),
        spine.pre_synaptic_position * (1 if random.random() < 0.5 else -1))

    # Adjust the shading
    nmv.shading.adjust_material_uv(spine_object, 5)

    # Return a reference to the spine
    return spine_object
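As a usage-level illustration of the randomization in the document above (random template choice, 1.25-1.5x scale jitter, placement at the post-synaptic point, and a 50/50 sign flip of the orientation target), here is a small self-contained sketch. It does not touch Blender or nmv; the label and coordinate values are illustrative assumptions.

import random

def place_spine_instance(label, size, post_position, pre_position, index):
    # Same 1.25-1.5 scale jitter as the document code.
    scale = size * random.uniform(1.25, 1.5)

    # Same 50/50 flip of the pre-synaptic orientation target.
    sign = 1 if random.random() < 0.5 else -1
    target = tuple(sign * c for c in pre_position)

    return {'name': '%s_spine_%d' % (label, index),
            'scale': scale,
            'location': post_position,
            'orient_towards': target}

# e.g. place_spine_instance('L5_TTPC1', 0.8, (10.0, 4.0, 2.0), (10.5, 4.2, 2.1), 0)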
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mutate_point_trig(mutated_genome):\n seed = random.randint(0,1)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if seed == 0:\n move_point_trig(mutated_genome,index)\n else: #seed == 1:\n shift_point_trig(mutated_genome,index)", "def position_from_seed(seed):\n random.seed(seed)\n ascii_character_sum = sum(bytearray(seed, \"utf8\")) # Sums the ASCII values of every character\n offset = random.randint(1, 100)\n start_position = (math.log(ascii_character_sum / 100) + offset, math.log(ascii_character_sum / 100) + offset)\n end_positon = (start_position[0] + 100, start_position[1] + 100)\n square_position = (start_position, end_positon)\n print(square_position)\n \n return square_position", "def mutate_point_poly(mutated_genome):\n seed = random.randint(0,7)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if len(mutated_genome[index][2]) < 3: seed = 0\n if seed == 0:\n insert_point(mutated_genome,index)\n elif seed == 1:\n remove_point(mutated_genome,index)\n elif seed == 2:\n switch_points(mutated_genome,index)\n elif seed == 3:\n shuffle_points(mutated_genome,index)\n elif seed == 4:\n move_point(mutated_genome,index)\n elif seed == 5:\n shift_point(mutated_genome,index)\n elif seed == 6:\n increment_point(mutated_genome,index)\n else: #seed == 7:\n decrement_point(mutated_genome,index)", "def spine(self):", "def random_move(turtle, distance):\n angle = uniform(-90,90)\n d = uniform(0,distance)\n turtle.left(angle)\n turtle.forward(d)", "def rand_place_Ez(i):\n side = rnd.random()\n if side < 0.25:\n Ez_positions[i], Ez_angles[i] = (0,rnd.randrange(0,N,1)),rnd.randrange(-180, 180)*np.pi/180\n elif side < 0.5:\n Ez_positions[i], Ez_angles[i] = (N,rnd.randrange(0,N,1)),rnd.randrange(-180, 180)*np.pi/180\n elif side < 0.75:\n Ez_positions[i], Ez_angles[i] = (rnd.randrange(0,N,1),0),rnd.randrange(-180, 180)*np.pi/180\n else:\n Ez_positions[i], Ez_angles[i] = (rnd.randrange(0,N,1),1),rnd.randrange(-180, 180)*np.pi/180\n return", "def getRandomCoordinates( self, size ):\n if not self.mIsLoaded: self.__loadIndex()\n\n token = random.choice( self.mIndex.keys() ) \n strand = random.choice( (\"+\", \"-\") )\n pos_id, pos_seq, lcontig = self.mIndex[token][:3]\n rpos = random.randint( 0, lcontig )\n if random.choice( (\"True\", \"False\") ):\n start = rpos\n end = min(rpos + size, lcontig)\n else:\n start = max(0, rpos - size)\n end = rpos\n \n return token, strand, start, end", "def place_entrance(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__current_room = x, y # places adventurer in dungeon at start of game\r\n self.__entrance_room = x, y\r\n self.__maze[x][y].set_entrance(True)", "def mutate_point_rect(mutated_genome):\n seed = random.randint(0,1)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if seed == 0:\n move_point_rect(mutated_genome,index)\n else: #seed == 1:\n shift_point_rect(mutated_genome,index)", "def set_random_pos(self, which):\n available = [[r, c] for r, row in enumerate(self.maze)\n for c, value in enumerate(row) if value == ' ']\n choice = random.choice(available)\n if which == 'starting':\n self.current_pos = choice\n elif which == 'finishing':\n self.finish_pos = choice", "def RandomCoordinate(): \r\n return ReturnRounded(np.random.uniform(-10,10))", "def square_diamond(sx, sy, size, strong):\n if size == 1:\n return\n\n dsize = size/2\n ex = sx+size-1\n ey = sy+size-1\n # lets get math style\n\n\n # SQUARE STEP\n\n A = sx, sy\n B = ex, sy\n C = sx, ey\n D = ex, ey\n E = sx+dsize, 
sy+dsize\n F = sx, sy + dsize\n G = sx + dsize, sy\n H = ex, sy + dsize\n I = sx + dsize, ey\n\n def RAND(X):\n return random.randint(-strong, strong)\n\n ### for coasts dont disappear\n\n def normalize(add_z, X):\n if self[X] <= 0:\n if add_z > 0:\n add_z = -5\n else:\n if add_z <= 0:\n add_z = 5\n return add_z\n\n # Generate heights\n # E = (A+B+C+D) / 4 + RAND(d)\n # F = (A + C + E + E) / 4 + RAND(d)\n # G = (A + B + E + E) / 4 + RAND(d)\n # H = (B + D + E + E) / 4 + RAND(d)\n # I = (C + D + E + E) / 4 + RANS(d)\n\n ### E\n\n try:\n\n add_z = ((self[A] + self[B] + self[C] + self[D]) / 4) + RAND(E)\n\n except KeyError, e:\n print A, B, C, D, size, dsize, len(self)\n raise e\n\n\n self[E] = normalize(add_z, E)\n\n ### F\n\n add_z = (self[A] + self[C] + self[E] + self[E]) / 4 + RAND(F)\n\n self[F] = normalize(add_z, F)\n\n ### G\n\n add_z = (self[A] + self[B] + self[E] + self[E]) / 4 + RAND(G)\n\n self[G] = normalize(add_z, G)\n\n ### H\n\n add_z = (self[B] + self[D] + self[E] + self[E]) / 4 + RAND(H)\n\n self[H] = normalize(add_z, H)\n\n ### I\n add_z = (self[C] + self[D] + self[E] + self[E]) / 4 + RAND(I)\n\n self[I] = normalize(add_z, I)\n\n\n # DIAMOND STEP\n\n # get coordinates\n # 0 - x, 1 - y\n\n x, y = 0, 1\n\n dx = (G[x] - A[x]) / 2\n dy = (F[y] - A[y]) / 2\n\n J = A[x] + dx, A[y] + dy\n K = G[x] + dx, G[y] + dy\n L = F[x] + dx, F[y] + dy\n M = E[x] + dx, E[y] + dy\n\n N = A[x], A[y] + dy\n O = A[x] + dx, A[y]\n P = G[x], G[y] + dy\n Q = A[x] + dx, F[y]\n\n # Generate Heights\n # J = (A + G + F + E)/4 + RAND(d)\n # K = (G + B + E + H)/4 + RAND(d)\n # L = (F + E + C + I)/4 + RAND(d)\n # M = (E + H + I + D)/4 + RAND(d)\n\n # J\n add_z = ((self[A] + self[G] + self[F] + self[E]) / 4) + RAND(J)\n self[J] = normalize(add_z, J)\n\n # K\n add_z = ((self[G] + self[B] + self[E] + self[H]) / 4) + RAND(K)\n self[K] = normalize(add_z, K)\n\n # L\n add_z = ((self[F] + self[E] + self[C] + self[I]) / 4) + RAND(L)\n self[L] = normalize(add_z, L)\n\n # M\n add_z = ((self[E] + self[H] + self[I] + self[D]) / 4) + RAND(M)\n self[M] = normalize(add_z, M)\n\n # N = (K + A + J + F)/4 + RAND(d)\n # O = (L + A + G + J)/4 + RAND(d)\n # P = (J + G + K + E)/4 + RAND(d)\n # Q = (F + J + E + L)/4 + RAND(d)\n\n # N\n add_z = ((self[K] + self[A] + self[J] + self[F]) / 4) + RAND(N)\n self[N] = normalize(add_z, N)\n\n # O\n add_z = ((self[L] + self[A] + self[G] + self[J]) / 4) + RAND(O)\n self[O] = normalize(add_z, O)\n\n # P\n add_z = ((self[J] + self[G] + self[K] + self[E]) / 4) + RAND(P)\n self[P] = normalize(add_z, P)\n\n # Q\n add_z = ((self[F] + self[J] + self[E] + self[L]) / 4) + RAND(Q)\n self[Q] = normalize(add_z, Q)\n\n # N = (A + J + F)/3 + RAND(d)\n # O = (A + G + J)/3 + RAND(d)\n\n # N\n add_z = ((self[A] + self[J] + self[F]) / 3) + RAND(N)\n self[N] = normalize(add_z, N)\n\n # O\n add_z = ((self[A] + self[G] + self[J]) / 3) + RAND(N)\n self[O] = normalize(add_z, O)\n\n\n ### Start recurse for diamond alg\n square_diamond(A[0], A[1], dsize, strong)\n square_diamond(G[0], G[1], dsize, strong)\n square_diamond(F[0], F[1], dsize, strong)\n square_diamond(E[0], E[1], dsize, strong)", "def move_to_random_pos(self):\n newpos = [(np.random.rand() - 0.5) * 0.1,\n (np.random.rand() - 0.5) * 0.1,\n np.random.rand() * 0.9 + 0.2]\n self.move_to(newpos)", "def Generate_Random( self ):\n print( 'Generating Random coordinates' )\n stands = self.Data.Stand.keys()\n stands.sort()\n for s in stands:\n trees = self.Data.Stand[s].Tree.keys()\n trees.sort()\n for t in trees:\n self.Data.Stand[s].Tree[t].X = random.uniform( 
0, 208.71 )\n self.Data.Stand[s].Tree[t].Y = random.uniform( 0, 208.71 )", "def mutate_point_wline(mutated_genome):\n seed = random.randint(0,7)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if len(mutated_genome[index][2]) < 1: seed = 0\n if seed == 0:\n insert_point_wline(mutated_genome,index)\n elif seed == 1:\n remove_point_wline(mutated_genome,index)\n elif seed == 2:\n switch_points_wline(mutated_genome,index)\n elif seed == 3:\n shuffle_points_wline(mutated_genome,index)\n elif seed == 4:\n move_point_wline(mutated_genome,index)\n elif seed == 5:\n shift_point_wline(mutated_genome,index)\n elif seed == 6:\n increment_point_wline(mutated_genome,index)\n else: #seed == 7:\n decrement_point_wline(mutated_genome,index)", "def random_walk(n):\n x,y = 0,0\n for i in range(n):\n step = random.choice(['N','S','E','W'])\n if step == 'N':\n y+=1\n elif step == 'S':\n y-=1\n elif step == 'E':\n x+=1\n else:\n x-=1\n return (x,y)", "def place_pillar_e(self):\r\n x = random.randint(0, (self.__nx - 1))\r\n y = random.randint(0, (self.__ny - 1))\r\n self.__pillar_e = x, y\r\n if self.pillar_e_room() == self.pillar_a_room() or \\\r\n self.pillar_e_room() == self.pillar_i_room() or \\\r\n self.pillar_e_room() == self.pillar_p_room() or \\\r\n self.pillar_e_room() == self.entrance_room() or \\\r\n self.pillar_e_room() == self.exit_room():\r\n return self.place_pillar_e()\r\n self.__maze[x][y].set_pillar_e(True)", "def randLoc(this):\n from temp_aber import randperc, trapch\n\n if randperc() > 50:\n this.locId = -5\n else:\n this.locId = -183\n\n trapch(this.locId)", "def move_point_trig(mutated_genome,index):\n Xval = random.randint(-int(imagewidth/5.),int(imagewidth*6./5.))\n Yval = random.randint(-int(imageheight/5.),int(imageheight*6./5.))\n point = (Xval,Yval)\n old_points = list(mutated_genome[index][2])\n old_points[random.randint(0,2)] = point\n mutated_genome[index][2] = tuple(old_points)", "def randwalk(n):\n\tx=0;\n\ty=0;\n\n\tfor i in range(n):\n\t\tstep = random.choice(['N','S','E','W'])\n\t\tif step== 'N':\n\t\t\ty=y+1\n\t\telif step=='S':\n\t\t\ty=y-1\n\t\telif step=='E':\n\t\t\tx=x+1\n\t\telse:\n\t\t\tx=x-1\n\treturn (x,y)", "def random_walk(n):\n\tx, y = 0, 0\n\tfor i in range(n):\n\t\tstep = random.choice(['N', 'S', 'E', 'W'])\n\t\tif step == 'N':\n\t\t\ty += 1\n\t\tif step == 'S':\n\t\t\ty -= 1\n\t\tif step == 'E':\n\t\t\tx += 1\n\t\tif step == 'W':\n\t\t\tx -= 1\n\treturn (x, y)", "def _create_random_tetrino(self):\n shape_index = randrange(constant.NUM_SHAPES)\n shape = constant.SHAPES[shape_index]\n shape_locations = self.translate_shape(shape[0], 0, 0)\n num_blocks = len(shape_locations)\n location = self._create_random_offsets(shape_locations)\n new_tetrino = Tetrino(location, shape_index, \\\n num_blocks, self.tetrino_id, self.game_size)\n self.tetrino_set[self.tetrino_id] = new_tetrino\n self.tetrino_id += 1\n return new_tetrino", "def sleeve(self, parent, um, info = \"Patron de T-shirt\"):\n sleeve_attribs = {inkex.addNS('label', 'inkscape'): info,'transform': 'translate(-100,-200)'}\n piece_group = inkex.etree.SubElement(parent, 'g', sleeve_attribs)\n\n # The template main vertexes absolute positions\n vertexes = {\n 'shoulder': (0, 0),\n 'sleeve_middle': (0, um['top_sleeve']-um['under_sleeve']),\n 'armpit': (um['bicep'], um['top_sleeve'] - um['under_sleeve']),\n 'sleeve_top': (0, um['top_sleeve']),\n 'sleeve_bottom': (um['bicep']-0.5*um['ease'],um['top_sleeve']),\n }\n if self.options.grid:\n reference = inkex.etree.SubElement(piece_group, 
'g',{inkex.addNS('label', 'inkscape'): info + \"_structure\"})\n draw_svg_line([vertexes['shoulder'],(0, um['top_sleeve']),(um['bicep']-0.5*um['ease'],0)], reference, self.doted_line)\n draw_svg_line([vertexes['sleeve_middle'], (um['bicep'],0)], reference, self.doted_line)\n for name, vertex in vertexes.items():\n draw_svg_circle(self.getunittouu('4mm'), vertex, reference, self.normal_line)", "def _seed(self, seed):\n self.world.seed(seed)", "def mutate_point_circ(mutated_genome):\n seed = random.randint(0,3)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if seed == 0:\n move_point_circ(mutated_genome,index)\n elif seed == 1:\n shift_point_circ(mutated_genome,index)\n elif seed == 2:\n move_radius_circ(mutated_genome,index)\n else: #seed == 3:\n shift_radius_circ(mutated_genome,index)", "def move_point(mutated_genome,index):\n Xval = random.randint(-int(imagewidth/5.),int(imagewidth*6./5.))\n Yval = random.randint(-int(imageheight/5.),int(imageheight*6./5.))\n point = (Xval,Yval)\n point_index = random.randint(0,max(0,len(mutated_genome[index][2])-1))\n mutated_genome[index][2][point_index] = point", "def _get_random_pos_on_a_side(self):\n pass", "def cut_trees(self, )\n\n\n\n def random_spot(x_low, y_low, x_range, y_range):\n x = randint(x_low, x_low + x_range)\n y = randint(y_low, y_low + y_range)\n dur = random.uniform(0.5, 3.0)\n\n return pyautogui.moveTo(x, y, dur)", "def spirala(t):\n t.penup()\n t.setx(random.randrange(-200,200))\n t.sety(random.randrange(-200,200))\n t.pencolor(random.randrange(0,255),random.randrange(0,255),200)\n t.width(random.randrange(2,13))\n t.pendown()\n\n for i in range(120):\n \tt.forward(20+i)\n \tt.left(30 - i/1.5)", "def mutate_point_poly3(mutated_genome):\n seed = random.randint(0,7)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if len(mutated_genome[index][2]) < 1: seed = 0\n if seed == 0:\n insert_point(mutated_genome,index)\n elif seed == 1:\n remove_point(mutated_genome,index)\n elif seed == 2:\n switch_points(mutated_genome,index)\n elif seed == 3:\n shuffle_points(mutated_genome,index)\n elif seed == 4:\n move_point(mutated_genome,index)\n elif seed == 5:\n shift_point(mutated_genome,index)\n elif seed == 6:\n increment_point(mutated_genome,index)\n else: #seed == 7:\n decrement_point(mutated_genome,index)" ]
[ "0.56462944", "0.54003584", "0.5304589", "0.5286129", "0.5285182", "0.5238742", "0.521904", "0.5194716", "0.51497424", "0.51151454", "0.50812453", "0.50709474", "0.5054132", "0.50421154", "0.49938712", "0.49918446", "0.49793696", "0.4965727", "0.49629447", "0.49620885", "0.49517375", "0.48856944", "0.4882016", "0.48750204", "0.48735237", "0.48701468", "0.48576313", "0.48530224", "0.4811988", "0.478996" ]
0.60079974
0
Add the spines randomly to the morphology.
def add_spines_to_morphology(self):

    # A list of the data of all the spines that will be added to the neuron morphology
    spines_list = list()

    # Remove the internal samples, or the samples that intersect the soma at the first
    # section and each arbor
    nmv.skeleton.ops.apply_operation_to_morphology_partially(
        *[self.morphology,
          self.options.morphology.axon_branch_order,
          self.options.morphology.basal_dendrites_branch_order,
          self.options.morphology.apical_dendrite_branch_order,
          nmv.skeleton.ops.get_random_spines_on_section,
          self.options.mesh.random_spines_percentage,
          spines_list])

    # Keep a list of all the spines objects
    spines_objects = []

    # Load all the template spines and ignore the verbose messages of loading
    self.load_spine_meshes()

    nmv.logger.info('Cloning and integrating spines')
    building_timer = nmv.utilities.timer.Timer()
    building_timer.start()

    # Load the synapses from the file
    number_spines = len(spines_list)
    for i, spine in enumerate(spines_list):

        # Show progress
        nmv.utilities.time_line.show_iteration_progress('\t* Spines', i, number_spines)

        # Emanate a spine
        spine_object = self.emanate_spine(spine, i)

        # Add the object to the list
        spines_objects.append(spine_object)

    # Done
    nmv.utilities.time_line.show_iteration_progress(
        '\t* Spines', number_spines, number_spines, done=True)

    # Report the time
    building_timer.end()
    nmv.logger.info('Spines: [%f] seconds' % building_timer.duration())

    # Delete the template spines
    nmv.scene.ops.delete_list_objects(self.spine_meshes)

    # Return the spines objects list
    return spines_objects
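The document above is essentially an orchestration loop: gather the spine placement data, load the templates, clone one instance per spine with progress reporting and timing, then delete the templates. A plain-Python sketch of just the timing and progress bookkeeping is shown below; emanate_spine is passed in as a callable and the print-based progress line is an assumption standing in for nmv.utilities.time_line.

import time

def integrate_spines(spines_list, emanate_spine):
    spines_objects = []
    start = time.time()

    total = len(spines_list)
    for i, spine in enumerate(spines_list):
        # Lightweight progress reporting in place of nmv.utilities.time_line.
        print('\r\t* Spines [%d/%d]' % (i + 1, total), end='')
        spines_objects.append(emanate_spine(spine, i))

    print('\nSpines: [%f] seconds' % (time.time() - start))
    return spines_objects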
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def insert_bonuses(self):\n segs = random.sample(self.segments, 2)\n\n for s in segs:\n offset = random.randint(-10, 10) / 10.0\n self.add_sprite(s, \"bonus\", offset)", "def random_terrain(self):\n terrain_segments = [] # To hold the list of segments that will be added to the space as a terrain\n\n # Generate the point tuples\n points = [(i, random.randint(self.screen_height // 20, self.screen_height // 7))\n for i in range(0, self.screen_width + SEGMENT_LENGTH, SEGMENT_LENGTH)]\n\n # Loop through the point tuples and populate the 'terrain' list\n for i in range(1, len(points)):\n floor = pm.Segment(self.space.static_body, (points[i - 1][0], points[i - 1][1]),\n (points[i][0], points[i][1]), TERRAIN_THICKNESS)\n floor.friction = TERRAIN_FRICTION\n floor.filter = pm.ShapeFilter(group=0)\n floor.collision_type = 4\n floor.filter = GameScene.border_sf\n terrain_segments.append(floor)\n self.space.add(terrain_segments)", "def emanate_spine(self,\n spine,\n id):\n\n # Select a random spine from the spines list\n spine_template = random.choice(self.spine_meshes)\n\n # Get a copy of the template and update it\n spine_object = nmv.scene.ops.duplicate_object(spine_template, id)\n\n # Rename the spine\n spine_object.name = '%s_spine_%d' % (self.options.morphology.label, id)\n\n # Scale the spine\n spine_scale = spine.size * random.uniform(1.25, 1.5)\n nmv.scene.ops.scale_object_uniformly(spine_object, spine_scale)\n\n # Translate the spine to the post synaptic position\n nmv.scene.ops.set_object_location(spine_object, spine.post_synaptic_position)\n\n # Rotate the spine towards the pre-synaptic point\n nmv.scene.ops.rotate_object_towards_target(\n spine_object, Vector((0, 0, -1)),\n spine.pre_synaptic_position * (1 if random.random() < 0.5 else -1))\n\n # Adjust the shading\n nmv.shading.adjust_material_uv(spine_object, 5)\n\n # Return a reference to the spine\n return spine_object", "def randomize(self):\n \n spins = [np.random.random() > 0.5 for x in range(self.size)]\n self.spins_initial = bitarray.bitarray(spins)", "def add_noise(self):\n self.segments = deepcopy(self.segments)\n # Iterate through each of the first three Segments in the WordForm.\n for i in range(3):\n # Add noise to each Segment.\n self.segments[i].add_noise()", "def load_spine_meshes(self):\n # Load all the template spines and ignore the verbose messages of loading\n nmv.utilities.disable_std_output()\n self.spine_meshes = nmv.file.load_spines(nmv.consts.Paths.SPINES_MESHES_HQ_DIRECTORY)\n nmv.utilities.enable_std_output()\n\n # Create the material\n material = nmv.shading.create_material(\n name='%spine_material', color=self.options.mesh.spines_color,\n material_type=self.options.mesh.material)\n\n # Apply the shader\n for spine_object in self.spine_meshes:\n\n # Apply the shader to each spine mesh\n nmv.shading.set_material_to_object(spine_object, material)", "def fill_list(self):\n for i in range(0, constants.STARTING_WORDS):\n random_word = constants.LIBRARY[random.randint(0, len(constants.LIBRARY) - 1)]\n x = random.randint(1, constants.MAX_X - len(self.get_text()))\n y = random.randint(1, constants.MAX_Y - len(self.get_text()))\n position = Point(x, y)\n self.set_position(position)\n velocity = Point(0, 1)\n self._add_segment(random_word, position, velocity)\n print()", "def add_snowman(self):\n self.scenes[self.current_scene].add_object(Snowman())\n self.redraw()", "def add_random_fields(smali_line):\n for _ in range(u.random_nop_interval()):\n print re.sub(r':', u.get_random(True, 32) + ':', smali_line), # Append", 
"def random_item_sp(self):\n if random.random() < 0.3:\n self.window.add(self.shorten_paddle, x=self.ball.x+self.objects_length/2, y=self.ball.y)\n self.shorten_paddle_exist = True", "def add_noise(self):\n self.noise = np.random.poisson(lam=self.lam, size=self.im.shape)\n self.im += self.noise\n return", "def drawNewMotifSite(self):\n\t\ttot = float(sum(self.siteScores))\n\t\tsiteProbs = [x/tot for x in self.siteScores]\t# normalize the siteScores\n\t\tassert abs(1.0-sum(siteProbs)) < 0.00001\t\t# check probs sum to 1 (within margin of error)\n\t\t# draw randomly according to this distribution\n\t\tr = random.random()\t\t# returns random uniform[0,1]\n\t\tsite = 0\n\t\tcumulative = siteProbs[site]\n\t\twhile cumulative < r:\n\t\t\tsite += 1\n\t\t\tcumulative += siteProbs[site]\n\t\tself.motif = site", "def add_mines(self):\n for x, y in sample(list(itertools.product(range(self.width), range(self.height))), self.num_mines):\n self.grid[y][x] = self.mine", "def _addNoise(self):\n self.dispNoise = self.dispRaw.copy()\n self.dispNoise[:, 0] += self.sigmaEast * numpy.random.randn(self.numStations)\n self.dispNoise[:, 1] += self.sigmaNorth * numpy.random.randn(self.numStations)\n self.dispNoise[:, 2] += self.sigmaUp * numpy.random.randn(self.numStations)\n return", "def generate(self):\n for i in range(4):\n random_first = randomize_first_box()\n self.randomize(random_first)\n for i in range(9):\n random_pos = randomize_position()\n self.randomize(random_pos)\n self.board.solve()", "def stir(self):\n random.shuffle(self.ingredients)\n return self", "def add_noise(self):\n self.noise = np.random.poisson(lam=self.lam, size=self.image.shape)\n self.image += self.noise\n return", "def generate_lists(self):\n scenelist = self.scenelist\n newbies = self.newbies\n claimlist = [ob for ob in self.claimlist if ob not in newbies]\n choices = self.valid_scene_choices\n num_scenes = self.NUM_SCENES - (len(claimlist) + len(scenelist))\n if num_scenes > 0:\n try:\n scenelist.extend(random.sample(choices, num_scenes))\n except ValueError:\n scenelist.extend(choices)\n scenelist = sorted(scenelist, key=lambda x: x.key.capitalize())\n self.caller.player_ob.db.random_scenelist = scenelist", "def setup(self):\n\n # Create the Sprite lists\n self.sprite_list = arcade.SpriteList()\n\n r = 60\n for x in rand_range(0, 100 * math.pi, scale=math.pi / 5):\n star = arcade.Sprite(\"../../resources/arcade/gold_1.png\")\n star.center_x = SCREEN_WIDTH / 2 + r * math.cos(x)\n star.center_y = SCREEN_HEIGHT / 2 + r * math.sin(x)\n star.seed = scale_generator(x=random() * math.pi, offset=.5, step=.01)\n star.scale = next(star.seed)\n self.sprite_list.append(star)\n r += 3", "def populate_objects(self):\n if not self._random_object: # only populate the first object\n U.spawn_object(self.object_list[0], self.object_initial_position)\n else:\n rand_x = np.random.uniform(low=-0.35, high=0.35, size=(len(self.object_list),))\n rand_y = np.random.uniform(low=2.2, high=2.45, size=(len(self.object_list),))\n for idx, obj in enumerate(self.object_list):\n box_pos = Pose(position=Point(x=rand_x[idx],\n y=rand_y[idx],\n z=1.05))\n U.spawn_object(obj, box_pos)", "def mutate_point_wline(mutated_genome):\n seed = random.randint(0,7)\n index = random.randint(0,max(0,len(mutated_genome)-1))\n if len(mutated_genome[index][2]) < 1: seed = 0\n if seed == 0:\n insert_point_wline(mutated_genome,index)\n elif seed == 1:\n remove_point_wline(mutated_genome,index)\n elif seed == 2:\n switch_points_wline(mutated_genome,index)\n elif seed == 3:\n 
shuffle_points_wline(mutated_genome,index)\n elif seed == 4:\n move_point_wline(mutated_genome,index)\n elif seed == 5:\n shift_point_wline(mutated_genome,index)\n elif seed == 6:\n increment_point_wline(mutated_genome,index)\n else: #seed == 7:\n decrement_point_wline(mutated_genome,index)", "def trial(length, height):\n screen.refresh()\n global stimList\n global oddLength\n global oddHeight\n currentLength = int(maxLength / 4)\n currentHeight = int(maxHeight / 4)\n for i in range(stimAmt):\n if i == oddLocation:\n oddLength = currentLength\n oddHeight = currentHeight\n stimList.append(\n pg.draw.rect(\n screen.fg,\n PgTools.rand_color(),\n (currentLength, currentHeight, length, height,),\n )\n )\n PgTools.rand_pattern(\n screen.fg,\n (\n currentLength,\n currentHeight,\n ),\n (length, height),\n i=(randint(0, 2), randint(0, 1)),\n )\n if randShapes:\n PgTools.rand_shape(screen.fg, (currentLength, currentHeight),(length, height), oddSeed)\n else:\n stimList.append(\n pg.draw.rect(\n screen.fg,\n color,\n (currentLength, currentHeight, length, height,),\n )\n )\n PgTools.rand_pattern(\n screen.fg,\n (\n currentLength,\n currentHeight,\n ),\n (length, height),\n patColor,\n randNums,\n )\n if randShapes:\n PgTools.rand_shape(screen.fg, (currentLength, currentHeight),(length, height), regSeed)\n currentLength += maxLength / 4\n currentLength = int(currentLength)\n if (i + 1) % 3 == 0:\n currentLength = maxLength / 4\n currentLength = int(currentLength)\n currentHeight += maxHeight / 4\n currentHeight= int(currentHeight)", "def implement_random(self):\n shape = set()\n for coord in INDICES:\n if randint(0, 1):\n shape.add(coord)\n self.implement_shape(shape)", "def random_segs(cls, shape, lemma = None, case = None):\n # For each C or V segment in `shape`, initialize a random Segment of the\n # appropriate type. 
Initialize a new WordForm with all these Segments.\n return cls([Segment(seg_type = seg) for seg in shape], lemma, case)", "def generate_random_linelist (teff,wv_bounds=(4500,5500),species_params=None,filepath=None):\n abund_offset_range = (-1,1)\n species_offset_range = (-1,1)\n ew_dist_width = 30\n ep_range = (0,12)\n loggf_range = (-6.0,0.5) \n \n theta = 5040.0/teff\n \n # # TODO: remove this calculation???\n # # # fix to a particular line which should be by the turnoff\n # # # Fe I 88.2 2.22 EP -4.2 loggf\n # loggf = -4.2\n # ep = 2.22\n # x_turnoff = abund_standard['Fe']['abundance']+loggf-theta*ep\n # x-x_turnoff = -5\n # \n # based on the model abundance used in the cog file\n xnorm = -6.5\n ynorm = -2.0\n \n # read in the parameters \n if species_params is None:\n species_params = _elements_params\n el_params = species_params.copy()\n for el,pars in _elements_params.items():\n el_params.setdefault(el,pars)\n \n\n coeffs, knots, centers, scales = np.array(cog_ppol_hf[\"coefficients\"]), np.array(cog_ppol_hf[\"knots\"]), np.array(cog_ppol_hf[\"centers\"]), np.array(cog_ppol_hf[\"scales\"])\n iqp = piecewise_polynomial.InvertiblePiecewiseQuadratic(coeffs, knots, centers=centers, scales=scales)\n iqp_deriv = iqp.deriv()\n \n # calc the linelist\n linelist = {}\n element_abund = {}\n for species,pars in list(species_params.items()):\n wvs = np.random.uniform(wv_bounds[0],wv_bounds[1],pars['n'])\n solar_abund_offset = np.random.uniform(*abund_offset_range)\n \n # get the abundance for this element, ignore species\n abund = abund_standard[species]['abundance']+solar_abund_offset\n element_abund.setdefault(abund_standard[species]['element'],abund) \n \n species_offset = np.random.uniform(*species_offset_range) \n species_abund = element_abund[abund_standard[species]['element']]+species_offset\n species_abund = np.repeat(species_abund,pars['n'])\n \n # generate the parameters for the lines\n spe_col = np.repeat(abund_standard.species_id(species),pars['n'])\n ew = np.random.exponential(ew_dist_width,pars['n'])\n ep = np.random.uniform(ep_range[0],ep_range[1],pars['n'])\n loggf = np.random.uniform(loggf_range[0],loggf_range[1],pars['n'])\n \n # calculate the line strengths from the COG\n #x = species_abund + loggf - theta*ep + xnorm\n logrw = np.log10(ew/wvs)\n x = iqp.inverse(logrw-ynorm)\n loggf = species_abund - x - theta*ep + xnorm\n\n # estimate the lorzentian and gaussian widths for this line\n lorz_width = estimate_lorentz_width(x, iqp_deriv)\n gauss_width = np.repeat(99.9,pars['n'])\n \n # add to the linelist\n linelist[species] = np.dstack((wvs,spe_col,ep,loggf,ew,gauss_width,lorz_width))[0]\n \n if filepath is not None:\n # save moog file\n f = open(filepath,'w')\n header = \"# Fake linelist created THIMBLES with teff {} # \"\n header += \"wvs species ep loggf ew gauss_width lorz_width # \"\n header += \"guassian and lorentzian widths are estimate\\n\"\n f.write(header.format(teff))\n \n fmt = \"{0:>9.5f} {1:>9.1f} {2:>9.2f} {3:>9.2f}\"+20*\" \"+\" {4:>9.2f}\"+10*\" \"\n fmt += \" {5:>9.2f} {6:>9.2f} FAKE_LINE\\n\"\n for species,ll in linelist.items():\n for row in ll:\n f.write(fmt.format(*row)) \n return linelist", "def __init__(self):\n super().__init__()\n self._points = 0\n self._segments = []\n self.fill_list()\n # i = random.randint(0, len(self._segments) - 1)\n # self.set_text(self._segments[i])\n self.reset()", "def randPlace(self):\r\n random.seed(self.seed)\r\n \r\n # Start placement on Partition A\r\n partA = True\r\n for node in self.G.nodes():\r\n \r\n randSite = 
random.randint(0,int(self.sitesNum/2)-1)\r\n \r\n if partA:\r\n partSite = self.sitesA\r\n self.G.node[node][\"part\"] = 'A'\r\n \r\n else:\r\n partSite = self.sitesB\r\n self.G.node[node][\"part\"] = 'B'\r\n \r\n while (partSite[randSite].isOcp()):\r\n randSite = random.randint(0,int(self.sitesNum/2)-1) \r\n\r\n partSite[randSite].setCell(node)\r\n self.G.node[node][\"site\"] = partSite[randSite]\r\n \r\n # Toggle partition for next placement\r\n partA = not partA", "def sample_sonnet_syl_and_rhyme(hmm, obs_map, rhyme_dict, n_syl = 10):\n sonnetLines = []\n r_sonnetLines = []\n sonnet = ''\n sonnet_length = 14\n count = 0\n syl_counts = syllable_dict()\n # print(syl_counts)\n \n while count < sonnet_length:\n # Pick a random word from the rhyming dictionary that the line has to start with. \n \n start_word = np.random.choice(list(rhyme_dict.keys()))\n rhyme_word = np.random.choice(rhyme_dict[start_word])\n line1 = sample_sentence_syl(hmm, obs_map, rhyme_dict, start_word, n_syl)\n line2 = sample_sentence_syl(hmm, obs_map, rhyme_dict, rhyme_word, n_syl)\n (worked1, nline1) = make_line(line1, n_syl, syl_counts)\n (worked2, nline2) = make_line(line2, n_syl, syl_counts)\n if worked1 and worked2:\n sonnetLines.append(nline1)\n sonnetLines.append(nline2)\n count += 2\n\n # Now flip the order of each line.\n for line in sonnetLines:\n line_reversed = ' '.join(reversed(line.split(' '))).capitalize()\n r_sonnetLines.append(line_reversed + '\\n')\n\n # Rearrange 7 couplets into a sonnet.\n for stanza in range(0, 3):\n idx = [0, 1, 2, 3]\n for i in range(len(idx)):\n idx[i] += stanza * 4\n sonnet += r_sonnetLines[idx[0]] + r_sonnetLines[idx[2]] + r_sonnetLines[idx[1]] + r_sonnetLines[idx[3]]\n\n for line_num in range(12, 14):\n sonnet += r_sonnetLines[line_num]\n\n return sonnet", "def sample_spherical(self):\n vec = np.random.randn(self.dims, self.arms)\n vec /= np.linalg.norm(vec, axis=0)\n self.contexts = vec.T", "def __init_snake(self):\n if len(self.__snake) > 0:\n for s in self.__snake:\n s.remove()\n self.__snake = []\n self.__ticker.setInterval(self.__step)\n self.__h_direction = random.choice(range(4))\n max_len_x = self.width() // self.__cell_edge\n max_len_y = self.height() // self.__cell_edge\n pos_x = random.choice(range(self.__default_len + 2, max_len_x - 2)) * self.__cell_edge\n pos_y = random.choice(range(self.__default_len + 2, max_len_y - 2)) * self.__cell_edge\n icon = f'resources/{self.__directions[self.__h_direction]}.svg'\n s = Snake(self, icon, self.__h_direction, self.__cell_edge, QtCore.QPoint(pos_x, pos_y))\n self.__snake.append(s)\n for i in range(self.__default_len - 1):\n self.__add_node()\n if not self.__candy:\n self.__candy = Snake(self, 'resources/candy.svg', size=self.__cell_edge)\n self.__new_candy()" ]
[ "0.60628176", "0.5960485", "0.59101003", "0.5616719", "0.56113416", "0.5536109", "0.5498553", "0.5497647", "0.5496644", "0.54036343", "0.52970546", "0.52900267", "0.5285795", "0.5261114", "0.5230162", "0.5227572", "0.5211758", "0.5208825", "0.5196995", "0.5181738", "0.51782364", "0.517423", "0.51669604", "0.5124515", "0.51140076", "0.5076457", "0.50670743", "0.50602645", "0.5040047", "0.5027603" ]
0.7675542
0
x is the time-domain signal
fs is the sampling frequency
framesz is the frame size, in seconds
hop is the time between the start of consecutive frames, in seconds
def stft(x, fs, framesz, hop):
    framesamp = int(framesz*fs)
    hopsamp = int(hop*fs)
    w = scipy.hamming(framesamp)
    X = scipy.array([scipy.fft(w*x[i:i+framesamp], 256)
                     for i in range(0, len(x)-framesamp, hopsamp)])
    X = X[:, 0:128]
    return X
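A minimal usage sketch for the stft above, assuming the legacy SciPy top-level aliases it relies on (scipy.hamming, scipy.fft as a function) are available; on a modern SciPy one would reach for scipy.signal windows and numpy.fft instead. The tone frequency, frame size, and hop values are made-up illustration values, not from the record.

import numpy as np

fs = 8000                           # sampling frequency in Hz
t = np.arange(0, 1.0, 1.0 / fs)
x = np.sin(2 * np.pi * 440.0 * t)   # one second of a 440 Hz tone

framesz = 0.050                     # 50 ms frames
hop = 0.025                         # 25 ms hop (50% overlap)

X = stft(x, fs, framesz, hop)
print(X.shape)                      # (number of frames, 128) given the fixed 256-point FFT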
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def stft(x, fs, framesz, hop):\n framesamp = int(framesz*fs)\n hopsamp = int(hop*fs)\n w = scipy.hamming(framesamp)\n X = scipy.array([scipy.fft(w*x[i:i+framesamp]) \n for i in range(0, len(x)-framesamp, hopsamp)])\n return X", "def time_to_frames(times, sr=22050, hop_length=512, n_fft=None):\n\n samples = time_to_samples(times, sr=sr)\n\n return samples_to_frames(samples, hop_length=hop_length, n_fft=n_fft)", "def stft(x, fs, framesz, hop, two_sided=True, fft_size=None):\n\n framesamp = int(framesz*fs)\n hopsamp = int(hop*fs)\n overlap_samp = framesamp - hopsamp\n\n _, _, X = scipy.signal.stft(x, fs, window='hann', nperseg=framesamp,\n noverlap=overlap_samp, nfft=fft_size, return_onesided=not two_sided)\n return X.T", "def frameTimes(self):\n sr = self.sampleRate\n offset = self.activeOffset\n stride = self.activeStride\n nf = self.numFrames\n t = np.arange(nf) * (stride[0] / sr) + (offset / sr)\n return t", "def frames_to_time(frames, sr=22050, hop_length=512, n_fft=None):\n\n samples = frames_to_samples(frames,\n hop_length=hop_length,\n n_fft=n_fft)\n\n return samples_to_time(samples, sr=sr)", "def test_frame_times_framesync():\n my_file_struct = FileStruct(os.path.join(\"fixtures\", \"chirp.mp3\"))\n pcp = PCP(my_file_struct, FeatureTypes.framesync, sr=11025)\n times = pcp.frame_times\n assert(isinstance(times, np.ndarray))", "def xframe(x, time, fix_time=True, round_time=1.e-5, dt=-1):\n x=np.asarray(x)\n time=np.asarray(time)\n if fix_time:\n if dt<0:\n dt=np.round((time[1]-time[0])/round_time)*round_time\n time=np.linspace(0.,dt*(x.size-1),x.size)\n time[1]=dt\n df = pd.DataFrame({\"t\":time.ravel(),\"x\":x.ravel()}, index=time.ravel())\n df.index.name='#t'\n return df", "def get_frame_time(self, f):\n return f * self.get_frame_duration()", "def st_rfft(x, frame_size, hop_size, fft_size=None):\n if not fft_size:\n fft_size = frame_size\n idx_starts = np.arange(0, len(x)-frame_size, hop_size, dtype='int')\n xf = np.zeros([int(fft_size/2+1), len(idx_starts)], dtype=np.complex)\n win = np.sqrt(sig.hann(frame_size, False))\n\n for cnt, idx_start in enumerate(idx_starts):\n idx_stop = idx_start + frame_size\n xtemp = np.fft.rfft(x[idx_start:idx_stop]*win, n=fft_size)\n xf[:, cnt] = xtemp\n\n return xf", "def spwvd(fx,tstep=2**5,nfbins=2**10,df=1.0,nh=None,ng=None,sigmat=None,\r\n sigmaf=None):\r\n \r\n if type(fx) is list:\r\n fx=np.array(fx)\r\n try:\r\n fn,fm=fx.shape\r\n if fm>fn:\r\n fm,fn=fx.shape\r\n except ValueError:\r\n fn=len(fx)\r\n fm=1\r\n if fm>1:\r\n print 'computing cross spectra'\r\n #compute the analytic signal of function f and dctrend\r\n fa=sps.hilbert(dctrend(fx[0]))\r\n fb=sps.hilbert(dctrend(fx[1]))\r\n else:\r\n #compute the analytic signal of function f and dctrend\r\n fa=sps.hilbert(dctrend(fx))\r\n fb=fa\r\n print 'Computed Analytic signal'\r\n \r\n #sampling period\r\n df=float(df)\r\n dt=1/df\r\n \r\n #create normalize windows in time (g) and frequency (h)\r\n #note window length should be odd so that h,g[0]=1,nh>ng\r\n if nh==None:\r\n nh=np.floor(fn/2.)\r\n #make sure the window length is odd\r\n if np.remainder(nh,2)==0:\r\n nh=nh+1\r\n #calculate length for time smoothing window\r\n if ng==None:\r\n ng=np.floor(fn/5.)\r\n if np.remainder(ng,2)==0:\r\n ng=ng+1\r\n #calculate standard deviations for gaussian windows \r\n if sigmat==None:\r\n sigmah=nh/(6*np.sqrt(2*np.log(2)))\r\n else:\r\n sigmah=sigmat\r\n \r\n if sigmaf==None:\r\n sigmag=ng/(6*np.sqrt(2*np.log(2)))\r\n else:\r\n sigmag=sigmaf\r\n nh=int(nh)\r\n ng=int(ng)\r\n print 'nh='+str(nh)+'; 
ng='+str(ng)\r\n #calculate windows and normalize\r\n h=sps.gaussian(nh,sigmah)\r\n h=h/sum(h)\r\n \r\n g=sps.gaussian(ng,sigmag)\r\n g=g/sum(g)\r\n \r\n Lh=(nh-1)/2 #midpoint index of window h\r\n Lg=(ng-1)/2 #midpoint index of window g\r\n \r\n #create a time array such that the first point is centered on time window\r\n tlst=np.arange(start=0,stop=fn+1,step=tstep,dtype='int')\r\n \r\n #create an empty array to put the tf in \r\n #make sure data type is complex \r\n tfarray=np.zeros((nfbins,len(tlst)),dtype='complex128')\r\n \r\n #create a frequency array with just positive frequencies\r\n flst=np.fft.fftfreq(nfbins,dt)[0:nfbins/2]\r\n \r\n #calculate pseudo WV\r\n for point,t in enumerate(tlst):\r\n #find the smallest possible time shift\r\n maxtau=min(t+Lg-1,fn-t+Lg,round(nfbins/2),Lh)\r\n #create time lag list\r\n taulst=np.arange(start=-min(Lg,fn-t),stop=min(Lg,t-1)+1,step=1,\r\n dtype='int')\r\n #calculate windowed correlation function of analytic function for\r\n #zero frequency \r\n tfarray[0,point]=sum(2*(g[Lg+taulst]/sum(g[Lg+taulst]))*fa[t-taulst-1]*\r\n np.conjugate(fb[t-taulst-1]))\r\n #calculate tfd by calculating convolution of window and correlation \r\n #function as sum of correlation function over the lag period times the\r\n #window at that point. Calculate symmetrical segments for FFT later\r\n for mm in range(maxtau):\r\n taulst=np.arange(start=-min(Lg,fn-t-mm-1),stop=min(Lg,t-mm-1)+1,\r\n step=1,dtype='int')\r\n #compute positive half\r\n gm=2*(g[Lg+taulst]/sum(g[Lg+taulst]))\r\n Rmm=sum(gm*fa[t+mm-taulst-1]*np.conjugate(fb[t-mm-taulst]))\r\n tfarray[mm,point]=h[Lh+mm-1]*Rmm\r\n #compute negative half \r\n Rmm=sum(gm*fa[t-mm-taulst]*np.conjugate(fb[t+mm-taulst-1]))\r\n tfarray[nfbins-mm-1,point]=h[Lh-mm]*Rmm\r\n mm=round(nfbins/2)\r\n \r\n if t<=fn-mm and t>=mm and mm<=Lh:\r\n print 'doing weird thing'\r\n taulst=np.arange(start=-min(Lg,fn-t-mm),stop=min(Lg,fn-t,mm)+1,step=1,\r\n dtype='int')\r\n gm=g[Lg+taulst]/sum(g[Lg+taulst])\r\n tfarray[mm-1,point]=.5*\\\r\n (sum(h[Lh+mm]*(gm*fa[t+mm-taulst-1]*\r\n np.conjugate(fb[t-mm-taulst])))+\\\r\n sum(h[Lh-mm]*(gm*fa[t-mm-taulst]*\r\n np.conjugate(fb[t+mm-taulst-1]))))\r\n \r\n tfarray=np.fft.fft(tfarray,axis=0)\r\n #rotate for plotting purposes so that (t=0,f=0) is at the lower left\r\n tfarray=np.rot90(tfarray.T,1)\r\n \r\n return tfarray,tlst,flst", "def wvd(fx,nh=2**8-1,tstep=2**5,nfbins=2**10,df=1.0):\r\n \r\n if type(fx) is list:\r\n fx=np.array(fx)\r\n try:\r\n fn,fm=fx.shape\r\n if fm>fn:\r\n fm,fn=fx.shape\r\n except ValueError:\r\n fn=len(fx)\r\n fm=1\r\n if fm>1:\r\n fn=fn[0]\r\n print 'computing cross spectra'\r\n #compute the analytic signal of function f and dctrend\r\n fa=sps.hilbert(dctrend(fx[0]))\r\n fb=sps.hilbert(dctrend(fx[1]))\r\n fa=fa.reshape(fn)\r\n fb=fb.reshape(fn)\r\n else:\r\n #compute the analytic signal of function f and dctrend\r\n fa=sps.hilbert(dctrend(fx))\r\n fa=fa.reshape(fn)\r\n fb=fa.copy()\r\n \r\n #sampling period\r\n df=float(df)\r\n dt=1./df\r\n tau=(nh-1)/2\r\n \r\n #create a time array such that the first point is centered on time window\r\n tlst=np.arange(start=0,stop=fn-1,step=tstep,dtype='int')\r\n \r\n #create an empty array to put the tf in \r\n tfarray=np.zeros((nfbins,len(tlst)),dtype='complex128')\r\n \r\n #create a frequency array with just positive frequencies\r\n flst=np.fft.fftfreq(nfbins,dt)[0:nfbins/2]\r\n \r\n #calculate pseudo WV\r\n for point,nn in enumerate(tlst):\r\n #calculate the smallest timeshift possible\r\n taun=min(nn,tau,fn-nn-1)\r\n #make a timeshift 
array\r\n taulst=np.arange(start=-taun,stop=taun+1,step=1,dtype='int')\r\n #calculate rectangular windowed correlation function of analytic signal\r\n Rnn=4*np.conjugate(fa[nn-taulst])*fb[nn+taulst] \r\n #calculate fft of windowed correlation function\r\n FTRnn=np.fft.fft(padzeros(Rnn,npad=nfbins))\r\n #put into tfarray\r\n tfarray[:,point]=FTRnn[::-1]\r\n \r\n #normalize\r\n tfarray=tfarray/nh\r\n \r\n return tfarray,tlst,flst", "def calc_frame_time(instrument, aperture, xdim, ydim, amps):\n instrument = instrument.lower()\n if instrument == \"nircam\":\n xs = xdim\n ys = ydim\n colpad = 12\n\n # Fullframe\n if amps == 4:\n rowpad = 1\n fullpad = 1\n else:\n # All subarrays\n rowpad = 2\n fullpad = 0\n if ((xdim <= 8) & (ydim <= 8)):\n # The smallest subarray\n rowpad = 3\n\n elif instrument == \"niriss\":\n xs = ydim\n ys = xdim\n colpad = 12\n\n # Fullframe\n if amps == 4:\n rowpad = 1\n fullpad = 1\n else:\n rowpad = 2\n fullpad = 0\n\n elif instrument == 'fgs':\n xs = ydim\n ys = xdim\n colpad = 6\n if 'acq1' in aperture.lower():\n colpad = 12\n rowpad = 1\n if amps == 4:\n fullpad = 1\n else:\n fullpad = 0\n\n return ((1.0 * xs / amps + colpad) * (ys + rowpad) + fullpad) * 1.e-5", "def xframe(x, time, fix_time=False, round_time=1.e-4, dt=-1):\n x = np.asarray(x)\n time = np.asarray(time)\n if fix_time:\n if dt < 0:\n dt = np.round((time[1] - time[0]) / round_time) * round_time\n time = np.linspace(0., dt * (x.size - 1), x.size)\n time[1] = dt\n df = pd.DataFrame({\"t\": time.ravel(), \"x\": x.ravel()}, index=time.ravel())\n df.index.name = '#t'\n return df", "def __init__(self,\n sample_rate=16000,\n frame_length=25,\n frame_step=10,\n fft_length=None):\n self.frame_length = int(sample_rate * frame_length / 1e3)\n self.frame_step = int(sample_rate * frame_step / 1e3)\n self.fft_length = fft_length if fft_length else int(2**(np.ceil(\n np.log2(self.frame_length))))", "def timerX(*args, startTime: float=0.0, **kwargs)->float:\n pass", "def sfreq_to_times(gaze_array, sfreq, start_time=0):\n return np.arange(0, len(gaze_array) / sfreq, 1. 
/ sfreq) + start_time", "def calculate_time(ix, xi, wf):\n wf_len = len(wf)\n x_time = np.arange(ix, (ix + wf_len * xi), xi)\n return x_time", "def smethod(fx,L=11,nh=2**8,tstep=2**7,ng=1,df=1.0,nfbins=2**10,sigmaL=None):\r\n \t\r\n df=float(df)\r\n \r\n if type(fx) is list:\r\n fx=np.array(fx)\r\n try:\r\n fn,fm=fx.shape\r\n if fm>fn:\r\n fm,fn=fx.shape\r\n except ValueError:\r\n fn=len(fx)\r\n fm=1\r\n if fm>1:\r\n print 'computing cross spectra'\r\n #compute the analytic signal of function f and dctrend\r\n #fa=sps.hilbert(dctrend(fx[0]))\r\n #fb=sps.hilbert(dctrend(fx[1]))\r\n fa=fx[0]\r\n fb=fx[1]\r\n fa=fa.reshape(fn)\r\n fb=fb.reshape(fn)\r\n pxa,tlst,flst=stft(fa,nh=nh,tstep=tstep,ng=ng,df=df,nfbins=nfbins)\r\n pxb,tlst,flst=stft(fb,nh=nh,tstep=tstep,ng=ng,df=df,nfbins=nfbins)\r\n pxx=pxa*pxb.conj()\r\n else:\r\n #compute the analytic signal of function f and dctrend\r\n #fa=sps.hilbert(dctrend(fx))\r\n fa=fx\r\n fa=fa.reshape(fn)\r\n fb=fa\r\n pxx,tlst,flst=stft(fa,nh=nh,tstep=tstep,ng=ng,df=df,nfbins=nfbins)\r\n# pxb=pxa\r\n\r\n #make an new array to put the new tfd in\r\n tfarray=abs(pxx)**2\r\n #get shape of spectrogram\r\n nf,nt=tfarray.shape\r\n #create a list of frequency shifts\r\n Llst=np.arange(start=-L/2+1,stop=L/2+1,step=1,dtype='int')\r\n #create a frequency gaussian window\r\n if sigmaL==None:\r\n sigmaL=L/(1*np.sqrt(2*np.log(2)))\r\n p=sps.gaussian(L,sigmaL)\r\n #make a matrix of windows\r\n pm=np.zeros((L,nt))\r\n for kk in range(nt):\r\n pm[:,kk]=p\r\n \r\n #loop over frequency and calculate the s-method \r\n for ff in range(L/2,nf-L/2):\r\n tfarray[ff,:]=tfarray[ff,:]+2*np.real(np.sum(pm*pxx[ff+Llst,:]*\r\n pxx[ff-Llst,:].conj(),axis=0))\r\n tfarray=tfarray/L\r\n \r\n return tfarray,tlst,flst,pxx", "def _get_time(x, time, warn_sfreq=False):\n # process time argument\n if hasattr(time, '__iter__'):\n # create sfreq from times array\n times = np.array(time)\n if warn_sfreq and (np.std(times[1:] - times[:-1]) > 1e-5):\n warnings.warn(WARN_SFREQ)\n sfreq = 1. 
/ np.mean(times[1:] - times[:-1]) \n else:\n # create times array from sfreq\n sfreq = time\n times = sfreq_to_times(x, sfreq)\n return times, sfreq", "def _chunk_time(x, samp_buffer=0):\n if samp_buffer < 0:\n raise ValueError(\n 'Buffer between signal peaks must be a positive number')\n if samp_buffer != int(samp_buffer):\n raise ValueError('Number of samples must be an integer')\n\n if type(x[0]) == np.bool_:\n Xs = np.arange(len(x))\n x = Xs[x]\n X = len(x)\n\n cur_start = x[0]\n cur_samp = x[0]\n Nchunk = 0\n chunks = []\n for i in range(1, X):\n if x[i] > (cur_samp + samp_buffer + 1):\n if Nchunk == 0:\n chunks = [cur_start, cur_samp]\n else:\n chunks = np.vstack([chunks, [cur_start, cur_samp]])\n\n Nchunk = Nchunk + 1\n cur_start = x[i]\n\n cur_samp = x[i]\n\n # Add final row to chunk\n if Nchunk == 0:\n chunks = [[cur_start, cur_samp]]\n else:\n chunks = np.vstack([chunks, [cur_start, cur_samp]])\n\n return chunks", "def number_frames(signal_len, frame_len, frame_step):\n frames = 1\n if signal_len > frame_len:\n temp = (1.0 * signal_len - frame_len)/frame_step\n frames += int(np.floor(temp))\n\n return frames", "def stft(x, fft_size, hopsamp):\n w = np.hamming(fft_size)\n return np.array([np.fft.rfft(w*x[i:i+fft_size]) \n for i in range(0, len(x)-fft_size, hopsamp)])", "def framing(signal, frame_length, frame_step, window_func=lambda x: np.ones((x,))):\n signal_length = len(signal)\n num_frames = 1 + (signal_length - frame_length) // frame_step\n\n frames = np.zeros((num_frames, frame_length))\n for index in range(num_frames):\n frames[index] = np.asarray(signal[index * frame_step: index * frame_step + frame_length],\n dtype='float32') * window_func(frame_length)\n return frames", "def xvframe(x,v,time,round_time=1.e-5,fix_time=True,dt=-1):\n x=np.asarray(x)\n v=np.asarray(v)\n time=np.asarray(time)\n if fix_time:\n if dt<0:\n dt=np.round((time[1]-time[0])/round_time)*round_time\n time=np.linspace(0.,dt*(x.size-1),x.size)\n time[1]=dt\n df = pd.DataFrame({\"t\":time.flatten(),\"x\":x.flatten(),\"v\":v.flatten()}, index=np.round(time/round_time)*round_time)\n df.index.name='#t'\n return df", "def generate_singlesine(time = 0, samples_nb = 1000, rep_frequency = 10 , pulse_frequency = 50, amplitude = 1 , edge = 1, phase_offset = 0, noise = 0):\r\n\r\n\tif edge not in [0,1]:\r\n\t\tprint(colorama.Back.RED + colorama.Style.BRIGHT + \"ERROR: invalid phase (either 0 for a rising or a 1 for a falling edge) , exit.\"+ colorama.Style.NORMAL + colorama.Back.RESET)\r\n\t\t# Return code for error (empty input file):\r\n\t\tsys.exit(10)\r\n\r\n\r\n\t#Creating empty lists for t and y\r\n\tt = np.zeros(samples_nb)\r\n\r\n\tif noise == 0:\r\n\t\ty = np.zeros(samples_nb)\r\n\telse:\r\n\t\ty = np.random.normal(0, noise, samples_nb)\r\n\r\n\t#Determining the interval limits of t\r\n\tt_limit =1/float(rep_frequency*2)\r\n\r\n\t#Updating the t interval\r\n\tt = np.arange(-samples_nb/2,samples_nb/2)/float(samples_nb*rep_frequency) + 1/float(samples_nb*rep_frequency)\r\n\r\n\r\n\t#calculating the time_shift\r\n\t#delta_t = phase_offset/(2*np.pi*pulse_frequency)\r\n\tdelta_t = phase_offset/(2*np.pi*rep_frequency)\r\n\r\n\t#Setting the pulse amplitude\r\n\ta_pulse = amplitude\r\n\tif edge == 1:\r\n\t\ta_pulse *= -1\r\n\r\n\t#Calculating the pulse limits\r\n\tp_limit = 1/float(2*pulse_frequency)\r\n\tp_interval = list ([-p_limit,p_limit])\r\n\r\n\r\n\tfor n in range (0,len(t)) :\r\n\t\tif (t[n] + delta_t) > p_interval[0] and (t[n] + delta_t) <= p_interval[1]:\r\n\t\t\ty[n] += a_pulse * 
np.sin(2*np.pi*pulse_frequency*(t[n]+delta_t))\r\n\r\n\r\n\r\n\t#plt.plot(t,y)\r\n\t#plt.show()\r\n\r\n\tresult = {}\r\n\tresult ['time'] = time\r\n\tresult ['t'] = t\r\n\tresult ['y'] = y\r\n\r\n\treturn result", "def make_frames(signal, sampling_rate, frame_size=0.025, frame_overlap=0.015):\n frame_length = int(round(frame_size * sampling_rate)) #seconds to samples\n frame_step = int(round((frame_size - frame_overlap) * sampling_rate)) #seconds to samples\n #signal_length = len(emphasized_signal)\n\n nf = abs(len(signal) - frame_length)/float(frame_step)\n num_frames = 0\n if int(nf) < 1:\n num_frames = 1 # Make sure that we have at least 1 frame\n else:\n num_frames = int(np.ceil(nf))\n\n padding = np.zeros((num_frames * frame_step) + frame_length - len(signal)) #padding to be added at the end of the signal\n# padded_signal = np.concatenate((signal, padding), axis = None)\n padded_signal = np.zeros((len(padding)+len(signal)))\n np.put(padded_signal, list(range(len(signal))), signal) #put original signal in the front\n np.put(padded_signal, list(range(len(signal), len(padded_signal))), padding) #put padding at the back after signal\n\n indices = np.tile(np.array(range(0, frame_length)), (num_frames, 1)) + np.tile(np.array(range(0, num_frames * frame_step, frame_step)), (frame_length, 1)).T\n frames = padded_signal[indices.astype(np.int32, copy=False)]\n\n #Windowing\n frames = frames * hamming(frame_length)\n return frames", "def beats(signal,fs,hop_len = 64, **kwargs):\n tempo, beat_ = librosa.beat.beat_track(y=signal, sr=fs, hop_length=hop_len)\n return len(beat_)", "def tempo(signal,fs,hop_len = 64, **kwargs):\n tempo, beats = librosa.beat.beat_track(y=signal, sr=fs, hop_length=hop_len)\n return tempo", "def forward(self, x):\n splits = split_sample(x, n_seconds=8)\n spec = MelSpectrogram(n_mels=self.n_mels, n_fft=self.n_fft)\n samples = [spec(s).unsqueeze(1) for s in splits]\n self.model.eval()\n with torch.no_grad():\n preds = [self.model(s) for s in samples]\n return torch.tensor(preds).mean()", "def temporal_sampling(\n num_frames, start_idx, end_idx, num_samples, start_frame=0\n):\n index = torch.linspace(start_idx, end_idx, num_samples)\n index = torch.clamp(index, 0, num_frames - 1).long()\n return start_frame + index" ]
[ "0.6516196", "0.6015201", "0.5844554", "0.5838434", "0.57497185", "0.5695513", "0.5695148", "0.5584178", "0.5576615", "0.5564907", "0.5522588", "0.55157", "0.5506528", "0.55026734", "0.5488658", "0.54744744", "0.5466684", "0.54563797", "0.5440899", "0.5399331", "0.5397807", "0.5392136", "0.5388212", "0.5383736", "0.5379811", "0.5356535", "0.5306861", "0.52927804", "0.52927345", "0.52862424" ]
0.64851505
1
Test the simple JDBC Query Consumer origin for network fault tolerance. We delay the pipeline using a Delay stage so that we have time to shut down the SDC container network and test the retry and resume logic of the origin stage.
def test_query_consumer_network(sdc_builder, sdc_executor, database): number_of_rows = 10_000 table_name = get_random_string(string.ascii_lowercase, 20) pipeline_builder = sdc_builder.get_pipeline_builder() jdbc_query_consumer = pipeline_builder.add_stage('JDBC Query Consumer') jdbc_query_consumer.set_attributes(incremental_mode=False, sql_query=f'SELECT * FROM {table_name}') delay = pipeline_builder.add_stage('Delay') # milliseconds to delay between batches, so as we get time to disconnect network delay.set_attributes(delay_between_batches=1000) trash = pipeline_builder.add_stage('Trash') finisher = pipeline_builder.add_stage('Pipeline Finisher Executor') jdbc_query_consumer >> delay >> trash jdbc_query_consumer >= finisher pipeline = pipeline_builder.build('JDBC Query Origin').configure_for_environment(database) sdc_executor.add_pipeline(pipeline) metadata = sqlalchemy.MetaData() table = sqlalchemy.Table(table_name, metadata, sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True), sqlalchemy.Column('name', sqlalchemy.String(40))) try: logger.info('Creating table %s in %s database ...', table_name, database.type) table.create(database.engine) logger.info('Adding %s rows into %s database ...', number_of_rows, database.type) connection = database.engine.connect() connection.execute(table.insert(), [{'id': i, 'name': str(uuid.uuid4())} for i in range(1, number_of_rows+1)]) pipeline_cmd = sdc_executor.start_pipeline(pipeline) pipeline_cmd.wait_for_pipeline_output_records_count(int(number_of_rows/3)) sdc_executor.container.network_disconnect() sleep(5) # sleep few seconds to have pipeline go into retry mode sdc_executor.container.network_reconnect() pipeline_cmd.wait_for_finished() history = sdc_executor.get_pipeline_history(pipeline) # -2 to take out two events generated from record count pipeline_record_count = (history.latest.metrics.counter('pipeline.batchOutputRecords.counter').count - 2) assert pipeline_record_count == number_of_rows finally: logger.info('Dropping table %s in %s database...', table_name, database.type) table.drop(database.engine)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_jdbc_query_executor(sdc_builder, sdc_executor, database):\n table_name = get_random_string(string.ascii_lowercase, 20)\n table = _create_table(table_name, database)\n\n DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]\n pipeline_builder = sdc_builder.get_pipeline_builder()\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='DELIMITED',\n header_line='WITH_HEADER',\n raw_data='\\n'.join(DATA))\n\n record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')\n\n jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')\n query_str = f\"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')\"\n\n if Version(sdc_builder.version) < Version('3.14.0'):\n jdbc_query_executor.set_attributes(sql_query=query_str)\n else:\n jdbc_query_executor.set_attributes(sql_queries=[query_str])\n\n trash = pipeline_builder.add_stage('Trash')\n dev_raw_data_source >> record_deduplicator >> jdbc_query_executor\n record_deduplicator >> trash\n pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n try:\n sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(RAW_DATA) - 1)\n sdc_executor.stop_pipeline(pipeline)\n\n result = database.engine.execute(table.select())\n data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id\n result.close()\n assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]\n finally:\n logger.info('Dropping table %s in %s database ...', table_name, database.type)\n table.drop(database.engine)", "def test_jdbc_query_executor_successful_query_event(sdc_builder, sdc_executor, database):\n table_name = get_random_string(string.ascii_lowercase, 20)\n table = _create_table(table_name, database)\n\n DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]\n pipeline_builder = sdc_builder.get_pipeline_builder()\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='DELIMITED',\n header_line='WITH_HEADER',\n raw_data='\\n'.join(DATA),\n stop_after_first_batch=True)\n\n query_str = f\"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')\"\n\n jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')\n\n if Version(sdc_builder.version) < Version('3.14.0'):\n jdbc_query_executor.set_attributes(sql_query=query_str)\n else:\n jdbc_query_executor.set_attributes(sql_queries=[query_str])\n\n record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')\n wiretap = pipeline_builder.add_wiretap()\n trash2 = pipeline_builder.add_stage('Trash')\n\n dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= wiretap.destination\n record_deduplicator >> trash2\n pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n try:\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n event_records = wiretap.output_records\n assert len(event_records) == 3\n assert 'successful-query' == event_records[0].header['values']['sdc.event.type']\n assert 'successful-query' == event_records[1].header['values']['sdc.event.type']\n assert 'successful-query' == 
event_records[2].header['values']['sdc.event.type']\n\n result = database.engine.execute(table.select())\n data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id\n result.close()\n assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]\n finally:\n logger.info('Dropping table %s in %s database ...', table_name, database.type)\n table.drop(database.engine)", "def test_jdbc_query_executor_failed_query_event(sdc_builder, sdc_executor, database):\n table_name = get_random_string(string.ascii_lowercase, 20)\n table = _create_table(table_name, database)\n\n DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]\n pipeline_builder = sdc_builder.get_pipeline_builder()\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='DELIMITED',\n header_line='WITH_HEADER',\n raw_data='\\n'.join(DATA),\n stop_after_first_batch=True)\n invalid_table = \"INVALID_TABLE\"\n query_str = f\"INSERT INTO {invalid_table} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')\"\n\n jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')\n\n if Version(sdc_builder.version) < Version('3.14.0'):\n jdbc_query_executor.set_attributes(sql_query=query_str)\n else:\n jdbc_query_executor.set_attributes(sql_queries=[query_str])\n\n record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')\n wiretap = pipeline_builder.add_wiretap()\n trash2 = pipeline_builder.add_stage('Trash')\n\n dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= wiretap.destination\n record_deduplicator >> trash2\n pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n try:\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n event_records = wiretap.output_records\n assert len(event_records) == 3\n assert 'failed-query' == event_records[0].header['values']['sdc.event.type']\n assert 'failed-query' == event_records[1].header['values']['sdc.event.type']\n assert 'failed-query' == event_records[2].header['values']['sdc.event.type']\n\n result = database.engine.execute(table.select())\n data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id\n result.close()\n assert data_from_database == []\n finally:\n logger.info('Dropping table %s in %s database ...', table_name, database.type)\n table.drop(database.engine)", "def test_jdbc_query_executor_failure_state(sdc_builder, sdc_executor, database):\n table_name = get_random_string(string.ascii_lowercase, 20)\n metadata = sqlalchemy.MetaData()\n table = sqlalchemy.Table(table_name,\n metadata,\n sqlalchemy.Column('reason', sqlalchemy.String(50)))\n logger.info('Creating table %s in %s database ...', table_name, database.type)\n table.create(database.engine)\n\n query = f\"INSERT INTO {table_name} VALUES ('${{record:value('/reason')}}')\"\n\n builder = sdc_builder.get_pipeline_builder()\n source = builder.add_stage('JDBC Multitable Consumer')\n source.table_configs = [{\"tablePattern\": 'this_table_do_not_exists'}]\n\n trash = builder.add_stage('Trash')\n\n stop_stage = builder.add_stop_event_stage('JDBC Query')\n if Version(sdc_builder.version) < Version('3.14.0'):\n stop_stage.set_attributes(sql_query=query)\n else:\n stop_stage.set_attributes(sql_queries=[query])\n\n source >> trash\n\n pipeline = 
builder.build().configure_for_environment(database)\n # Injecting failure - this URL won't exists, pipeline won't be able to start properly\n source.jdbc_connection_string = \"jdbc:mysql://this-do-not-exists:3306/awesome-db\"\n sdc_executor.add_pipeline(pipeline)\n\n try:\n sdc_executor.start_pipeline(pipeline, wait=False).wait_for_status('START_ERROR', ignore_errors=True)\n\n result = database.engine.execute(table.select())\n db = result.fetchall()\n result.close()\n\n assert db[0][0] == 'FAILURE'\n finally:\n logger.info('Dropping table %s in %s database ...', table_name, database.type)\n table.drop(database.engine)", "def test_jdbc_consumer_non_incremental_mode(sdc_builder, sdc_executor, database, batch_size):\n if database.type == 'Oracle':\n pytest.skip(\"This test depends on proper case for column names that Oracle auto-uppers.\")\n\n num_records = 8\n input_data = [{'id': i, 'name': get_random_string()} for i in range(1, num_records + 1)]\n table_name = get_random_string(string.ascii_lowercase, 20)\n sql_query = f'SELECT * FROM {table_name} ORDER BY id ASC'\n\n # Create pipeline\n pipeline_builder = sdc_builder.get_pipeline_builder()\n origin = pipeline_builder.add_stage('JDBC Query Consumer')\n origin.set_attributes(incremental_mode=False,\n sql_query=sql_query,\n max_batch_size_in_records=batch_size)\n wiretap = pipeline_builder.add_wiretap()\n finisher = pipeline_builder.add_stage(\"Pipeline Finisher Executor\")\n\n origin >> wiretap.destination\n origin >= finisher\n pipeline = pipeline_builder.build().configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n try:\n # Create and populate table\n logger.info('Creating table %s in %s database ...', table_name, database.type)\n table = sqlalchemy.Table(table_name,\n sqlalchemy.MetaData(),\n sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),\n sqlalchemy.Column('name', sqlalchemy.String(32)))\n table.create(database.engine)\n connection = database.engine.connect()\n connection.execute(table.insert(), input_data)\n\n # Run the pipeline and check the stage consumed all the expected records. 
Repeat several times to\n # ensure non-incremental mode works as expected after restarting the pipeline.\n for i in range(3):\n wiretap.reset()\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n sdc_records = [record.field\n for record in wiretap.output_records]\n assert sdc_records == input_data\n\n finally:\n logger.info('Dropping table %s in %s database...', table_name, database.type)\n table.drop(database.engine)", "def test_jdbc_query_executor_lifecycle_events(sdc_builder, sdc_executor, database):\n if isinstance(database, OracleDatabase):\n pytest.skip('This test does not support Oracle')\n elif type(database) == SQLServerDatabase:\n pytest.skip('This test does not support SQL Server')\n\n table_name = get_random_string(string.ascii_lowercase, 20)\n metadata = sqlalchemy.MetaData()\n table = sqlalchemy.Table(table_name,\n metadata,\n sqlalchemy.Column('user', sqlalchemy.String(50)),\n sqlalchemy.Column('event', sqlalchemy.String(50)))\n logger.info('Creating table %s in %s database ...', table_name, database.type)\n table.create(database.engine)\n\n query = f\"INSERT INTO {table_name} VALUES ('${{record:value('/user')}}', '${{record:attribute('sdc.event.type')}}')\"\n\n builder = sdc_builder.get_pipeline_builder()\n source = builder.add_stage('Dev Raw Data Source')\n source.stop_after_first_batch = True\n source.data_format = 'TEXT'\n source.raw_data = 'SOMETHING'\n\n trash = builder.add_stage('Trash')\n\n start_stage = builder.add_start_event_stage('JDBC Query')\n if Version(sdc_builder.version) < Version('3.14.0'):\n start_stage.set_attributes(sql_query=query)\n else:\n start_stage.set_attributes(sql_queries=[query])\n\n stop_stage = builder.add_stop_event_stage('JDBC Query')\n if Version(sdc_builder.version) < Version('3.14.0'):\n stop_stage.set_attributes(sql_query=query)\n else:\n stop_stage.set_attributes(sql_queries=[query])\n\n source >> trash\n\n pipeline = builder.build().configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n try:\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n result = database.engine.execute(table.select())\n db = sorted(result.fetchall(), key=lambda row: row[1])\n result.close()\n\n assert db[0][0] == 'admin'\n assert db[0][1] == 'pipeline-start'\n assert db[1][0] == ''\n assert db[1][1] == 'pipeline-stop'\n finally:\n logger.info('Dropping table %s in %s database ...', table_name, database.type)\n table.drop(database.engine)", "def test_jdbc_query_executor_multiple_queries(sdc_builder, sdc_executor, database):\n table_name = f'stf_{get_random_string(string.ascii_lowercase, 20)}'\n table = _create_table(table_name, database)\n\n ROWS_IN_DATABASE_UPDATED = [\n {'id': 1, 'name': 'Alex'},\n {'id': 2, 'name': 'Alex'},\n {'id': 3, 'name': 'Alex'}\n ]\n\n DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]\n pipeline_builder = sdc_builder.get_pipeline_builder()\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='DELIMITED',\n header_line='WITH_HEADER',\n raw_data='\\n'.join(DATA))\n\n record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')\n\n jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')\n query_str1 = f\"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')\"\n query_str2 = f\"UPDATE {table_name} SET name = 'Alex' WHERE name = '${{record:value('/name')}}'\"\n\n 
jdbc_query_executor.set_attributes(sql_queries=[query_str1, query_str2])\n\n trash = pipeline_builder.add_stage('Trash')\n dev_raw_data_source >> record_deduplicator >> jdbc_query_executor\n record_deduplicator >> trash\n pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n try:\n sdc_executor.start_pipeline(pipeline).wait_for_pipeline_output_records_count(len(RAW_DATA) - 1)\n sdc_executor.stop_pipeline(pipeline)\n\n result = database.engine.execute(table.select())\n data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id\n result.close()\n assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE_UPDATED]\n finally:\n logger.info(f'Dropping table {table_name} in {database.type} database ...')\n table.drop(database.engine)", "def test_jdbc_lookup_processor(sdc_builder, sdc_executor, database, credential_store):\n table_name = get_random_string(string.ascii_lowercase, 20)\n table = _create_table(table_name, database, quote=True)\n logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)\n connection = database.engine.connect()\n connection.execute(table.insert(), ROWS_IN_DATABASE)\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='DELIMITED',\n header_line='WITH_HEADER',\n raw_data='\\n'.join(LOOKUP_RAW_DATA),\n stop_after_first_batch=True)\n\n jdbc_lookup = pipeline_builder.add_stage('JDBC Lookup')\n query_str = f'SELECT \"name\" FROM \"{table_name}\" WHERE \"id\" = ${{record:value(\"/id\")}}'\n if type(database) in [MySqlDatabase, MariaDBDatabase]:\n query_str = f'SELECT `name` FROM `{table_name}` WHERE `id` = ${{record:value(\"/id\")}}'\n column_mappings = [dict(dataType='USE_COLUMN_TYPE',\n columnName='name',\n field='/FirstName')]\n jdbc_lookup.set_attributes(sql_query=query_str,\n column_mappings=column_mappings)\n\n wiretap = pipeline_builder.add_wiretap()\n dev_raw_data_source >> jdbc_lookup >> wiretap.destination\n pipeline = pipeline_builder.build(title='JDBC Lookup').configure_for_environment(database, credential_store)\n sdc_executor.add_pipeline(pipeline)\n\n LOOKUP_EXPECTED_DATA = copy.deepcopy(ROWS_IN_DATABASE)\n for record in LOOKUP_EXPECTED_DATA:\n record.pop('id')\n record['FirstName'] = record.pop('name')\n\n try:\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n rows_from_wiretap = [{list(record.field.keys())[1]: list(record.field.values())[1].value}\n for record in wiretap.output_records]\n assert rows_from_wiretap == LOOKUP_EXPECTED_DATA\n finally:\n if sdc_executor.get_pipeline_status(pipeline).response.json().get('status') == 'RUNNING':\n sdc_executor.stop_pipeline(pipeline)\n\n logger.info('Dropping table %s in %s database...', table_name, database.type)\n table.drop(database.engine)", "def test_sql_server_cdc_no_more_data(sdc_builder, sdc_executor, database, no_of_threads):\n pipeline_builder = sdc_builder.get_pipeline_builder()\n sql_server_cdc = pipeline_builder.add_stage('SQL Server CDC Client')\n sql_server_cdc.set_attributes(max_pool_size=no_of_threads,\n no_of_threads=no_of_threads)\n\n dest_table_name = get_random_string(string.ascii_uppercase, 9)\n\n dest_table = create_table(database, DEFAULT_SCHEMA_NAME, dest_table_name)\n jdbc_producer = pipeline_builder.add_stage('JDBC Producer')\n\n 
jdbc_producer.set_attributes(schema_name=DEFAULT_SCHEMA_NAME,\n table_name_template=dest_table_name,\n default_operation='INSERT',\n field_to_column_mapping=[])\n\n pipeline_finisher_executor = pipeline_builder.add_stage('Pipeline Finisher Executor')\n\n sql_server_cdc >= pipeline_finisher_executor\n sql_server_cdc >> jdbc_producer\n pipeline = pipeline_builder.build().configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n try:\n tables = []\n no_of_records = 5\n rows_in_database = setup_sample_data(no_of_threads * no_of_records)\n\n for index in range(0, no_of_threads):\n table_name = get_random_string(string.ascii_lowercase, 20)\n # split the rows_in_database into no_of_records for each table\n # e.g. for no_of_records=5, the first table inserts rows_in_database[0:5]\n # and the secord table inserts rows_in_database[5:10]\n table = setup_table(database, DEFAULT_SCHEMA_NAME, table_name,\n rows_in_database[(index*no_of_records): ((index+1)*no_of_records)])\n tables.append(table)\n\n # wait for data captured by cdc jobs in sql server before starting the pipeline\n ct_table_name = f'{DEFAULT_SCHEMA_NAME}_{table_name}_CT'\n wait_for_data_in_ct_table(ct_table_name, no_of_records, database)\n\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n assert_table_replicated(database, rows_in_database, DEFAULT_SCHEMA_NAME, dest_table_name)\n\n finally:\n for table in tables:\n logger.info('Dropping table %s in %s database...', table, database.type)\n table.drop(database.engine)\n\n logger.info('Dropping table %s in %s database...', dest_table, database.type)\n dest_table.drop(database.engine)", "def test_pipeline_preview_with_test_stage(sdc_builder, sdc_executor, database):\n if isinstance(database, OracleDatabase):\n pytest.skip('This test does not support oracle and its upper casing of column names.')\n\n metadata = sqlalchemy.MetaData()\n table_name = get_random_string(string.ascii_lowercase, 20)\n table = sqlalchemy.Table(\n table_name,\n metadata,\n sqlalchemy.Column('id', sqlalchemy.Integer, primary_key=True),\n sqlalchemy.Column('name', sqlalchemy.String(32))\n )\n table.create(database.engine)\n\n builder = sdc_builder.get_pipeline_builder()\n generator = builder.add_stage(label='Dev Data Generator')\n trash = builder.add_stage(label='Trash')\n\n jdbc = builder.add_test_origin_stage(label='JDBC Query Consumer')\n jdbc.incremental_mode = True\n jdbc.sql_query = 'SELECT * FROM {0} WHERE '.format(table_name) + 'id > ${OFFSET} ORDER BY id'\n jdbc.initial_offset = '0'\n jdbc.offset_column = 'id'\n\n generator >> trash\n pipeline = builder.build().configure_for_environment(database)\n\n sdc_executor.add_pipeline(pipeline)\n\n try:\n connection = database.engine.connect()\n connection.execute(table.insert(), [{'id': 1}])\n\n preview = sdc_executor.run_pipeline_preview(pipeline, test_origin=True).preview\n assert preview is not None\n assert preview.issues.issues_count == 0\n\n assert len(preview[jdbc].output) == 1\n assert preview[jdbc].output[0].field['id'].value == 1\n finally:\n logger.info('Dropping table %s in %s database...', table_name, database.type)\n table.drop(database.engine)", "def test_jdbc_query_executor_insert_query_result_count(sdc_builder, sdc_executor, database):\n table_name = get_random_string(string.ascii_lowercase, 20)\n table = _create_table(table_name, database)\n\n DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]\n pipeline_builder = sdc_builder.get_pipeline_builder()\n dev_raw_data_source = 
pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='DELIMITED',\n header_line='WITH_HEADER',\n raw_data='\\n'.join(DATA),\n stop_after_first_batch=True)\n\n query_str = f\"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')\"\n\n jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')\n\n jdbc_query_executor.set_attributes(include_query_result_count_in_events=True)\n\n if Version(sdc_builder.version) < Version('3.14.0'):\n jdbc_query_executor.set_attributes(sql_query=query_str)\n else:\n jdbc_query_executor.set_attributes(sql_queries=[query_str])\n\n record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')\n wiretap = pipeline_builder.add_wiretap()\n trash2 = pipeline_builder.add_stage('Trash')\n\n dev_raw_data_source >> record_deduplicator >> jdbc_query_executor >= wiretap.destination\n record_deduplicator >> trash2\n pipeline = pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n try:\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n event_records = wiretap.output_records\n assert len(event_records) == 3\n assert 'successful-query' == event_records[0].header['values']['sdc.event.type']\n assert 'successful-query' == event_records[1].header['values']['sdc.event.type']\n assert 'successful-query' == event_records[2].header['values']['sdc.event.type']\n\n assert '1 row(s) affected' == event_records[0].value['value']['query-result']['value']\n assert '1 row(s) affected' == event_records[1].value['value']['query-result']['value']\n assert '1 row(s) affected' == event_records[2].value['value']['query-result']['value']\n\n result = database.engine.execute(table.select())\n data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id\n result.close()\n assert data_from_database == [(record['name'], record['id']) for record in ROWS_IN_DATABASE]\n finally:\n logger.info('Dropping table %s in %s database ...', table_name, database.type)\n table.drop(database.engine)", "def test_jdbc_query_executor_parallel_query_execution(sdc_builder, sdc_executor, database, enable_parallel_execution):\n table_name = get_random_string(string.ascii_uppercase, 20)\n table = _create_table(table_name, database)\n\n # first, the inserts - they will run in parallel,\n # then all the updates will run sequentially\n # net result is all records should get updated to the (last) new value.\n # otherwise we've failed.\n statements = []\n for rec in ROWS_IN_DATABASE:\n statements.extend([f\"INSERT INTO {table_name} (name, id) VALUES ('{rec['name']}', {rec['id']})\",\n f\"UPDATE {table_name} SET name = 'bob' WHERE id = {rec['id']}\",\n f\"UPDATE {table_name} SET name = 'MERRICK' WHERE id = {rec['id']}\"])\n # convert to string - Dev Raw Data Source Data Format tab does not seem\n # to \"unroll\" the array into newline-terminated records.\n statements = \"\\n\".join(statements)\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='TEXT', raw_data=statements)\n\n jdbc_query_executor = pipeline_builder.add_stage('JDBC Query', type='executor')\n\n query_str = \"${record:value('/text')}\"\n\n jdbc_query_executor.set_attributes(enable_parallel_queries=enable_parallel_execution,\n maximum_pool_size=2,\n minimum_idle_connections=2)\n\n if 
Version(sdc_builder.version) < Version('3.14.0'):\n jdbc_query_executor.set_attributes(sql_query=query_str)\n else:\n jdbc_query_executor.set_attributes(sql_queries=[query_str])\n\n dev_raw_data_source >> jdbc_query_executor\n\n pipeline = pipeline_builder.build().configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n try:\n sdc_executor.start_pipeline(pipeline)\n sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 3 * len(ROWS_IN_DATABASE))\n sdc_executor.stop_pipeline(pipeline)\n\n result = database.engine.execute(table.select())\n data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id\n result.close()\n assert data_from_database == [('MERRICK', record['id']) for record in ROWS_IN_DATABASE]\n finally:\n logger.info('Dropping table %s in %s database ...', table_name, database.type)\n table.drop(database.engine)", "def test_sql_server_cdc_with_specific_capture_instance_name(sdc_builder, sdc_executor, database):\n\n try:\n schema_name = DEFAULT_SCHEMA_NAME\n tables = []\n no_of_records = 5\n no_of_tables = 3\n target_table_index = 2\n rows_in_database = setup_sample_data(no_of_tables * no_of_records)\n\n # setup the tables first\n for index in range(0, no_of_tables):\n table_name = get_random_string(string.ascii_lowercase, 20)\n # split the rows_in_database into no_of_records for each table\n # e.g. for no_of_records=5, the first table inserts rows_in_database[0:5]\n # and the secord table inserts rows_in_database[5:10]\n table = setup_table(database, schema_name, table_name,\n rows_in_database[(index*no_of_records): ((index+1)*no_of_records)])\n tables.append(table)\n\n if (index == target_table_index):\n capture_instance_name = f'{schema_name}_{table_name}'\n\n target_rows = rows_in_database[target_table_index * no_of_records: (target_table_index + 1) * no_of_records]\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n sql_server_cdc = pipeline_builder.add_stage('SQL Server CDC Client')\n sql_server_cdc.set_attributes(table_configuration=[{'capture_instance': capture_instance_name}])\n\n dest_table_name = get_random_string(string.ascii_uppercase, 9)\n\n dest_table = create_table(database, DEFAULT_SCHEMA_NAME, dest_table_name)\n tables.append(dest_table)\n jdbc_producer = pipeline_builder.add_stage('JDBC Producer')\n\n jdbc_producer.set_attributes(schema_name=DEFAULT_SCHEMA_NAME,\n table_name_template=dest_table_name,\n default_operation='INSERT',\n field_to_column_mapping=[])\n\n pipeline_finisher_executor = pipeline_builder.add_stage('Pipeline Finisher Executor')\n\n sql_server_cdc >= pipeline_finisher_executor\n sql_server_cdc >> jdbc_producer\n pipeline = pipeline_builder.build().configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n # wait for data captured by cdc jobs in sql server before starting the pipeline\n ct_table_name = f'{capture_instance_name}_CT'\n wait_for_data_in_ct_table(ct_table_name, no_of_records, database)\n\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n assert_table_replicated(database, target_rows, DEFAULT_SCHEMA_NAME, dest_table_name)\n\n finally:\n for table in tables:\n logger.info('Dropping table %s in %s database...', table, database.type)\n table.drop(database.engine)", "def test_jdbc_tee_processor(sdc_builder, sdc_executor, database):\n if isinstance(database, OracleDatabase):\n pytest.skip('JDBC Tee Processor does not support Oracle')\n elif type(database) == SQLServerDatabase:\n pytest.skip('JDBC Tee Processor does not support SQL 
Server')\n\n table_name = get_random_string(string.ascii_lowercase, 20)\n table = _create_table(table_name, database)\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='DELIMITED',\n header_line='WITH_HEADER',\n raw_data='\\n'.join(RAW_DATA),\n stop_after_first_batch=True)\n\n jdbc_tee = pipeline_builder.add_stage('JDBC Tee')\n # Note that here ids are not inserted. Database generates them automatically.\n field_to_column_mapping = [dict(columnName='name',\n dataType='USE_COLUMN_TYPE',\n field='/name',\n paramValue='?')]\n generated_column_mappings = [dict(columnName='id',\n dataType='USE_COLUMN_TYPE',\n field='/id')]\n jdbc_tee.set_attributes(default_operation='INSERT',\n field_to_column_mapping=field_to_column_mapping,\n generated_column_mappings=generated_column_mappings,\n table_name=table_name)\n\n wiretap = pipeline_builder.add_wiretap()\n dev_raw_data_source >> jdbc_tee >> wiretap.destination\n pipeline = pipeline_builder.build(title='JDBC Tee').configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n try:\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # Verify the JDBC Tee processor has got new ids which were generated by database.\n rows_from_wiretap = [{list(item.field.keys())[0]: list(item.field.values())[0].value,\n list(item.field.keys())[1]: int(list(item.field.values())[1].value)}\n for item in wiretap.output_records]\n assert rows_from_wiretap == ROWS_IN_DATABASE\n finally:\n if sdc_executor.get_pipeline_status(pipeline).response.json().get('status') == 'RUNNING':\n sdc_executor.stop_pipeline(pipeline)\n logger.info('Dropping table %s in %s database ...', table_name, database.type)\n table.drop(database.engine)", "def test_jdbc_query_executor_select_query_result_count(sdc_builder, sdc_executor, database):\n table_name = get_random_string(string.ascii_lowercase, 20)\n table = _create_table(table_name, database)\n\n DATA = ['id,name'] + [','.join(str(item) for item in rec.values()) for rec in ROWS_IN_DATABASE]\n pipeline_builder = sdc_builder.get_pipeline_builder()\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='DELIMITED',\n header_line='WITH_HEADER',\n raw_data='\\n'.join(DATA),\n stop_after_first_batch=True)\n\n query_str1 = f\"INSERT INTO {table_name} (name, id) VALUES ('${{record:value('/name')}}', '${{record:value('/id')}}')\"\n query_str2 = f\"SELECT * FROM {table_name}\"\n\n jdbc_query_executor1 = pipeline_builder.add_stage('JDBC Query', type='executor')\n if Version(sdc_builder.version) < Version('3.14.0'):\n jdbc_query_executor1.set_attributes(sql_query=query_str1)\n else:\n jdbc_query_executor1.set_attributes(sql_queries=[query_str1])\n\n jdbc_query_executor2 = pipeline_builder.add_stage('JDBC Query', type='executor')\n\n jdbc_query_executor2.set_attributes(include_query_result_count_in_events=True)\n\n if Version(sdc_builder.version) < Version('3.14.0'):\n jdbc_query_executor2.set_attributes(sql_query=query_str2)\n else:\n jdbc_query_executor2.set_attributes(sql_queries=[query_str2])\n\n record_deduplicator = pipeline_builder.add_stage('Record Deduplicator')\n wiretap = pipeline_builder.add_wiretap()\n trash2 = pipeline_builder.add_stage('Trash')\n\n dev_raw_data_source >> record_deduplicator >> jdbc_query_executor1 >= jdbc_query_executor2 >= wiretap.destination\n record_deduplicator >> trash2\n pipeline = 
pipeline_builder.build(title='JDBC Query Executor').configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n try:\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n event_records = wiretap.output_records\n assert len(event_records) == 3\n assert 'successful-query' == event_records[0].header['values']['sdc.event.type']\n assert 'successful-query' == event_records[1].header['values']['sdc.event.type']\n assert 'successful-query' == event_records[2].header['values']['sdc.event.type']\n\n assert '3 row(s) returned' == event_records[0].value['value']['query-result']['value']\n assert '3 row(s) returned' == event_records[1].value['value']['query-result']['value']\n assert '3 row(s) returned' == event_records[2].value['value']['query-result']['value']\n\n result = database.engine.execute(table.select())\n result.close()\n finally:\n logger.info('Dropping table %s in %s database ...', table_name, database.type)\n table.drop(database.engine)", "def when_query_pipeline(context):\n result = context.stage.runTest('testing stage endpoint')\n print('Result = {}'.format(result))\n context.result = result", "def test_runner_with_db(dataset, time_start, time_diff):\n\n session = dataset\n\n end_date = datetime.datetime(2020, 5, 17, 13, 0, 5)\n replay_rate = 1.0 \n\n db_connector_test = DataBaseConnector(session=session, \n table_name='timeseries_dataset', \n time_column='timestamp', \n start_date=time_start,\n end_date=end_date)\n\n runner = CentralRunner(db_connection=db_connector_test, \n output_system='mock_output_systerm', \n start_time=time_start, \n end_time=end_date,\n replay_rate=replay_rate )\n\n\n results_test = [\n {'timestamp': datetime.datetime(2021, 1, 1, 10, 1, 0), 'text': 'bob', 'value': 10.0},\n {'timestamp': datetime.datetime(2021, 1, 1, 10, 1, 1), 'text': 'cat', 'value':-10.0},\n {'timestamp': datetime.datetime(2021, 1, 1, 10, 1, 1), 'text': 'eat', 'value': 12.1}\n ]\n \n # test that the trigger_release is working right\n # expect 1\n start = time.perf_counter()\n \n code_start = datetime.datetime.now()\n\n # we need to retink the way that we trigger this....\n runner._trigger_release(result_set=results_test, code_start=code_start, replay_start_time=time_start, \n batch=(datetime.datetime(2021, 1, 1, 10, 1, 0), datetime.datetime(2021, 1, 1, 10, 1, 1)), \n replay_rate=replay_rate)\n \n end = time.perf_counter()\n\n code_time = end - start\n assert int(code_time) == time_diff", "def test_jdbc_tables_header(sdc_builder, sdc_executor, database):\n\n table_name1 = get_random_string(string.ascii_lowercase, 20)\n table_name2 = get_random_string(string.ascii_lowercase, 20)\n if database.type == 'Oracle':\n # When not quoted, Oracle automatically converts names to upper case. Quoting is inconsistent between\n # databases, so it is preferable to avoid it in SQL below. 
And to get a compatible result during creation,\n # we omit quotes here also.\n create_quotes_names = False\n else:\n create_quotes_names = True\n\n logger.info('Creating two identical tables in %s database...', database.type)\n table1 = _create_table(table_name1, database, quote=create_quotes_names)\n table2 = _create_table(table_name2, database, quote=create_quotes_names)\n\n connection = database.engine.connect()\n try:\n logger.info('Adding %s rows into each table...', len(ROWS_IN_DATABASE))\n connection.execute(table1.insert(), ROWS_IN_DATABASE)\n connection.execute(table2.insert(), ROWS_IN_DATABASE)\n\n builder = sdc_builder.get_pipeline_builder()\n\n sql_query = \"SELECT t1.id, t2.name \" \\\n f\"FROM {table_name1} t1 \" \\\n f\" JOIN {table_name2} t2 \" \\\n \" ON t1.name = t2.name \" \\\n \"WHERE t1.id > ${OFFSET} \" \\\n \"ORDER BY t1.id\"\n origin = builder.add_stage('JDBC Query Consumer')\n origin.sql_query = sql_query\n origin.offset_column = 'id'\n origin.incremental_mode = True\n origin.on_unknown_type = 'STOP_PIPELINE'\n\n wiretap = builder.add_wiretap()\n\n origin >> wiretap.destination\n\n pipeline = builder.build().configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n sdc_executor.start_pipeline(pipeline)\n sdc_executor.wait_for_pipeline_metric(pipeline, 'input_record_count', 3)\n sdc_executor.stop_pipeline(pipeline)\n\n # Check jdbc.tables header.\n tables_header = wiretap.output_records[0].header['values']['jdbc.tables']\n logger.debug('%s=\"%s\"', \"header['values']['jdbc.tables']\", tables_header)\n logger.debug('%s=\"%s\"', \"database.type\", database.type)\n # According to documentation some JDBC drivers may not provide this information:\n # https://docs.streamsets.com/platform-datacollector/latest/datacollector/UserGuide/Origins/JDBCConsumer.html\n if database.type == 'Oracle':\n # Oracle does not seem to populate this field\n assert tables_header == \"\"\n elif database.type == 'SQLServer':\n # SQLServer does not seem to populate this field\n assert tables_header == \"\"\n else:\n # MySQL, PostgreSQL and MiriaDB all return source table names as a coma-delimited list.\n # Ordering of the list is not known for PostgreSQL and MiriaDB, but For MySQL it is predictably random.\n # The logic below asserts that both names are reported in any order (and case is ignored, though this\n # should not be necessary):\n tables_list = tables_header.split(',')\n tables_normalized_map = map(lambda x:x.lower(), tables_list)\n assert set(tables_normalized_map) == {table_name1, table_name2}\n\n finally:\n try:\n logger.info('Dropping table %s in %s database ...', table_name1, database.type)\n connection.execute(f\"DROP TABLE {table_name1}\")\n logger.info('Dropping table %s in %s database ...', table_name2, database.type)\n connection.execute(f\"DROP TABLE {table_name2}\")\n except Exception as ex:\n logger.warning('Error during cleanup', exc_info=ex)", "def _jdbc_query_origin(self):\n self.origin_system = self.environments['database'].engine.dialect.name\n self._setup_origin_table()\n pipeline_builder = self.sdc_builder.get_pipeline_builder()\n jdbc_query_consumer = pipeline_builder.add_stage('JDBC Query Consumer', type='origin')\n jdbc_query_consumer.set_attributes(incremental_mode=False,\n sql_query=f'SELECT * FROM {self.dataset}')\n return jdbc_query_consumer, pipeline_builder", "def test_runner_full_loop(caplog, dataset):\n caplog.set_level(logging.DEBUG)\n\n session = dataset\n\n start_date = datetime.datetime(2020, 5, 17, 13, 0, 0)\n end_date = 
datetime.datetime(2020, 5, 17, 13, 0, 5)\n replay_rate = 1 \n \n db_connector_test = DataBaseConnector(session=session, \n table_name='timeseries_dataset', \n time_column='timestamp', \n start_date=start_date,\n end_date=end_date)\n\n test_publisher = ConsolePublisher()\n\n runner = CentralRunner(db_connection=db_connector_test, \n output_system=test_publisher, \n start_time=start_date, \n end_time=end_date,\n replay_rate=replay_rate )\n\n start = time.perf_counter()\n \n runner.run()\n\n end = time.perf_counter()\n\n code_time = end - start\n \n print(code_time)\n \n assert int(code_time) == 4", "def test_jdbc_tee_processor_multi_ops(sdc_builder, sdc_executor, database, use_multi_row):\n if isinstance(database, OracleDatabase):\n pytest.skip('JDBC Tee Processor does not support Oracle')\n if type(database) == SQLServerDatabase:\n pytest.skip('JDBC Tee Processor does not support SQL Server')\n\n table_name = get_random_string(string.ascii_lowercase, 20)\n pipeline_builder = sdc_builder.get_pipeline_builder()\n DATA = [\n {'operation': 2, 'name': 'Jarcec', 'id': 2}, # delete\n {'operation': 3, 'name': 'Hari', 'id': 3}, # update\n {'operation': 1, 'name': 'Eddie'}, # insert, id will be added by JDBC Tee\n {'operation': 1, 'name': 'Fran'} # insert, id will be added by JDBC Tee\n ]\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON',\n raw_data='\\n'.join(json.dumps(rec) for rec in DATA),\n stop_after_first_batch=True)\n\n HEADER_EXPRESSIONS = [dict(attributeToSet='sdc.operation.type',\n headerAttributeExpression=\"${record:value('/operation')}\")]\n expression_evaluator = pipeline_builder.add_stage('Expression Evaluator')\n expression_evaluator.header_attribute_expressions = HEADER_EXPRESSIONS\n\n FIELD_TO_COLUMN = [dict(columnName='name', field='/name', paramValue='?')]\n jdbc_tee = pipeline_builder.add_stage('JDBC Tee')\n jdbc_tee.set_attributes(default_operation='INSERT',\n field_to_column_mapping=FIELD_TO_COLUMN,\n generated_column_mappings=[dict(columnName='id', field='/id')],\n table_name=table_name,\n use_multi_row_operation=use_multi_row)\n\n wiretap = pipeline_builder.add_wiretap()\n dev_raw_data_source >> expression_evaluator >> jdbc_tee >> wiretap.destination\n pipeline_title = 'JDBC Tee MultiOps MultiRow' if use_multi_row else 'JDBC Tee MultiOps SingleRow'\n pipeline = pipeline_builder.build(title=pipeline_title).configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n table = _create_table(table_name, database)\n try:\n logger.info('Adding %s rows into %s database ...', len(ROWS_IN_DATABASE), database.type)\n connection = database.engine.connect()\n # Passing only names to get the correct sequence numbers esp. 
PostgreSQL\n if type(database) == SQLServerDatabase:\n connection.execute(table.insert(), [{'id': row['id'], 'name': row['name']} for row in ROWS_IN_DATABASE])\n else:\n connection.execute(table.insert(), [{'name': row['name']} for row in ROWS_IN_DATABASE])\n\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n sequence_id = len(ROWS_IN_DATABASE)\n # Verify the database is updated.\n result = database.engine.execute(table.select())\n data_from_database = sorted(result.fetchall(), key=lambda row: row[1]) # order by id\n result.close()\n expected_data = [(row['name'], row['id']) for row in ROWS_IN_DATABASE]\n for record in DATA:\n if record['operation'] == 1: # insert\n sequence_id += 1\n expected_data.append((record['name'], sequence_id))\n elif record['operation'] == 2: # delete\n expected_data = [row for row in expected_data if row[1] != record['id']]\n elif record['operation'] == 3: # update\n expected_data = [row if row[1] != record['id'] else (record['name'], row[1]) for row in expected_data]\n assert data_from_database == expected_data\n\n # Verify the JDBC Tee processor has the new ID which were generated by database.\n name_id_from_output = [(record.field['name'], record.field['id']) for record in wiretap.output_records]\n assert name_id_from_output == [('Jarcec', 2), ('Hari', 3), ('Eddie', 4), ('Fran', 5)]\n finally:\n logger.info('Dropping table %s in %s database ...', table_name, database.type)\n table.drop(database.engine)", "def test_standard_sqs_consumer(sdc_builder, sdc_executor, aws):\n queue_name = '{}_{}'.format(aws.sqs_queue_prefix, get_random_string(string.ascii_letters, 10))\n\n builder = sdc_builder.get_pipeline_builder()\n amazon_sqs_consumer = builder.add_stage('Amazon SQS Consumer')\n amazon_sqs_consumer.set_attributes(data_format='TEXT',\n queue_name_prefixes=[queue_name])\n trash = builder.add_stage('Trash')\n amazon_sqs_consumer >> trash\n\n consumer_origin_pipeline = builder.build(title='Amazon SQS Consumer pipeline').configure_for_environment(aws)\n sdc_executor.add_pipeline(consumer_origin_pipeline)\n\n client = aws.sqs\n logger.info('Creating %s SQS queue on AWS ...', queue_name)\n queue_url = client.create_queue(QueueName=queue_name)['QueueUrl']\n try:\n # note there is a limit of 10 messages only for sending in a batch\n number_of_messages = 10\n message_entries = [{'Id': str(i), 'MessageBody': 'Message {0}'.format(i)} for i in range(number_of_messages)]\n sent_response = client.send_message_batch(QueueUrl=queue_url, Entries=message_entries)\n if len(sent_response.get('Successful', [])) != number_of_messages:\n raise Exception('Test messages not successfully sent to the queue %s', queue_name)\n\n # messages are published, read through the pipeline and assert\n snapshot = sdc_executor.capture_snapshot(consumer_origin_pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(consumer_origin_pipeline)\n\n result_data = [str(record.field['text']) for record in snapshot[amazon_sqs_consumer.instance_name].output]\n assert sorted(result_data) == sorted([message['MessageBody'] for message in message_entries])\n finally:\n if queue_url:\n logger.info('Deleting %s SQS queue of %s URL on AWS ...', queue_name, queue_url)\n client.delete_queue(QueueUrl=queue_url)", "def test_jdbc_tee_commits_on_empty_batches(use_multi_row, sdc_builder, sdc_executor, database):\n\n if isinstance(database, OracleDatabase) and use_multi_row:\n pytest.skip('multi_row is not supported on oracle databases')\n\n builder = sdc_builder.get_pipeline_builder()\n table_name = 
get_random_string(string.ascii_lowercase, 20)\n\n script = \"\"\"\n// First batch contains exactly one record\nvar batch = sdc.createBatch();\nvar record = sdc.createRecord('generated data');\nrecord.value = {'name': 'A'};\nbatch.add(record);\nbatch.process(\"batch\", \"non-empty\");\n\n// Sent 1000 batches that will be empty\nvar step;\nfor (step = 0; step < 1000; step++) {\n batch = sdc.createBatch();\n batch.process(\"whatever\", \"batch-\" + step);\n}\n\"\"\"\n\n origin = builder.add_stage('JavaScript Scripting')\n origin.record_type = 'NATIVE_OBJECTS'\n origin.user_script = script\n\n tee = builder.add_stage('JDBC Tee')\n tee.default_operation = 'INSERT'\n tee.field_to_column_mapping = [dict(columnName='name', field='/name', paramValue='?')]\n tee.generated_column_mappings = [dict(columnName='id', field='/id')]\n tee.table_name = table_name\n tee.use_multi_row_operation = use_multi_row\n\n trash = builder.add_stage('Trash')\n\n origin >> tee >> trash\n\n pipeline = builder.build().configure_for_environment(database)\n sdc_executor.add_pipeline(pipeline)\n\n table = _create_table(table_name, database)\n try:\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # First of all, verify that the table have exactly one record with expected values\n result = database.engine.execute(table.select())\n db = sorted(result.fetchall(), key=lambda row: row[1]) # order by id\n result.close()\n assert len(db) == 1\n assert db[0][0] == 'A'\n assert db[0][1] == 1\n\n # Second of all, we should see exactly 1001 batches generated by our scripting origin\n history = sdc_executor.get_pipeline_history(pipeline)\n assert history.latest.metrics.counter('pipeline.batchCount.counter').count == 1001\n\n # Then let's explore how many commits have we generated to ensure that we don't have 1001 commits\n expected_commits = 1 if use_multi_row else 2\n assert history.latest.metrics.timer('custom.JDBCTee_01.Commit Timer.0.timer').count == expected_commits\n finally:\n logger.info('Dropping table %s in %s database ...', table_name, database.type)\n table.drop(database.engine)", "def test_jdbc_tee_processor_mysql_use_legacy_zoned_datetime_format_property(sdc_builder, sdc_executor, input,\n converter_type, database_type,\n expected_legacy_format,\n expected_default_format, database,\n keep_data,\n use_legacy_zoned_datetime_format):\n\n if Version(database.version) < Version('8.0.0'):\n pytest.skip(f\"Test is skipped because MySQL database version is {database.version} < 8.0.0\")\n\n table_name = get_random_string(string.ascii_lowercase, 20)\n connection = database.engine.connect()\n connection.execute(\"SET sql_mode=ANSI_QUOTES\")\n\n # Build pipeline\n builder = sdc_builder.get_pipeline_builder()\n origin = builder.add_stage('Dev Raw Data Source')\n origin.data_format = 'JSON'\n origin.stop_after_first_batch = True\n origin.raw_data = json.dumps({\"value\": input})\n\n converter = builder.add_stage('Field Type Converter')\n converter.conversion_method = 'BY_FIELD'\n converter.field_type_converter_configs = [{\n 'fields': ['/value'],\n 'targetType': converter_type,\n 'dataLocale': 'en,US',\n 'dateFormat': 'YYYY_MM_DD_HH_MM_SS',\n 'zonedDateTimeFormat': 'ISO_OFFSET_DATE_TIME',\n 'scale': 2\n }]\n\n tee = builder.add_stage('JDBC Tee')\n tee.table_name = table_name\n tee.default_operation = 'INSERT'\n tee.field_to_column_mapping = []\n tee.on_record_error = 'STOP_PIPELINE'\n tee.generated_column_mappings = [{\n 'dataType': 'USE_COLUMN_TYPE',\n 'columnName': 'id',\n 'field': '/id'\n }]\n wiretap = 
builder.add_wiretap()\n\n properties = [{'key': 'useLegacyZonedDatetime', 'value': str(use_legacy_zoned_datetime_format)}]\n attributes = {'additional_jdbc_configuration_properties': properties}\n tee.set_attributes(**attributes)\n\n origin >> converter >> tee >> wiretap.destination\n\n pipeline = builder.build().configure_for_environment(database)\n # Workarounds for STE,STF specific stuff\n\n tee.init_query = \"SET sql_mode=ANSI_QUOTES\"\n\n sdc_executor.add_pipeline(pipeline)\n\n try:\n # Create table\n connection.execute(f\"\"\"\n CREATE TABLE \"{table_name}\"(\n \"id\" int primary key auto_increment,\n \"value\" {database_type} NULL\n )\n \"\"\")\n\n # Run pipeline and read from Elasticsearch to assert\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # Verify returned records\n records = wiretap.output_records\n assert len(records) == 1\n assert records[0].field['id'] == 1\n\n rs = connection.execute(f'select \"id\", \"value\" from \"{table_name}\"')\n rows = [row for row in rs]\n assert len(rows) == 1\n\n # Generated key is \"1\"\n assert rows[0][0] == 1\n\n # And assert actual value - few corrections for \"problematical\" types\n actual = rows[0][1]\n if type(actual) == memoryview:\n actual = actual.tobytes()\n\n expected = expected_legacy_format if use_legacy_zoned_datetime_format is True else expected_default_format\n assert actual == expected\n\n finally:\n if not keep_data:\n logger.info('Dropping table %s in %s database ...', table_name, database.type)\n connection.execute(f'DROP TABLE \"{table_name}\"')", "def test_basic_with_special_quoting(sdc_builder, sdc_executor, gcp):\n bucket_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n table_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n data = '\\n'.join(json.dumps(rec) for rec in ROWS_IN_DATABASE_QUOTING)\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n # Dev raw data source\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON', raw_data=data, stop_after_first_batch=True)\n\n # Google BigQuery destination stage\n bigquery = pipeline_builder.add_stage(name=DESTINATION_STAGE_NAME)\n bigquery.set_attributes(project_id=gcp.project_id,\n dataset=dataset_name,\n table=table_name,\n bucket=bucket_name,\n enable_data_drift=True,\n create_table=True,\n create_dataset=True,\n purge_stage_file_after_ingesting=True,\n quote_character=\"|\",\n column_separator=\":\")\n\n dev_raw_data_source >> bigquery\n\n pipeline = pipeline_builder.build().configure_for_environment(gcp)\n\n bigquery_client = gcp.bigquery_client\n dataset_ref = DatasetReference(gcp.project_id, dataset_name)\n\n try:\n logger.info(f'Creating temporary bucket {bucket_name}')\n bucket = gcp.retry_429(gcp.storage_client.create_bucket)(bucket_name)\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # Verify by reading records using Google BigQuery client\n # We retrieve the table after the pipeline has automatically created it\n table = bigquery_client.get_table(f'{dataset_name}.{table_name}')\n data_from_bigquery = [tuple(row.values()) for row in bigquery_client.list_rows(table)]\n data_from_bigquery.sort()\n\n expected_data = [tuple(v for v in d.values()) for d in ROWS_IN_DATABASE_QUOTING]\n\n assert len(data_from_bigquery) == len(expected_data)\n assert data_from_bigquery == expected_data\n finally:\n _clean_up_bigquery(bigquery_client, 
dataset_ref)\n _clean_up_gcs(gcp, bucket, bucket_name)", "def test_runner_long_duration(caplog, replay_rate):\n\n caplog.set_level(logging.INFO)\n\n path = 'test_data/test_data.parquet'\n time_column = 'requesttimestamp'\n start_date = datetime.datetime(2020, 7, 10, 0, 1, 0)\n end_date = datetime.datetime(2020, 7, 10, 0, 5, 0)\n replay_rate = replay_rate\n bootstrap_servers = 'kafka:9092'\n topic = 'test_stream_2'\n\n fileconnector = ParquetFileConnector(path=path, time_column=time_column, \n start_date=start_date, end_date=end_date)\n\n fileconnector.startup_checks()\n\n publisher = KafkaPublisher(\n bootstrap_servers=bootstrap_servers,\n topic=topic\n )\n\n runner = CentralRunner(db_connection=fileconnector, \n output_system=publisher, \n start_time=start_date, \n end_time=end_date,\n replay_rate=replay_rate )\n\n start = time.perf_counter()\n \n runner.run()\n\n #publisher.close()\n\n end = time.perf_counter()\n\n code_time = end - start\n\n period_duration = (end_date - datetime.timedelta(seconds=replay_rate) - start_date).total_seconds()\n\n assert abs(code_time - period_duration/replay_rate) < 1\n\n #assert int(code_time) == (end_date - start_date).total_seconds() * replay_rate", "def test_basic(sdc_builder, sdc_executor, gcp, file_format):\n\n if Version(sdc_builder.version) < Version('5.5.0') and file_format == 'JSON':\n pytest.skip('JSON staging introduced in 5.5.0')\n\n bucket_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n table_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n data = '\\n'.join(json.dumps(rec) for rec in ROWS_IN_DATABASE)\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n # Dev raw data source\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON',\n raw_data=data,\n stop_after_first_batch=True)\n\n # Google BigQuery destination stage\n bigquery = pipeline_builder.add_stage(name=DESTINATION_STAGE_NAME)\n bigquery.set_attributes(project_id=gcp.project_id,\n dataset=dataset_name,\n table=table_name,\n bucket=bucket_name,\n staging_file_format=file_format,\n enable_data_drift=False,\n create_table=False,\n purge_stage_file_after_ingesting=True)\n\n dev_raw_data_source >> bigquery\n\n pipeline = pipeline_builder.build().configure_for_environment(gcp)\n\n bigquery_client = gcp.bigquery_client\n dataset_ref = DatasetReference(gcp.project_id, dataset_name)\n\n try:\n logger.info(f'Creating temporary bucket {bucket_name}')\n bucket = gcp.retry_429(gcp.storage_client.create_bucket)(bucket_name)\n\n logger.info('Creating dataset %s and table %s using Google BigQuery client ...', dataset_name, table_name)\n bigquery_client.create_dataset(dataset_ref)\n table = bigquery_client.create_table(Table(dataset_ref.table(table_name), schema=SCHEMA))\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # Verify by reading records using Google BigQuery client\n data_from_bigquery = [tuple(row.values()) for row in bigquery_client.list_rows(table)]\n data_from_bigquery.sort()\n\n expected_data = [tuple(v for v in d.values()) for d in ROWS_IN_DATABASE]\n\n assert len(data_from_bigquery) == len(expected_data)\n assert data_from_bigquery == expected_data\n finally:\n _clean_up_bigquery(bigquery_client, dataset_ref)\n _clean_up_gcs(gcp, bucket, bucket_name)", "def test_mongodb_origin_simple(sdc_builder, sdc_executor, mongodb):\n pipeline_builder = 
sdc_builder.get_pipeline_builder()\n pipeline_builder.add_error_stage('Discard')\n\n mongodb_origin = pipeline_builder.add_stage('MongoDB', type='origin')\n mongodb_origin.set_attributes(capped_collection=False,\n database=get_random_string(ascii_letters, 5),\n collection=get_random_string(ascii_letters, 10))\n\n trash = pipeline_builder.add_stage('Trash')\n mongodb_origin >> trash\n pipeline = pipeline_builder.build().configure_for_environment(mongodb)\n\n try:\n # MongoDB and PyMongo add '_id' to the dictionary entries e.g. docs_in_database\n # when used for inserting in collection. Hence the deep copy.\n docs_in_database = copy.deepcopy(ORIG_DOCS)\n\n # Create documents in MongoDB using PyMongo.\n # First a database is created. Then a collection is created inside that database.\n # Then documents are created in that collection.\n logger.info('Adding documents into %s collection using PyMongo...', mongodb_origin.collection)\n mongodb_database = mongodb.engine[mongodb_origin.database]\n mongodb_collection = mongodb_database[mongodb_origin.collection]\n insert_list = [mongodb_collection.insert_one(doc) for doc in docs_in_database]\n assert len(insert_list) == len(docs_in_database)\n\n # Start pipeline and verify the documents using snaphot.\n sdc_executor.add_pipeline(pipeline)\n snapshot = sdc_executor.capture_snapshot(pipeline=pipeline, start_pipeline=True).snapshot\n sdc_executor.stop_pipeline(pipeline)\n rows_from_snapshot = [{record.value['value']['name']['sqpath'].lstrip('/'):\n record.value['value']['name']['value']}\n for record in snapshot[mongodb_origin].output]\n\n assert rows_from_snapshot == ORIG_DOCS\n\n finally:\n logger.info('Dropping %s database...', mongodb_origin.database)\n mongodb.engine.drop_database(mongodb_origin.database)", "def test_on_record_error(sdc_builder, sdc_executor, stage_attributes):\n\n DATA = {'name': 'Al Gore', 'birthplace': 'Washington, D.C.'}\n on_record_error = stage_attributes['on_record_error']\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.data_format = 'JSON'\n dev_raw_data_source.raw_data = json.dumps(DATA)\n dev_raw_data_source.stop_after_first_batch = True\n\n field_replacer = pipeline_builder.add_stage('Field Replacer')\n field_replacer.set_attributes(replacement_rules=[{'setToNull': False, 'fields': '/age'}],\n field_does_not_exist='TO_ERROR',\n **stage_attributes)\n\n wiretap = pipeline_builder.add_wiretap()\n\n dev_raw_data_source >> field_replacer >> wiretap.destination\n\n pipeline = pipeline_builder.build()\n\n sdc_executor.add_pipeline(pipeline)\n\n if on_record_error == 'DISCARD':\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n assert not wiretap.error_records and not wiretap.output_records\n\n elif on_record_error == 'STOP_PIPELINE':\n try:\n sdc_executor.start_pipeline(pipeline).wait_for_status('RUN_ERROR')\n\n assert False, 'An exception should have been thrown'\n except RunError:\n\n assert not wiretap.error_records and not wiretap.output_records\n\n elif on_record_error == 'TO_ERROR':\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n record = wiretap.error_records[0]\n assert record.field == DATA and not wiretap.output_records", "def test_basic_values_as_null(sdc_builder, sdc_executor, gcp, null_value):\n bucket_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n dataset_name = f'stf_{get_random_string(ascii_lowercase, 10)}'\n table_name = f'stf_{get_random_string(ascii_lowercase, 
10)}'\n data = '\\n'.join(json.dumps(rec) for rec in ROWS_IN_DATABASE_NULL)\n\n pipeline_builder = sdc_builder.get_pipeline_builder()\n\n # Dev raw data source\n dev_raw_data_source = pipeline_builder.add_stage('Dev Raw Data Source')\n dev_raw_data_source.set_attributes(data_format='JSON',\n raw_data=data,\n stop_after_first_batch=True)\n\n # Google BigQuery destination stage\n bigquery = pipeline_builder.add_stage(name=DESTINATION_STAGE_NAME)\n bigquery.set_attributes(project_id=gcp.project_id,\n dataset=dataset_name,\n table=table_name,\n bucket=bucket_name,\n enable_data_drift=True,\n create_table=True,\n create_dataset=True,\n purge_stage_file_after_ingesting=True,\n null_value=null_value)\n\n dev_raw_data_source >> bigquery\n\n pipeline = pipeline_builder.build().configure_for_environment(gcp)\n\n bigquery_client = gcp.bigquery_client\n dataset_ref = DatasetReference(gcp.project_id, dataset_name)\n\n try:\n logger.info(f'Creating temporary bucket {bucket_name}')\n bucket = gcp.retry_429(gcp.storage_client.create_bucket)(bucket_name)\n\n sdc_executor.add_pipeline(pipeline)\n sdc_executor.start_pipeline(pipeline).wait_for_finished()\n\n # Verify by reading records using Google BigQuery client\n # We retrieve the table after the pipeline has automatically created it\n table = bigquery_client.get_table(f'{dataset_name}.{table_name}')\n data_from_bigquery = [tuple(row.values()) for row in bigquery_client.list_rows(table)]\n data_from_bigquery.sort()\n\n expected_data = [tuple(None if v == null_value else v for v in d.values()) for d in ROWS_IN_DATABASE_NULL]\n\n assert len(data_from_bigquery) == len(expected_data)\n assert data_from_bigquery == expected_data\n finally:\n _clean_up_bigquery(bigquery_client, dataset_ref)\n _clean_up_gcs(gcp, bucket, bucket_name)" ]
[ "0.70550436", "0.7047451", "0.6958239", "0.69195366", "0.6885504", "0.68017673", "0.6705965", "0.653406", "0.64730465", "0.6431238", "0.63461107", "0.628354", "0.6265989", "0.6264696", "0.6191097", "0.6113087", "0.59760135", "0.5914398", "0.58425444", "0.57655364", "0.5700489", "0.5667138", "0.564919", "0.5594621", "0.55723673", "0.5535636", "0.5524801", "0.55086386", "0.54975355", "0.54256046" ]
0.79750395
0
Remove the image data at filename within the store.
def remove_data(writer: UFOWriter, filename: str) -> None: writer.removeImage(filename)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_image_file(sender, instance, **kwargs):\n # Pass false so ImageField doesn't save the model.\n instance.image.delete(False)", "def delImg(img_name):\n img = Image.objects.raw({\"_id\": img_name}).first()\n img.delete()\n return", "def del_image(self, name):\r\n if self.images is None or name not in self.images:\r\n return\r\n l = self.images\r\n self.images = None\r\n l.setdefault('/empties/', [])\r\n # push the number on the empties list\r\n l['/empties/'].append(l[name])\r\n del l[name]\r\n self.images = l", "def photo_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.path.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def photo_edit_file_cleanup(sender, **kwargs):\n instance = kwargs.get('instance')\n filename = instance.upload.url[1:]\n if os.path.exists(filename):\n os.remove(filename)", "def remove_dataset_file(sender, instance, **kwargs):\n if instance.original_file:\n if os.path.isfile(instance.original_file.path):\n os.remove(instance.original_file.path)", "def delete(self, filename):\n pass", "def rem_file(self, key):\n del self.fileList[key]\n\n path = os.path.join(self.file_path, '%s.xoj' % key)\n try:\n os.remove( path )\n except:\n print \"Unable to remove\", path\n self.save()", "def delete(self, filename):\n raise NotImplementedError", "def removed(self, filename):\r\n self.__close_and_reload(filename)", "def removeSndFile(self, filename):\n try:\n sndfile = self.sndfiles[filename]\n except KeyError:\n return\n for ch in range(sndfile.getChannels()):\n w = self.grid.getWaveform(sndfile, ch)\n self.sb.unregisterWaveform(w)\n i = self.grid.getRowIndex(sndfile, ch)\n if i is not None:\n self.grid.removeRow(i)\n self.player.stop()\n self.player.removeSndFile(sndfile)\n del self.sndfiles[filename]", "def removefile(self, file):\n return _image.image_removefile(self, file)", "def delLocalData(self):\n try:\n if len(self.localFilename): os.remove(self.localFilename)\n except Exception as e:\n pass", "def delete_image(Name=None):\n pass", "def delete_image(filename):\n referrer = request.referrer\n path = \"/Users/ericmontague/sponsormatch/app/static/images/\" + filename\n image = Image.query.filter_by(path=path).first_or_404()\n event = Event.query.get_or_404(image.event_id)\n if not current_user.is_organizer(event) and not current_user.is_administrator():\n return redirect(url_for(\"main.index\"))\n db.session.delete(image)\n db.session.commit()\n flash(\"Your event image was successfully deleted.\", \"success\")\n return redirect(referrer)", "def delete(self, filename, **kw):\n\n file_path = os.path.join(self.storage_path, filename)\n\n try:\n os.remove(file_path)\n except OSError:\n pass", "def remove_image(self, imagename, del_img=False):\n os.system('rm -r {}.model'.format(imagename))\n os.system('rm -r {}.flux'.format(imagename))\n os.system('rm -r {}.psf'.format(imagename))\n os.system('rm -r {}.residual'.format(imagename))\n if del_img:\n os.system('rm -r {}.image'.format(imagename))", "def delete_file(self, name):\n del self.files[name]", "def remove(self, fileName):\n self.communicate(CMD_RM + ' ' + fileName)", "def predio_delete(sender, instance, **kwargs):\n instance.dataFile.delete(False)", "def __on_delete(self):\n self.image.delete()", "def __on_delete(self):\n self.image.delete()", "def __delitem__(self, key):\n if self.file_exists:\n try:\n with open_hdf5(self.file_name, mode=\"a\") as store:\n del store[self._get_h5_path(key)]\n except (AttributeError, KeyError):\n pass", "def delete(self, 
num):\n file_name = self.games[num]\n file_path = path.join(self.saved_games, file_name)\n if path.exists(file_path):\n os.remove(file_path)\n logger.info(\"Remove the file %s\", file_path)\n else:\n logger.error(\"The file %s doesn't existe\", file_path)\n\n minimap_types = [\"cover\", \"fog\"]\n for _type in minimap_types:\n name = file_name.split('.json')[0]\n name_path = path.join(self.saved_minimap, f\"{name}-{_type}.png\")\n if path.exists(name_path):\n os.remove(name_path)\n logger.info(\"Remove the file %s\", name_path)\n else:\n logger.error(\"The file %s doesn't existe\", name_path)\n\n pg.event.wait()\n self.refresh()", "def delete( self ):\n if os.path.exists(self.filename):\n os.remove(self.filename)", "def delete(self, *args, **kwargs):\n self.image.storage.delete(self.image.name)\n delete(self.image)\n super().delete(*args, **kwargs)", "def _removeFile(self, filename):\n try:\n #delete the output file\n os.remove(filename)\n except:\n #print (\"Failed to remove the file: \" + filename)\n pass", "def clean(self):\n if self.image:\n self.glance.images.delete(self.image['id'])\n\n if self.image_file:\n shutil.rmtree(self.download_path)", "def delete_file(sender, instance, *args, **kwargs):\n if instance.image:\n _delete_file(instance.image.path)", "def delete_tag(filename, tag_name):\n storeapps = APP.config[\"storage\"]\n filename = filename.encode(\"utf-8\")\n\n try:\n application = list(nativeapps.io.ls(storeapps, r\".*\" + filename + \"$\"))[0]\n meta_path = os.path.join(os.path.dirname(application), \"metadata.json\")\n metadata = json.loads(nativeapps.io.readfile(meta_path))\n tags = metadata.get(\"tags\", [])\n if tag_name in tags:\n tags.remove(tag_name)\n metadata[\"tags\"] = tags\n nativeapps.io.writefile(meta_path, json.dumps(metadata))\n except IndexError:\n return \"Unknown application: %s\" % (application), 404\n\n return \"removed\", 200" ]
[ "0.7071417", "0.7065978", "0.70587105", "0.6948764", "0.6843448", "0.6837711", "0.6807847", "0.67592394", "0.6742228", "0.67016274", "0.66833335", "0.6654753", "0.6651216", "0.66283286", "0.65885246", "0.6580812", "0.65307504", "0.65212464", "0.64562976", "0.64470166", "0.6442764", "0.6442764", "0.6436027", "0.641259", "0.63814795", "0.63785595", "0.63721097", "0.6364536", "0.63569385", "0.63529927" ]
0.8101599
0
This function will filter by times.
def _filterTimes(self): print(self.tRange) idT = np.where((self.tRange[0] > np.array(self.rawD['Epoch'][:])) & (self.tRange[1] < np.array(self.rawD['Epoch'][:])))[0] #print(self.rawD['Epoch'][:100]) print(idT) # Filter data for key in filter(lambda x: ('Epoch' in x or ('Counts' in x and x[-1] == 's')), self.rawD.keys()): self.d[key] = self.rawD[key].copy()[idT] return
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filter_by_date(items, start_time, end_time=None):\n start_time = parser.parse(start_time + \"UTC\").timestamp()\n if end_time:\n end_time = parser.parse(end_time + \"UTC\").timestamp()\n else:\n end_time = time.time()\n\n filtered_items = []\n for item in items:\n if 'time' in item:\n item_time = item['time']\n elif 'timestamp' in item:\n item_time = item['timestamp']\n timestamp = parser.parse(item_time + \"UTC\").timestamp()\n if end_time > timestamp > start_time:\n filtered_items.append(item)\n\n return filtered_items", "def filter_lower_datetime(time, list_time):\n return [t for t in list_time if t <= time]", "def filter_by_time(df, user):\n\n time = user.time_to_cook.replace('cooking_time_less_than_', '')\n\n return df.loc[df.minutes <= int(time)]", "def times_filter(d, times, meets_criteria=matches_timestr):\n mapping = map(type, times)\n if [ str, type(None), type(None) ] == mapping and meets_criteria(times[0]):\n d1 = doytimestr_to_datetime('%d:%s:00' % (d[0].year, times[0].replace('/',':')))\n #return '%s' % d1\n return d1, d1, 0\n elif [ str, str, type(None) ] == mapping and meets_criteria(times[0]) and meets_criteria(times[1]):\n d1 = doytimestr_to_datetime('%d:%s:00' % (d[0].year, times[0].replace('/',':')))\n d2 = doytimestr_to_datetime('%d:%s:00' % (d[1].year, times[1].replace('/',':')))\n #return '%s to %s' % (d1, d2)\n return d1, d2, timedelta_hours(d2-d1)\n else:\n #return ''\n return None, None, None", "def time_filter(records, seconds):\n delta = datetime.timedelta(seconds)\n records = iter(records)\n previous = next(records)\n yield previous\n current = None\n fields = ['host', 'type', 'user_agent', 'info']\n\n for record in records:\n current = record\n for field in fields:\n if current[field] != previous[field]:\n yield current\n break\n else:\n if previous['datetime'] + delta < current['datetime']:\n yield current\n\n previous = current", "def filter_times(timestamps, time_difference):\n timestamps = sorted(set(timestamps))\n\n filtered_timestamps = []\n for current_timestamp in timestamps:\n if not filtered_timestamps or current_timestamp - filtered_timestamps[-1] > time_difference:\n filtered_timestamps.append(current_timestamp)\n\n return filtered_timestamps", "def test_subset_by_time(self):\n\n this_satellite_dict = satellite_io.subset_by_time(\n satellite_dict=copy.deepcopy(SATELLITE_DICT_ALL_EXAMPLES),\n desired_times_unix_sec=DESIRED_TIMES_UNIX_SEC\n )[0]\n\n self.assertTrue(compare_satellite_dicts(\n this_satellite_dict, SATELLITE_DICT_SUBSET_BY_TIME\n ))", "def filter_timespans(self, minTime=2.0):\n igList = [ig.Rsc['PAIR'] for ig in self.Set if abs(float(ig.Rsc['TIME_SPAN_YEAR'])) < minTime]\n self.Set.omit(IG=igList)", "def search_by_time(self, tl):\n print(\"Search by minutes\")\n minutes = input(\"Please enter the number of minutes for the task: \")\n try:\n minutes = int(minutes)\n except ValueError as err:\n utils.print_error(err)\n return self.search_by_time(tl)\n else:\n return tl.findall_time(minutes)", "def _hist_filter_ts(commands, start_time, end_time):\n for cmd in commands:\n if start_time <= cmd[1] < end_time:\n yield cmd", "def filter_time_range(start: int, end: int, time_slots: Optional[Container[int]] = None) -> Iterator[int]:\n if time_slots is None:\n time_slots = range(24)\n for time in range(start, end):\n if time in time_slots:\n yield time", "def get_tweets(which, hours):\n objects = tweepy.Cursor(\n twitter.list_timeline,list_id=which,\n include_rts=False,count=100\n ).items()\n time_objects = []\n cutoff = (\n 
datetime.utcnow() - timedelta(hours=hours)\n ).strftime('%b %d %H:%M:%S')\n for tweet in objects:\n data = tweet._json # isolate metadata\n raw_time = datetime.strptime(\n data['created_at'],\n '%a %b %d %H:%M:%S +0000 %Y'\n )\n time = raw_time.strftime('%b %d %H:%M:%S') # reformat to match cutoff for boolean\n if time > cutoff:\n time_objects.append(tweet)\n return time_objects", "def FilterScan(self, time_ranges, start_time, end_time, upload_time):\r\n # Always add it to total time_range\r\n self.total_time_range.AddScan(start_time, end_time,\r\n upload_time)\r\n\r\n for time_range in time_ranges:\r\n if time_range.TimeisValid(start_time):\r\n time_range.AddScan(start_time, end_time, upload_time)\r\n return\r\n\r\n logging.warning(\"Scan does not match any filters\")", "def filter_metrics_choices(self): \n cols = pd.Series(tdr.get_catalog().tubidw.all_metric_hourly.columns)\n filter_metrics = ['no filters'] + cols[cols.str.endswith(tuple(['_count', '_sec']))].tolist()\n return filter_metrics", "def get_sessions(sessions, time_feat_dict):\n filt = Session.filter_time_func(time_feat_dict)\n return [s for s in sessions if filt(shortstr2time(s['start']))]", "def solve_filter_time_interval(self):\n if 'interval' in self.filter_request:\n temp_list_pack = []\n temp_list_pack.append(self.list_pack[0])\n curr_time = pandas.to_datetime(self.list_pack[0]['time_stamp'])\n filter_interval = int(self.filter_request['interval'])\n\n if filter_interval <= 0:\n filter_interval = int(1)\n\n for i in self.list_pack:\n pack_time = pandas.to_datetime(i['time_stamp'])\n if (curr_time + pandas.to_timedelta(filter_interval, unit='s')) <= pack_time:\n temp_list_pack.append(i)\n curr_time = pandas.to_datetime(i['time_stamp'])\n\n self.list_pack = temp_list_pack", "def _FilterMMarks(self):\n\n to_remove = []\n tplus1 = datetime.datetime.now() - datetime.timedelta(hours=1)\n\n for (i, (m1, m2)) in enumerate(self._mmarks):\n if (m1.starttime < tplus1):\n to_remove.append(i)\n\n to_remove.reverse()\n for i in to_remove:\n self._mmarks.pop(i)", "def search_by_time(integer, row):\n clear_screen()\n found = False\n for item in row:\n if item[\"Time\"] == str(integer):\n print_entry(item)\n found = True\n if found is False:\n print(\"No Entries Found..\")", "def exclude_times(self, *tuples):\n for item in tuples:\n if isinstance(item, TimeRange):\n self._excluded_times.append(item)\n else:\n self.exclude_time(*item)\n return self", "def sample_times():\n\tthe_times = []\n\tday = config.window_start_date\n\twhile day <= config.window_end_date:\n\t\t# times from start of window on day to end of window \n\t\ttime = config.tz.localize( datetime.combine( \n\t\t\tday, config.window_start_time \n\t\t) )\n\t\tend_time = config.tz.localize( datetime.combine( \n\t\t\tday, config.window_end_time \n\t\t) )\n\t\twhile time < end_time: # While still in the time window\n\t\t\tthe_times.append( time )\n\t\t\ttime += timedelta(minutes=1)\n\t\tday += timedelta(days=1)\n\treturn the_times", "def time_filter(target_time, format, delta_hours):\n return datetime.strptime(target_time, format) + timedelta(hours=delta_hours) >= datetime.utcnow()", "def get_talks_gt_one_hour(videos):\r\n return [video for video in videos if iso8601DurationToSeconds(video.duration) > 60 * 60]", "def get_timed_product(fromday, endday, shop):\n queryset = OrderDetail.objects.filter(shop=shop).filter(start_time__gte=fromday).filter(start_time__lte=endday)\n return queryset", "def get_talks_gt_one_hour(videos):\n return [v for v in videos if get_hours(v) >= 1]", "def 
filter_paths(movement, paths, time_threshold):\r\n\r\n # check if all inputs are positive integers\r\n conditions_value = time_threshold <= 0\r\n if conditions_value:\r\n raise ValueError(\"Input values need to be positive\")\r\n\r\n # Variable that store paths equal to or larger than time threshold\r\n pass_paths = []\r\n\r\n # Pull out time variable\r\n T = movement['t']\r\n\r\n # Run through each path and check whether the time spending\r\n # on the path is equal to or larger than the time threshold\r\n for path in paths:\r\n start_time, end_time = T[path].ravel()\r\n if (end_time - start_time) >= time_threshold:\r\n pass_paths.append(path)\r\n\r\n return(pass_paths)", "def filter_datetime_range(self, queryobject, start_datetime, end_datetime):\n raise NotImplementedError()", "def filter_time_match(file1, file2):\n freq1 = int(file1.split(\".\")[1].split(\"_\")[1].replace(\"M\", \"\"))\n freq2 = int(file2.split(\".\")[1].split(\"_\")[1].replace(\"M\", \"\"))\n df1, df2 = filter_overlapping_files_dfs(file1, file2)\n\n dt1 = pandas.to_datetime(df1[\"date\"] + \" \" + df1[\"hour\"])\n dt2 = pandas.to_datetime(df2[\"date\"] + \" \" + df2[\"hour\"])\n\n dt_delta = datetime.timedelta(minutes=freq2 - freq1)\n time_match_df1 = dt1.copy()\n time_match_df2 = dt2.copy()\n for idx, dt in dt2.items():\n match = dt1[(dt1 >= dt) & (dt1 <= dt + dt_delta)]\n time_match_df1[match.index] = idx\n time_match_df2[idx] = 0\n time_match_df2[idx] = tuple(match.index)\n\n time_match_df2[time_match_df2.apply(len) != 10]\n return time_match_df1, time_match_df2", "def create_time_filter_dict() -> dict:\n today = datetime.today()\n today_as_date = today.date()\n one_day_delta = timedelta(hours=24)\n time_filter_dict = {\n constants.DAY_BEFORE_YESTERDAY: (\n today_as_date - one_day_delta * 2,\n today_as_date - one_day_delta,\n ),\n constants.IN_PAST_24_HOURS: (today - one_day_delta, today),\n constants.YESTERDAY: (today_as_date - one_day_delta, today_as_date),\n constants.TODAY: (today_as_date, today_as_date + one_day_delta),\n constants.IN_NEXT_24_HOURS: (today, today + one_day_delta),\n constants.TOMORROW: (today_as_date + one_day_delta, today_as_date + one_day_delta * 2),\n }\n return time_filter_dict", "def filter_only_remaining(self,now):\n\t\ttimeshift = now.replace(tzinfo=\"Europe/London\")\n\t\treturn Programs([program for program in self.list if program.end > timeshift and program.end < now])", "def filter_data(start_time, end_time, table_name=\"content\"):\n datas = get_text_from_mysql(table_name=table_name,\n start_time=start_time,\n end_time=end_time)\n return datas" ]
[ "0.7031687", "0.67676854", "0.6751928", "0.6737246", "0.66668457", "0.6608179", "0.6398275", "0.6397288", "0.63106745", "0.6258909", "0.61309755", "0.61288965", "0.60868865", "0.60805154", "0.6061895", "0.5981721", "0.59279084", "0.58912027", "0.57801604", "0.57451755", "0.5723075", "0.56875205", "0.5685025", "0.5678982", "0.56469303", "0.5617843", "0.5617696", "0.5606753", "0.5605311", "0.5597027" ]
0.700179
1
Permutes the weight to use the sliced rotary implementation.
def permute_for_sliced_rotary(weight, num_heads, rotary_dim=None): if rotary_dim is not None: weight = weight.reshape(num_heads, weight.shape[0] // num_heads, -1) rotary_weight = weight[:, :rotary_dim] rotary_weight = permute_for_sliced_rotary( rotary_weight.reshape(num_heads * rotary_dim, -1), num_heads ).reshape(num_heads, rotary_dim, -1) weight[:, :rotary_dim] = rotary_weight return weight.reshape(-1, weight.shape[-1]) return ( weight.reshape(num_heads, weight.shape[0] // num_heads // 2, 2, weight.shape[1]) .swapaxes(1, 2) .reshape(weight.shape[0], weight.shape[1]) )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def weight_rotate(weight):\n weight = weight.permute(1, 2, 3, 0)\n return weight", "def permute(self):\n raise NotImplementedError()", "def apply_permutation(hyper, pol, perm):\n pass", "def rotate_weights(w, s):\n\n n = len(w)\n\n w = [\n np.sum([w[j] * s[(n + i - j) % n] for j in range(0, n)])\n for i in range(0, n)\n ]\n\n return w", "def Rot_layer(self, w):\n for idx, element in enumerate(w):\n qml.Rot(element[0], element[1], element[2], wires=idx)", "def mutate(self):\n \n # Mutate each weight\n self.w1 = self.w1 + np.random.normal(0, 1, 8).reshape((2,4))\n self.b1 = self.b1 + np.random.normal(0, 1, 2).reshape((2,1))\n self.w2 = self.w2 + np.random.normal(0, 1, 4).reshape((2,2))\n self.b2 = self.b2 + np.random.normal(0, 1, 2).reshape((2,1))\n self.w3 = self.w3 + np.random.normal(0, 1, 2).reshape((1,2))\n self.b3 = self.b3 + np.random.normal(0, 1, 1)\n \n # Return thyself\n return self", "def permutation(self, x):\r\n x = array(x)\r\n x = roll(x, self.num_calls)\r\n self.num_calls += 1\r\n return x", "def _reweight(self):\n self._seed_weights = [self._graph.degree(seed) for seed in self._seeds]\n weight_sum = np.sum(self._seed_weights)\n self._seed_weights = [float(weight)/weight_sum for weight in self._seed_weights]", "def permute_parent_element(perm, el):\n # perm[oldIndex] = newIndex\n return el # no need to permute operation sequence", "def mutate(self, prob_grow, prob_flip, prob_shrink, seed_density, mutation_rate):\r\n #\r\n mutant = copy.deepcopy(self)\r\n #\r\n # prob_grow = probability of invoking grow()\r\n # prob_flip = probability of invoking flip_bits()\r\n # prob_shrink = probability of invoking shrink()\r\n # seed_density = target density of ones in an initial random seed\r\n # mutation_rate = probability of flipping an individual bit\r\n #\r\n assert prob_grow + prob_flip + prob_shrink == 1.0\r\n #\r\n uniform_random = rand.uniform(0, 1)\r\n #\r\n if (uniform_random < prob_grow):\r\n # this will be invoked with a probability of prob_grow\r\n mutant.grow(seed_density) \r\n elif (uniform_random < (prob_grow + prob_flip)):\r\n # this will be invoked with a probability of prob_flip\r\n mutant.flip_bits(mutation_rate)\r\n else:\r\n # this will be invoked with a probability of prob_shrink\r\n mutant.shrink()\r\n # erase the parent's history from the child\r\n pop_size = len(self.history)\r\n mutant.history = np.zeros(pop_size, dtype=np.float)\r\n return mutant", "def __pow__ (self, idx):\n return perm(*(self._getcycles() * idx))", "def weight(self):", "def superpose(self, other, weight):\n self.vector += np.multiply(weight,other.vector)", "def _mutate(self,arr,p_mut):\n mut = np.random.random_sample(arr.shape)<p_mut\n no_mut = ~mut\n mut_val = np.random.uniform(low=self.minval,high=self.maxval,size=arr.shape)\n return (no_mut*arr) + (mut*mut_val)", "def _Recombination(self, Population_Parents_Weights, Population_Parents_Sigma, rows): #GenerateParents\r\n Population_Weights_Recombination = np.zeros(shape = (rows, Population_Parents_Weights.shape[1]))\r\n Population_Sigma_Recombination = np.zeros(shape = (rows, Population_Parents_Weights.shape[1]))\r\n for index_row, _ in enumerate( Population_Weights_Recombination ):\r\n \"\"\"\r\n 可能可以平行計算\r\n \"\"\"\r\n TwoRowschoiced = np.random.choice(Population_Parents_Weights.shape[0], size=2, replace=False,)\r\n Parent1Mask = np.random.randint(2, size=Population_Parents_Weights.shape[1])\r\n Parent2Mask = np.full(shape = Population_Parents_Weights.shape[1], fill_value = 1 ) - Parent1Mask\r\n \r\n 
Population_Weights_Recombination[index_row,:] = (Population_Parents_Weights[TwoRowschoiced] * [Parent1Mask, Parent2Mask]).sum(axis=0)\r\n Population_Sigma_Recombination[index_row,:] = Population_Parents_Sigma[TwoRowschoiced].mean(axis=0)\r\n return Population_Weights_Recombination, Population_Sigma_Recombination", "def permute(p,l,length):\n assert length >= 0\n if length == 0:\n\tprint p\n\treturn\n\n for i in range(0,length):\n\tn = p + (l[i],) \n\tpermute(n,l[0:i]+l[i+1:],length-1)", "def permute_fixed(s_input):\r\n return permute_row_elements(s_input, p_val)", "def permute_fixed(s_input):\r\n return permute_row_elements(s_input, p_val)", "def permute_fixed(s_input):\r\n return permute_row_elements(s_input, p_val)", "def permute_fixed(s_input):\r\n return permute_row_elements(s_input, p_val)", "def permute_fixed(s_input):\r\n return permute_row_elements(s_input, p_val)", "def forward(self, input):\n return input.permute(*self.perm)", "def pulp_smash():", "def permute(self, arr):\n\n return arr[self.permutation_idxs]", "def permutations(cube):\r\n yield from rotations24(cube)\r\n yield from rotations24(np.flip(cube, 0))\r\n yield from rotations24(np.flip(cube, 1))\r\n yield from rotations24(np.flip(cube, 2))", "def _mutate(self, offspring):\n weight_idx = random.choice(range(len(offspring)))\n mutation_modifier = 1 + random.uniform(-self.mutation_delta, self.mutation_delta)\n offspring[weight_idx] *= mutation_modifier\n return self._normalize_weights(offspring)", "def prime(self, reps=0, weight=0):\n if reps == 1:\n self.onerm = weight\n else:\n self.onerm = (weight * reps * 0.033) + weight", "def rotate(self,r):\n return r.hprod( self.hprod( r.inv() ) )", "def lift_perm(p: Dict[int, int]) -> np.ndarray:\n n = len(p)\n pm = np.zeros((1 << n, 1 << n), dtype=complex)\n for i in range(1 << n):\n j = 0\n mask = 1 << n\n for q in range(n):\n mask >>= 1\n if (i & mask) != 0:\n j |= 1 << (n - 1 - p[q])\n pm[j][i] = 1\n return pm", "def _generate_p(self):\n self._values, weights = zip(*self._weights.items())\n cumsum = list(itertools.accumulate(weights))\n total = cumsum[-1]\n self._p = [i / total for i in cumsum]" ]
[ "0.6902893", "0.6378392", "0.60350555", "0.57593405", "0.5576342", "0.55144465", "0.5473023", "0.5443731", "0.52880347", "0.52178", "0.52050495", "0.51479506", "0.514577", "0.5136151", "0.51311886", "0.5107614", "0.5107106", "0.5107106", "0.5107106", "0.5107106", "0.5107106", "0.50649774", "0.50467956", "0.504238", "0.50154215", "0.5014749", "0.5013505", "0.49959558", "0.49796072", "0.49789166" ]
0.7253853
0
Retrieve box office dataset using Pandas.
def read_box_office_data(): df = pd.read_csv(dataset) # Return a DICT of the Rank column in our CSV: # {'Rank': 7095, 'Release_Group': 'Rififi 2000 Re-release', 'Worldwide': 463593, 'Domesti # c': 460226, 'Domestic_%': 0.992737163848462, 'Foreign': 3367, 'Foreign_%': 0.007262836151538094, 'Year': 2000 # , 'Filename': '2000.csv'} return df.to_dict("Rank")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_box_office_data(environ):\n df = pd.read_csv(dataset)\n data = df.to_dict(\"Rank\")\n\n # NOTE: Add QUERY_STRING for number of movies to display\n query_str: str = environ.get(\"QUERY_STRING\")\n print(query_str)\n\n # Add a query string to specify number of records to return\n if query_str == \"\":\n # Do I pass in the data via context? Can I return \"data\": df.to_dict(\"Rank\")?\n # print(df.to_dict(\"Rank\")[0]) # List[0] -> Dict\n return render_template(\n template_name=\"box_office.html\",\n context={\"path\": environ.get(\"PATH_INFO\"), \"data\": data},\n )\n else:\n # Only single-digit numbers for now. Could parse further to\n # get the actual value. Don't see anything in the request obj\n # specifically. Could find '=' then take everything after and\n # convert to INT.\n num_movies: int = int(query_str.split(\"=\")[-1]) # movies=5 -> 5\n print(num_movies, type(num_movies))\n return render_template(\n template_name=\"box_office.html\",\n context={\n \"path\": environ.get(\"PATH_INFO\"),\n \"data\": data[:num_movies],\n \"qs\": query_str,\n \"movies\": num_movies,\n },\n )", "def load_dataset():\n\n df_ = pd.read_excel(\"D:\\VERİBİLİMİOKULU\\VERİSETLERİ\\post_bilgileri.xlsx\")\n df = df_.copy()\n return df", "def extract_data():\n client = MongoClient(HOST, PORT)\n collection = client[DB][COLLECTION]\n df = pd.DataFrame(collection.find().limit(10))\n return df", "def test_dataframe(self):\n\n url=\"http://archive.ics.uci.edu/ml/machine-learning-databases/wine/wine.data\"\n readerobject=requester.url_to_df(url)\n self.assertIsInstance(readerobject,pd.DataFrame)", "def load_landkreis_information():\n client = MongoClient(f'mongodb://{os.getenv(\"USR_\")}:{os.getenv(\"PWD_\")}@{os.getenv(\"REMOTE_HOST\")}:{os.getenv(\"REMOTE_PORT\")}/{os.getenv(\"AUTH_DB\")}')\n db = client[os.getenv(\"MAIN_DB\")]\n lk_collection = db[\"lk_overview\"]\n data = pd.DataFrame(list(lk_collection.find()))\n return data", "def data_pandas(detections):\n return DataWrapperPandas(detections, duplicates_radius=1)", "def get_dataframe(config: ModelSettings):\n df = pd.read_excel(config.df_data_source_path, engine=\"openpyxl\")\n\n # only use volumes with more than 30 slices\n if \"z\" in df.columns:\n ddf = df[(df[\"z\"] >= 30)]\n return df", "def get_mojo_box_office():\n \n #get movies from db\n movies_df = movie_helper.get_movies_df()\n \n with tqdm(total=len(movies_df)) as pbar:\n for index, row in movies_df.iterrows(): \n #get df of box office info for each weekend\n weekend_df = mojo_helper.get_uk_box_office_df(row['imdbId'])\n weekend_df[\"movieId\"] = row[\"movieId\"]\n \n #insert into the database \n database_helper.bulk_insert_df(\"weekend_box_office_mojo\", weekend_df, weekend_df.columns.values.tolist())\n pbar.update(1)", "def dataframe_extended(self):\n return self.boxscore.dataframe", "def read_SMAP_L1B_HDF_box(FILE_NAME, box_lat, box_lon, nameVariableArray):\n\n db=pd.DataFrame()\n pd.options.mode.chained_assignment = None\n with h5py.File(FILE_NAME, mode='r') as f:\n for i in range(0, len(nameVariableArray)):\n nameVariable = nameVariableArray[i]\n # print('Variable a extraer:' +str(nameVariable))\n data = f[nameVariable][:]\n units = f[nameVariable].attrs['units']\n longname = f[nameVariable].attrs['long_name']\n _FillValue = f[nameVariable].attrs['_FillValue']\n valid_max = f[nameVariable].attrs['valid_max']\n valid_min = f[nameVariable].attrs['valid_min'] \n invalid = np.logical_or(data > valid_max,\n data < valid_min)\n invalid = np.logical_or(invalid, data == _FillValue)\n 
data[invalid] = np.nan\n data = np.ma.masked_where(np.isnan(data), data)\n data = data.flatten('F')\n \n # Get the geolocation data\n latitude = f['/Brightness_Temperature/tb_lat'][:]\n longitude = f['/Brightness_Temperature/tb_lon'][:]\n lat_index = np.logical_and(latitude > box_lat[0], latitude < box_lat[1])\n lon_index = np.logical_and(longitude > box_lon[0], longitude < box_lon[1])\n box_index = np.logical_and(lat_index, lon_index)\n data = f[nameVariable][box_index]\n #### se genera el objeto pandas\n db[nameVariable] = data\n latitude = f['/Brightness_Temperature/tb_lat'][box_index]\n longitude = f['/Brightness_Temperature/tb_lon'][box_index]\n\n\n # Latitude = Latitude.flatten('F')\n # Longitude = Longitude.flatten('F')\n\n db[\"Longitude\"] = pd.to_numeric(longitude)\n db[\"Latitude\"] = pd.to_numeric(latitude) \n\n db['Coordinates'] = list(zip(db.Longitude, db.Latitude))\n db['Coordinates'] = db['Coordinates'].apply(Point)\n\n db = db.dropna()\n return db", "def get_data(self)->pd.DataFrame:\n pass", "def data_with_fips(self) -> pd.DataFrame:\n return self.data", "def fetch_data(self) -> pd.DataFrame:\r\n os.chdir(r'\\\\192.168.8.90\\投研部\\Jessica\\test_data')\r\n if self.tic in ['RB.CCRI', 'HC.CCRI', 'I.CCRI', 'J.CCRI', 'JM.CCRI', 'ZC.CCRI']:\r\n f = pd.read_hdf('data.h5', 'snc')\r\n if self.tic in ['CU.CCRI', 'ZN.CCRI', 'AL.CCRI', 'NI.CCRI']:\r\n f = pd.read_hdf('data.h5', 'met')\r\n data = f.loc[f.loc[:, 'sec_code'] == self.tic, :]\r\n # extract I.CCRI data\r\n table = pd.pivot_table(data, index=['date'], columns=['factor_code'], values='factor_value')\r\n table = table.sort_values(by='date')\r\n \r\n return table", "def get_db_afvalcluster_info():\n db_df = get_dataframe(\"\"\"SELECT *\n FROM proj_afval_netwerk.afv_rel_nodes_poi\n \"\"\")\n db_df['woning'] = db_df['bk_afv_rel_nodes_poi'].str.split('~')\n db_df['cluster_x'] = db_df['woning'].apply(lambda x: x[0]).astype('float')\\\n .round(0).astype('int')\n db_df['cluster_y'] = db_df['woning'].apply(lambda x: x[1]).astype('float')\\\n .round(0).astype('int')\n db_df['type'] = db_df['woning'].apply(lambda x: x[2])\n db_df['bag'] = db_df['woning'].apply(lambda x: x[3])\n db_df = db_df.drop('woning', axis=1)\n return db_df", "def _read_dataset(self):\n import pandas as pd\n\n freesolv_path = get_data_file_path(FREESOLV_PATH)\n\n freesolv_db = pd.read_csv(freesolv_path, delimiter=';',\n skipinitialspace=True,\n skiprows=[0, 1, 2], header=0,\n names=['compound id', 'SMILES',\n 'iupac name',\n 'experimental value',\n 'experimental uncertainty',\n 'calculated value (GAFF)',\n 'calculated uncertainty',\n 'experimental reference',\n 'calculated reference',\n 'notes'])\n\n compound_ids = freesolv_db['compound id'].to_list()\n smiles_tags = freesolv_db['SMILES'].to_list()\n experimental_v = freesolv_db['experimental value'].to_list()\n return compound_ids, smiles_tags, experimental_v", "def load_geolocation_data():\n client = MongoClient(f'mongodb://{os.getenv(\"USR_\")}:{os.getenv(\"PWD_\")}@{os.getenv(\"REMOTE_HOST\")}:{os.getenv(\"REMOTE_PORT\")}/{os.getenv(\"AUTH_DB\")}')\n db = client[os.getenv(\"MAIN_DB\")]\n lk_collection = db[\"lkdata\"]\n data = pd.DataFrame(list(lk_collection.find()))\n data = data[[\"fields\"]]\n data = pd.concat([pd.DataFrame(data), pd.DataFrame(list(data[\"fields\"]))], axis=1).drop(\"fields\", 1)\n data[\"cca_2\"] = pd.to_numeric(data[\"cca_2\"])\n return data", "def sourceToDataframe(self):\n df = pd.read_excel(self.filename)\n df.columns = df.iloc[10]\n df = df.drop(df.index[:11])\n self.df = df #makes 
this df accessible to the whole class now\n self.insertODN()\n display(df.head())", "def df():\n fs.df()", "def get_data(self):\n\n return pd.read_sql_query(\"Select * from {table}\".format(table=self.table_name), con=self.con)", "def make_dataset(self, df, **kwargs):\n\t\treturn df", "def read_df_from_binary(file_name_mask):\n data = read_matrix_from_binary(file_name_mask + '-value.bin')\n with open(file_name_mask + '-name.txt', 'r') as f:\n index = f.readline().strip().split('\\t')\n columns = f.readline().strip().split('\\t')\n return pandas.DataFrame(data=data, index=index, columns=columns)", "def to_dataframe(self):\n return df_util.to_dataframe(requests.get(self.__url).json())", "def query_data_from_labkey(cell_line_id):\n\n # Query for labkey data\n db = LabKey(contexts.PROD)\n\n # Get production data for cell line\n data = db.dataset.get_pipeline_4_production_cells([(\"CellLine\", cell_line_id)])\n data = pd.DataFrame(data)\n\n # Because we are querying the `cells` dataset and not the `fovs` dataset\n # We need to clean up just a tiny bit\n\n # NOTE: Tyler is looking into this\n # The only reason we query the `cells` dataset is for the `PixelScale` numbers\n # But those _should_ be exposed on the `fovs` dataset so he is looking into\n # why they aren't. In the future this query should be much simpler.\n\n # Select down to just the columns we want\n data = data[[\n \"FOVId\", \"CellLine\", \"Gene\", \"Protein\",\n \"PixelScaleX\", \"PixelScaleY\", \"PixelScaleZ\",\n \"SourceReadPath\", \"ChannelNumber405\",\n \"ChannelNumber638\", \"ChannelNumberBrightfield\",\n \"NucleusSegmentationReadPath\",\n \"MembraneSegmentationReadPath\",\n \"StructureSegmentationReadPath\"\n ]]\n\n # Drop duplicates because this dataset will have a row for every cell\n # instead of per-FOV\n data = data.drop_duplicates(\"FOVId\")\n data = data.set_index(\"FOVId\")\n\n # Fix all filepaths\n data = fix_filepaths(data)\n\n return data", "def load_pandas():\n data = _get_data()\n return du.process_pandas(data, endog_idx=5, exog_idx=[10, 2, 6, 7, 8])", "def get_national_data():\n cursor = nat_data_coll.find({})\n df = pd.DataFrame(list(cursor))\n if df.empty:\n app.logger.error(\"While getting national data: no data\")\n return df", "def get_box_data(x_data, selection, network_type='all', infrastructure='way[\"highway\"]'):\n if x_data['from_file'] == 'yes':\n return copy.deepcopy(get_data_subset(selection, network_type=network_type, infrastructure=infrastructure))\n else:\n try:\n return ox.osm_net_download(north=x_data['north'],\n south=x_data['south'],\n east=x_data['east'],\n west=x_data['west'],\n network_type=network_type,\n infrastructure=infrastructure\n )\n except Exception as e:\n logger.exception(e)\n time.sleep(5)\n return [{'elements':[]}]", "def getdata(verbose=True, filenameext=None):\n savepath = 'O:/Administration/02 - Økonomi og PDK/Medarbejdermapper/Kasper/Focus1 - Ad hoc opgaver/Lungemed sengedage og visitationer/plots/'\n\n if verbose: print(' - Getting LPR3 data from parsing SQL query for \"bed days\" ')\n savefilename_days = savepath+'lungemedLPR3_SQLbeddays.xlsx'\n filepath = savepath+'/../Lungemed.sql'\n if filenameext is not None:\n savefilename_days = savefilename_days.replace('.xlsx', filenameext+'.xlsx')\n filepath = filepath.replace('.sql', filenameext+'.sql')\n overwrite_days = True\n dataframe_days = gdf.returndatapull(lbv.loadSQL_beddays(filepath=filepath), verbose=verbose, savefilename=savefilename_days, overwrite=overwrite_days)\n\n if verbose: print(' - Getting LPR3 data from 
parsing SQL query for \"visitations\" ')\n savefilename_vis = savepath+'lungemedLPR3_SQLvisitations.xlsx'\n filepath = savepath + '/../Lungemed_visitationsoprindelse_nogroup.sql'\n if filenameext is not None:\n savefilename_vis = savefilename_vis.replace('.xlsx', filenameext+'.xlsx')\n filepath = filepath.replace('.sql', filenameext + '.sql')\n overwrite_vis = True\n dataframe_vis = gdf.returndatapull(lbv.loadSQL_visitations(filepath=filepath), verbose=verbose, savefilename=savefilename_vis, overwrite=overwrite_vis)\n\n return dataframe_days, dataframe_vis", "def read_dataset():\n\n df = pd.read_csv('fake_job_postings.csv', index_col='job_id')\n return df", "def data(self):\n return self.as_named_DataFrame()", "def load_renter_data():\n return pd.read_sql_query(_sql_query, _con)" ]
[ "0.6330489", "0.62791806", "0.6192725", "0.6002709", "0.5984528", "0.5924598", "0.5915877", "0.59097326", "0.58125913", "0.5751532", "0.5750167", "0.56569916", "0.56387097", "0.55906785", "0.55713004", "0.5560078", "0.5538441", "0.55188626", "0.5498248", "0.5486015", "0.54845387", "0.5484486", "0.5483439", "0.5480935", "0.54614186", "0.54582137", "0.54563564", "0.5420929", "0.54069716", "0.54000646" ]
0.6367827
0
RungeKutta integrator (4th order) Input arguments x = current value of dependent variable t = independent variable (usually time) tau = step size (usually timestep) derivsRK = right hand side of the ODE; derivsRK is the name of the function which returns dx/dt Calling format derivsRK (x,t,param). param = extra parameters passed to derivsRK Output arguments xout = new value of x after a step of size tau
def rk4(x,t,tau,derivsRK,param): #couldn't get it to import right so I just copy pasted. half_tau = 0.5*tau F1 = derivsRK(x,t,param) t_half = t + half_tau xtemp = x + half_tau*F1 F2 = derivsRK(xtemp,t_half,param) xtemp = x + half_tau*F2 F3 = derivsRK(xtemp,t_half,param) t_full = t + tau xtemp = x + tau*F3 F4 = derivsRK(xtemp,t_full,param) xout = x + tau/6.*(F1 + F4 + 2.*(F2+F3)) return xout
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rk4(x,t,tau,derivsRK,param):\n half_tau = 0.5*tau\n F1 = derivsRK(x,t,param)\n t_half = t + half_tau\n xtemp = x + half_tau*F1\n F2 = derivsRK(xtemp,t_half,param)\n xtemp = x + half_tau*F2\n F3 = derivsRK(xtemp,t_half,param)\n t_full = t + tau\n xtemp = x + tau*F3\n F4 = derivsRK(xtemp,t_full,param)\n xout = x + tau/6.*(F1 + F4 + 2.*(F2+F3))\n return xout", "def rungeKutta4(x, derivX, deltat=0.1, **kwargs):\n\tk1 = derivX(x,**kwargs)*deltat\n\tk2 = derivX(x + 0.5*k1, **kwargs)*deltat\n\tk3 = derivX(x + 0.5*k2, **kwargs)*deltat\n\tk4 = derivX(x + k3, **kwargs)*deltat\n\treturn x + rungeKuttaCoefficient*(k1+2*k2+2*k3+k4)", "def rkStep(ebitParams, mySpecies, species, tstep, populationAtT0, populationAtTtstep):\r\n # longer function param calls yes but it speeds it up calculateK by 10%...\r\n # print(\"\\nRunning an RK step... \")\r\n\r\n # mySpecies.k1, mySpecies.r1 = calculateKR(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k1, mySpecies.r1, populationAtT0, mySpecies.tmpPop, 0.0, tstep)\r\n # mySpecies.k2, mySpecies.r2 = calculateKR(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k2, mySpecies.r2, populationAtT0, mySpecies.k1, 0.5, tstep)\r\n # mySpecies.k3, mySpecies.r3 = calculateKR(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k3, mySpecies.r3, populationAtT0, mySpecies.k2, 0.5, tstep)\r\n # mySpecies.k4, mySpecies.r4 = calculateKR(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k4, mySpecies.r4, populationAtT0, mySpecies.k3, 1.0, tstep)\r\n \r\n mySpecies.k1 = calculateK(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k1, populationAtT0, mySpecies.tmpPop, 0.0, tstep)\r\n mySpecies.k2 = calculateK(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k2, populationAtT0, mySpecies.k1, 0.5, tstep)\r\n mySpecies.k3 = calculateK(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k3, populationAtT0, mySpecies.k2, 0.5, tstep)\r\n mySpecies.k4 = calculateK(ebitParams, mySpecies, species, mySpecies.tmpPop, mySpecies.Z, mySpecies.ionizationRates, mySpecies.chargeExchangeRates, mySpecies.rrRates, mySpecies.k4, populationAtT0, mySpecies.k3, 1.0, tstep)\r\n \r\n\r\n # print(\"k values for q=1:\")\r\n # print(\"k1 %s\"%mySpecies.k1[1])\r\n # print(\"k2 %s\"%mySpecies.k2[1])\r\n # print(\"k3 %s\"%mySpecies.k3[1])\r\n # print(\"k4 %s\"%mySpecies.k4[1])\r\n\r\n # Updates the population of each charge state in the species.\r\n for qindex in range(0, mySpecies.Z + 1):\r\n # new energy value = ( kT(q-1)(pop gained by q-1) - kT(q)(lost by q) + kT(q+1)(gained by q+1) ) / total change in population \r\n # populationAtTtstep[qindex] = populationAtT0[qindex] + ((1 / 6) * (sum(mySpecies.r1[qindex]) + (2 * sum(mySpecies.r2[qindex]) + sum(mySpecies.r3[qindex]) ) + sum(mySpecies.r4[qindex]) ))\r\n populationAtTtstep[qindex] = populationAtT0[qindex] + ((1 / 6) * (mySpecies.k1[qindex] + (2 * (mySpecies.k2[qindex] + 
mySpecies.k3[qindex])) + mySpecies.k4[qindex]) )\r\n\r\n # New calculation of time stepped energy\r\n # deltaPop = [loss by q(i) from EI, gain by q(i) from CX or RR]\r\n # for q in range(0, mySpecies.Z+1):\r\n # deltaPop = [(mySpecies.r1[q][i] + (2 * (mySpecies.r2[q][i] + mySpecies.r3[q][i])) + mySpecies.r4[q][i])/6 for i in range(0,2)]\r\n # # print(\"DeltaPop for q=%s\"%q+\": %s\"%deltaPop)\r\n # if q==0:\r\n # try:\r\n # #this one is with gain only...\r\n # energyAtTtstep[q] = (energyAtT0[q]*(populationAtT0[q]) + energyAtT0[q+1]*deltaPop[1]) / (populationAtT0[q]+deltaPop[1])\r\n # #this one is with gain and loss... caused problems\r\n # # energyAtTtstep[q] = (energyAtT0[q]*(populationAtT0[q]-deltaPop[0]) + energyAtT0[q+1]*deltaPop[1]) / (populationAtTtstep[q])\r\n # except ZeroDivisionError:\r\n # energyAtTtstep[q] = energyAtT0[q]\r\n # elif q==mySpecies.Z:\r\n # lowerQ = [(mySpecies.r1[q-1][i] + (2 * (mySpecies.r2[q-1][i] + mySpecies.r3[q-1][i])) + mySpecies.r4[q-1][i])/6 for i in range(0,2)]\r\n # try:\r\n # #gain only\r\n # energyAtTtstep[q] = (energyAtT0[q]*(populationAtT0[q]) + energyAtT0[q-1]*lowerQ[0]) / (populationAtT0[q]+deltaPop[1]+lowerQ[0])\r\n # # gain and loss\r\n # # energyAtTtstep[q] = (energyAtT0[q]*(populationAtT0[q]-deltaPop[0]) + energyAtT0[q-1]*lowerQ[0]) / (populationAtTtstep[q])\r\n # except ZeroDivisionError:\r\n # energyAtTtstep[q] = energyAtT0[q]\r\n # else:\r\n # lowerQ = [(mySpecies.r1[q-1][i] + (2 * (mySpecies.r2[q-1][i] + mySpecies.r3[q-1][i])) + mySpecies.r4[q-1][i])/6 for i in range(0,2)]\r\n # # print(\"lowerQ: %s\"%lowerQ)\r\n # try:\r\n # #gain\r\n # # print(\"energyAtT0[q-1] = %s\"%energyAtT0[q-1] + \", lowerQ[0] = %s\"%lowerQ[0]+\", populationAtT0[q]=%s\"%populationAtT0[q]+\", deltaPop[1]=%s\"%deltaPop[1])\r\n # energyAtTtstep[q] = (energyAtT0[q]*(populationAtT0[q]) + energyAtT0[q-1]*lowerQ[0] + energyAtT0[q+1]*deltaPop[1]) / (populationAtT0[q]+deltaPop[1]+lowerQ[0])\r\n # #gain and loss\r\n # # energyAtTtstep[q] = (energyAtT0[q]*(populationAtT0[q]-deltaPop[0]) + energyAtT0[q-1]*lowerQ[0] + energyAtT0[q+1]*deltaPop[1]) / (populationAtTtstep[q])\r\n # except ZeroDivisionError:\r\n # energyAtTtstep[q] = energyAtT0[q]\r\n\r\n \r\n # print(\"Initial pop: %s\"%populationAtT0 + \",\\nfinal pop: %s\"%populationAtTtstep)\r\n # print(\"Initial temp: %s\"%energyAtT0 + \",\\nfinal temp: %s\"%energyAtTtstep)\r\n return", "def rk4(derivs, y0, t, *args, **kwargs):\n\n try:\n Ny = len(y0)\n except TypeError:\n yout = np.zeros((len(t),), np.float_)\n else:\n yout = np.zeros((len(t), Ny), np.float_)\n\n yout[0] = y0\n\n for i in np.arange(len(t) - 1):\n\n thist = t[i]\n dt = t[i + 1] - thist\n dt2 = dt / 2.0\n y0 = yout[i]\n\n k1 = np.asarray(derivs(y0, thist, *args, **kwargs))\n k2 = np.asarray(derivs(y0 + dt2 * k1, thist + dt2, *args, **kwargs))\n k3 = np.asarray(derivs(y0 + dt2 * k2, thist + dt2, *args, **kwargs))\n k4 = np.asarray(derivs(y0 + dt * k3, thist + dt, *args, **kwargs))\n yout[i + 1] = y0 + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)\n return yout", "def rk4(derivs, y0, t, *args, **kwargs):\n\n try:\n Ny = len(y0)\n except TypeError:\n yout = np.zeros((len(t),), np.float_)\n else:\n yout = np.zeros((len(t), Ny), np.float_)\n\n yout[0] = y0\n\n for i in np.arange(len(t) - 1):\n\n thist = t[i]\n dt = t[i + 1] - thist\n dt2 = dt / 2.0\n y0 = yout[i]\n\n k1 = np.asarray(derivs(y0, thist, *args, **kwargs))\n k2 = np.asarray(derivs(y0 + dt2 * k1, thist + dt2, *args, **kwargs))\n k3 = np.asarray(derivs(y0 + dt2 * k2, thist + dt2, *args, **kwargs))\n k4 = 
np.asarray(derivs(y0 + dt * k3, thist + dt, *args, **kwargs))\n yout[i + 1] = y0 + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)\n\n return yout", "def rk4(derivs, y0, t, *args, **kwargs):\r\n\r\n try:\r\n Ny = len(y0)\r\n except TypeError:\r\n yout = np.zeros((len(t),), np.float_)\r\n else:\r\n yout = np.zeros((len(t), Ny), np.float_)\r\n\r\n yout[0] = y0\r\n i = 0\r\n\r\n for i in np.arange(len(t) - 1):\r\n\r\n thist = t[i]\r\n dt = t[i + 1] - thist\r\n dt2 = dt / 2.0\r\n y0 = yout[i]\r\n\r\n k1 = np.asarray(derivs(y0, thist, *args, **kwargs))\r\n k2 = np.asarray(derivs(y0 + dt2 * k1, thist + dt2, *args, **kwargs))\r\n k3 = np.asarray(derivs(y0 + dt2 * k2, thist + dt2, *args, **kwargs))\r\n k4 = np.asarray(derivs(y0 + dt * k3, thist + dt, *args, **kwargs))\r\n yout[i + 1] = y0 + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)\r\n return yout", "def MyGRRK3_step(f, t, qn, dt, r, e, w):\r\n assert((not np.any(np.isnan(t))) and np.all(np.isfinite(t)) and\r\n np.all(np.isreal(t))), \\\r\n \"t must be real, finite and not NaN\"\r\n assert((not np.any(np.isnan(dt))) and np.all(np.isfinite(dt)) and\r\n np.all(np.isreal(dt))), \\\r\n \"dt must be real, finite and not NaN\"\r\n assert(len(qn) == 2), \"qn must have length 2\"\r\n assert(hasattr(f, '__call__')), \\\r\n \"f must be a callable function\"\r\n assert((not np.any(np.isnan(r))) and np.all(np.isfinite(r)) and\r\n np.all(np.isreal(r))), \\\r\n \"r must be real, finite and not NaN\"\r\n assert((not np.any(np.isnan(e))) and np.all(np.isfinite(e)) and\r\n np.all(np.isreal(e))), \\\r\n \"e must be real, finite and not NaN\"\r\n assert((not np.any(np.isnan(w))) and np.all(np.isfinite(w)) and\r\n np.all(np.isreal(w))), \\\r\n \"w must be real, finite and not NaN\"\r\n\r\n def F(k0):\r\n \"\"\"\r\n Function defines the set of nonlinear equations describing k1 and k2\r\n of the third order explicit Runge-Kutta algorithm\r\n\r\n Parameters\r\n ----------\r\n\r\n k0 : vector\r\n intial guess for roots of the problem\r\n\r\n Returns\r\n -------\r\n\r\n f3 : vector\r\n set of nonlinear equations of k1 and k2\r\n \"\"\"\r\n assert((not np.any(np.isnan(k0))) and np.all(np.isfinite(k0)) and\r\n np.all(np.isreal(k0))),\\\r\n \"k0 must be real, finite and not NaN\"\r\n assert(len(k0) == 4), \"K must have length 4\"\r\n assert(hasattr(F, '__call__')), \\\r\n \"F must be a callable function\"\r\n k1 = np.array([k0[0], k0[1]])\r\n k2 = np.array([k0[2], k0[3]])\r\n f1 = k1 - np.array([f(t + dt / 3,\r\n qn + (dt / 12) * (5 * k1 - k2), r, e, w)])\r\n f2 = k2 - np.array([f(t + dt,\r\n qn + (dt / 4) * (3 * k1 + k2), r, e, w)])\r\n f3 = np.reshape(np.array([f1, f2]), (4,))\r\n return f3\r\n\r\n k0 = np.reshape(np.array([f(t + dt / 3, qn, r, e, w),\r\n f(t + dt, qn, r, e, w)]), (4,))\r\n k = fsolve(F, k0)\r\n k1 = np.array([k[0], k[1]])\r\n k2 = np.array([k[2], k[3]])\r\n qnpG1 = qn + (dt / 4) * (3 * k1 + k2)\r\n return qnpG1", "def _rk4(self, derivs, y0, t):\n yout = y0\n for i in range(len(t) - 1):\n thist = t[i]\n dt = t[i + 1] - thist\n dt2 = dt / 2.0\n k1 = np.asarray(derivs(yout, thist))\n k2 = np.asarray(derivs(yout + dt2 * k1, thist + dt2))\n k3 = np.asarray(derivs(yout + dt2 * k2, thist + dt2))\n k4 = np.asarray(derivs(yout + dt * k3, thist + dt))\n yout = yout + dt / 6.0 * (k1 + 2 * k2 + 2 * k3 + k4)\n return yout[:4]", "def Runge_Kutta_4(diff_fun: Callable[..., float],\\\n x_range: Tuple[float, float, float], initial_value: np.array,\\\n params: Tuple) -> Tuple[np.array, np.array]:\n\n # Extract the time step for ease of use and readability.\n dx = x_range[2]\n # Get the 
size of the parameter vector.\n vec_size = len(initial_value)\n # Initialize the output arrays.\n x = np.arange(x_range[0], x_range[1] + dx, dx)\n num_steps = len(x)\n y = np.array([np.zeros(vec_size) for i in range(num_steps)])\n y[0] = initial_value\n\n # Numerically compute the differential equation's parameters over the given \n # range.\n for n in range(1, num_steps):\n k1 = step_1(diff_fun, x[n-1], y[n-1], params, dx)\n k2 = step_2(diff_fun, x[n-1], y[n-1], params, dx, k1)\n k3 = step_3(diff_fun, x[n-1], y[n-1], params, dx, k2)\n k4 = step_4(diff_fun, x[n-1], y[n-1], params, dx, k3)\n y[n] = y[n-1] + ((k1 + (2 * k2) + (2 * k3) + k4) / 6)\n\n return x, y", "def with_rk4(dxdt,autonom=False,order=4):\n integrator = functools.partial(rk4,order=order)\n if autonom: step = lambda x0,t0,dt: integrator(lambda t,x: dxdt(x),x0,np.nan,dt)\n else: step = lambda x0,t0,dt: integrator( dxdt ,x0,t0 ,dt)\n name = \"rk\"+str(order)+\" integration of \"+pretty_repr(dxdt)\n step = NamedFunc(step,name)\n return step", "def runge_integrator(self, t, y, dt, tau):\n\n k1 = self.plant.rhs(t, y, tau)\n k2 = self.plant.rhs(t + 0.5 * dt, y + 0.5 * dt * k1, tau)\n k3 = self.plant.rhs(t + 0.5 * dt, y + 0.5 * dt * k2, tau)\n k4 = self.plant.rhs(t + dt, y + dt * k3, tau)\n return (k1 + 2 * (k2 + k3) + k4) / 6.0", "def _rk4(t, dt, x, f, args=None):\n x = np.asarray(x)\n k1 = np.asarray(f(x, t, *args))\n k2 = np.asarray(f(x + 0.5*dt*k1, t + 0.5*dt, *args))\n k3 = np.asarray(f(x + 0.5*dt*k2, t + 0.5*dt, *args))\n k4 = np.asarray(f(x + dt*k3, t + dt, *args))\n return x + dt*(k1 + 2*k2 + 2*k3 + k4)/6.0", "def rkStepEnergy(ebitParams, mySpecies, species, tstep, energyAtT0):\r\n return", "def Runge_Kutta_Fourth_Order(Method, Coordinate_file, Program, Temperature, Pressure,\n molecules_in_coord, Statistical_mechanics, RK4_stepsize, min_RMS_gradient, **keyword_parameters):\n # Setting up program specific file endings and giving parameter files blank names to avoid errors\n if Program == 'Tinker':\n file_ending = '.xyz'\n elif Program == 'Test':\n file_ending = '.npy'\n keyword_parameters['Parameter_file'] = ''\n\n RK_multiply = np.array([1./6., 1./3., 1./3., 1./6.])\n\n # Copying the coordinate file to a seperate file to work with\n os.system('cp ' + Coordinate_file + ' RK4' + file_ending)\n\n # Setting the different temperature stepsizes\n temperature_steps = np.array([0., RK4_stepsize/2., RK4_stepsize/2., RK4_stepsize])\n\n # Setting RK_4 array/matix and general parameters that aren't required for specific methods\n if (Method == 'GiQ') or (Method == 'GiQg'):\n RK_grad = np.zeros(4)\n if Method == 'GiQ':\n keyword_parameters['Gruneisen'] = 0.\n keyword_parameters['Wavenumber_Reference'] = 0.\n keyword_parameters['Volume_Reference'] = 0.\n keyword_parameters['Crystal_matrix_Reference'] = 0.\n elif (Method == 'GaQ') or (Method == 'GaQg'):\n RK_grad = np.zeros((4, 3, 3))\n if Method == 'GaQ':\n keyword_parameters['Gruneisen'] = 0.\n keyword_parameters['Wavenumber_Reference'] = 0.\n keyword_parameters['Volume_Reference'] = 0.\n keyword_parameters['Crystal_matrix_Reference'] = 0.\n\n # Calculating the RK gradients for the overall numerical gradient\n for i in range(4):\n print \" + Performing Runge-Kutta step \" + str(i + 1)\n if (Method == 'GiQ') or (Method == 'GiQg'):\n RK_grad[i], wavenumbers_hold, volume_hold = Ex.Call_Expansion(Method, 'local_gradient', Program,\n 'RK4' + file_ending, molecules_in_coord,\n min_RMS_gradient,\n Temperature=Temperature, Pressure=Pressure,\n volume_fraction_change=keyword_parameters[\n 
'LocGrd_Vol_FracStep'],\n Statistical_mechanics=Statistical_mechanics,\n Parameter_file=\n keyword_parameters['Parameter_file'],\n Gruneisen=keyword_parameters['Gruneisen'],\n Wavenumber_Reference=\n keyword_parameters['Wavenumber_Reference'],\n Volume_Reference=\n keyword_parameters['Volume_Reference'])\n elif (Method == 'GaQ') or (Method == 'GaQg'):\n RK_grad[i], wavenumbers_hold = Ex.Call_Expansion(Method, 'local_gradient', Program, 'RK4' + file_ending,\n molecules_in_coord, min_RMS_gradient, Temperature=Temperature,\n Pressure=Pressure, matrix_parameters_fraction_change=\n keyword_parameters['LocGrd_LatParam_FracStep'],\n Statistical_mechanics=Statistical_mechanics,\n Parameter_file=keyword_parameters['Parameter_file'],\n Gruneisen=keyword_parameters['Gruneisen'],\n Wavenumber_Reference=\n keyword_parameters['Wavenumber_Reference'],\n Crystal_matrix_Reference=\n keyword_parameters['Crystal_matrix_Reference'],\n Aniso_LocGrad_Type=\n keyword_parameters['Aniso_LocGrad_Type'])\n volume_hold = 0.\n if i == 0:\n wavenumbers = 1.*wavenumbers_hold\n volume = 1.*volume_hold\n k1 = 1.*RK_grad[0]\n if i != 3:\n if (Method == 'GiQ') or (Method == 'GiQg'):\n dcrystal_matrix = 0.\n volume_fraction_change = (volume + RK_grad[i]*temperature_steps[i+1])/volume\n elif (Method == 'GaQ') or (Method == 'GaQg'):\n dcrystal_matrix = RK_grad[i]*temperature_steps[i+1]\n volume_fraction_change = 0.\n # Expanding the crystal to the next stepsize\n Ex.Call_Expansion(Method, 'expand', Program, Coordinate_file, molecules_in_coord, min_RMS_gradient,\n Parameter_file=keyword_parameters['Parameter_file'], dcrystal_matrix=dcrystal_matrix,\n volume_fraction_change=volume_fraction_change, Output='RK4')\n # Multiplying the found gradient by the fraction it will contribute to the overall gradient\n RK_grad[i] = RK_grad[i]*RK_multiply[i]\n # Summing all RK gradients for the overall numerical gradient\n numerical_gradient = np.sum(RK_grad, axis=0)\n\n # Removign excess files\n os.system('rm RK4'+file_ending)\n return numerical_gradient, wavenumbers, volume, k1", "def rk4(f, x, t, dt, order=4): \n if order >=1: k1 = dt * f(t , x)\n if order >=2: k2 = dt * f(t+dt/2, x+k1/2)\n if order ==3: k3 = dt * f(t+dt , x+k2*2-k1)\n if order ==4:\n k3 = dt * f(t+dt/2, x+k2/2)\n k4 = dt * f(t+dt , x+k3)\n if order ==1: return x + k1\n elif order ==2: return x + k2\n elif order ==3: return x + (k1 + 4*k2 + k3)/6\n elif order ==4: return x + (k1 + 2*(k2 + k3) + k4)/6\n else: raise NotImplementedError", "def sim(self, (K,T), (j0,x0), dt, rx, **params):\n dt0 = dt \n\n k = 0\n t = [0.]\n j = deepcopy(j0)\n x = [deepcopy(x0)]\n\n trj = dict(k=k,t=t,j=j,x=x)\n trjs = []\n\n while ( trj['t'][-1] <= T # don't exceed max continuous time\n and trj['k'] < K # don't exceed max discrete transitions\n and not trj['j'] is None ): # don't allow discrete state is None\n k0 = trj['k']\n t0 = trj['t'][-1]\n j0 = trj['j']\n x0 = trj['x'][-1]\n if 0: # forward Euler\n dx = self.F((k0,t0), (j0,x0), **params)\n else: # 4th-order Runge-Kutta \n f = lambda t,x : self.F((k0,t), (j0,x), **params)\n dx1 = f( t0, x0 ) * dt\n dx2 = f( t0+.5*dt, x0+.5*dx1 ) * dt\n dx3 = f( t0+.5*dt, x0+.5*dx2 ) * dt\n dx4 = f( t0+dt, x0+dx3 ) * dt\n dx = (1./6.)*( dx1 + 2*dx2 + 2*dx3 + dx4 ) / dt\n\n k = k0\n j = j0\n t = t0 + dt\n x = x0 + dt * dx\n g = self.G((k,t), (j,x), **params)\n\n # halve step size until trajectory doesn't violate guard more than rx\n i = 0\n imax = 50\n while np.any(g < -rx) and (i <= imax):\n dt = dt/2.\n t = t0 + dt\n x = x0 + dt * dx\n g = self.G((k,t), 
(j,x), **params)\n i += 1\n\n #if (i >= imax):\n # raise RuntimeError,'(sim) guard iterations exceeded -- you probably have a buggy guard'\n\n # append state to trj\n trj['t'].append(t)\n trj['x'].append(x)\n\n if 0 and 'debug' in params and params['debug']:\n print ' : (k,t)=(%s,%0.3f), (j,x)=(%s,%s), dt=%0.2e, g = %s, x = %s, dx = %s' % (k,t,j,x,dt,g,x,dx)\n\n # if in guard \n if np.any(g < 0):\n\n # spend time in guard\n if i >= imax:\n t = t + rx\n else:\n t = t + (rx + g.min())\n trj['t'].append(t)\n trj['x'].append(x)\n\n if 'debug' in params and params['debug']:\n print 'rx: (k,t)=(%s,%0.3f), (j,x)=(%s,%s), dt=%0.2e, g = %s, x = %s' % (k,t,j,x,dt,g,x)\n\n # append trj to trjs\n trjs.append(trj)\n\n if 'Zeno' in params and params['Zeno'] and (len(trj['t']) <= 4):\n\n print '(sim) possible Zeno @ stepsize dt = %0.6f' % dt0\n print 'rx: (k,t)=(%s,%0.3f), (j,x)=(%s,%s), dt=%0.2e, g = %s, x = %s' % (k,t,j,x,dt,g,x)\n return trjs\n\n # apply reset to modify trj\n (k,t),(j,x) = self.R((k,t), (j,x), **params)\n trj = dict(k=k,t=[t],j=j,x=[x])\n\n # re-initialize step size\n dt = dt0\n\n if 'debug' in params and params['debug']:\n g = self.G((k,t), (j,x), **params)\n print 'rx: (k,t)=(%s,%0.3f), (j,x)=(%s,%s), dt=%0.2e, g = %s, x = %s' % (k,t,j,x,dt,g,x)\n\n trjs.append(trj)\n\n return trjs", "def rk4(f, y, t, dt):\n k1 = f(y, t)\n k2 = f(y + 0.5 * k1 * dt, t + 0.5 * dt)\n k3 = f(y + 0.5 * k2 * dt, t + 0.5 * dt)\n k4 = f(y + k3 * dt, t + dt)\n\n res = y + float(1) / 6 * dt * (k1 + 2 * k2 + 2 * k3 + k4)\n return res", "def rk4(s,t0,tf,h=30):\n\n t = t0\n\n if tf < t0:\n h = -h\n\n while(abs(tf-t) > 0.00001):\n if (abs(tf-t) < abs(h)):\n h = tf-t\n\n k1 = h*sdot(s)\n k2 = h*sdot(s+k1/2)\n k3 = h*sdot(s+k2/2)\n k4 = h*sdot(s+k3)\n\n s = s+(k1+2*k2+2*k3+k4)/6\n t = t+h\n\n # if (s[2]<0 and s[2]>-200 and s[5]>0):\n # dt = -s[2]/s[5]\n # print(t+dt)\n\n return s", "def rkf45(s,t0,tf,h=10,tol=1e-6):\n\n t = t0\n while(tf-t > 0.00001):\n if (tf-t < h):\n h = tf-t\n\n k1 = h*sdot(s)\n k2 = h*sdot(s+k1/4)\n k3 = h*sdot(s+3/32*k1+9/32*k2)\n k4 = h*sdot(s+1932/2197*k1-7200/2197*k2+7296/2197*k3)\n k5 = h*sdot(s+439/216*k1-8*k2+3680/513*k3-845/4104*k4)\n k6 = h*sdot(s-8/27*k1+2*k2-3544/2565*k3+1859/4104*k4-11/40*k5)\n\n y = s+25/216*k1+1408/2565*k3+2197/4104*k4-k5/5\n z = s+16/135*k1+6656/12825*k3+28561/56430*k4-9/50*k5+2/55*k6\n\n s = y\n t = t+h\n\n err = np.linalg.norm(y-z)\n h = h*0.84*(tol/err)**0.25\n\n return s", "def runge_kutta(func, x0, time):\n dt = time[1] - time[0]\n x = np.array(x0)\n val = []\n\n for t in time:\n val.append(x)\n\n k1 = np.array([f(t, x) for f in func])\n k2 = np.array([f(t+dt/2, x+dt*k1/2) for f in func])\n k3 = np.array([f(t+dt/2, x+dt*k2/2) for f in func])\n k4 = np.array([f(t+dt, x+dt*k3) for f in func])\n\n x = x + dt*(k1 + 2*k2 + 2*k3 + k4)/6\n\n return val", "def MyRK3_step(f, t, qn, dt, R, e, w):\r\n assert((not np.any(np.isnan(t))) and np.all(np.isfinite(t)) and\r\n np.all(np.isreal(t))),\\\r\n \"t must be real, finite and not NaN\"\r\n assert((not np.any(np.isnan(dt))) and np.all(np.isfinite(dt)) and\r\n np.all(np.isreal(dt))),\\\r\n \"dt must be real, finite and not NaN\"\r\n assert(len(qn) == 2), \"qn must have length 2\"\r\n assert(hasattr(f, '__call__')),\\\r\n \"f must be a callable function\"\r\n assert((not np.any(np.isnan(R))) and np.all(np.isfinite(R)) and\r\n np.all(np.isreal(R))),\\\r\n \"r must be real, finite and not NaN\"\r\n assert((not np.any(np.isnan(e))) and np.all(np.isfinite(e)) and\r\n np.all(np.isreal(e))),\\\r\n \"e must be real, finite and 
not NaN\"\r\n assert((not np.any(np.isnan(w))) and np.all(np.isfinite(w)) and\r\n np.all(np.isreal(w))),\\\r\n \"w must be real, finite and not NaN\"\r\n k1 = f(t, qn, R, e, w)\r\n k2 = f(t + dt/2.0, qn + dt*(k1/2.0), R, e, w)\r\n k3 = f(t + dt, qn + dt*(-k1 + 2*k2), R, e, w)\r\n qnp1R = qn + (dt*(k1+4*k2+k3))/6.0\r\n return qnp1R", "def rk4_general(fn, time_step, t, y, *args):\n k1 = time_step*fn(t, y, *args)\n k2 = time_step*fn(t + time_step/2, y + k1/2, *args)\n k3 = time_step*fn(t + time_step/2, y + k2/2, *args)\n k4 = time_step*fn(t + time_step, y + k3, *args)\n return y + (1/6)*(k1 + 2*k2 + 2*k3 + k4)", "def runge_kutta(self, y_n, t_n, delta):\n k1 = delta*self.y_prime(t_n, y_n)\n k2 = delta*self.y_prime(t_n+delta/2, y_n+k1/2)\n k3 = delta*self.y_prime(t_n+delta/2, y_n+k2/2)\n k4 = delta*self.y_prime(t_n+delta, y_n+k3)\n return y_n + (k1 + 2*(k2+k3) + k4)/6 #, t_n+delta", "def RK4_step(x, dt, flow): #copied from cpt\r\n n = len(x)\r\n k1 = [ dt * k for k in flow(x) ]\r\n x_temp = [ x[i] + k1[i] / 2.0 for i in range(n) ]\r\n k2 = [ dt * k for k in flow(x_temp) ]\r\n x_temp = [ x[i] + k2[i] / 2.0 for i in range(n) ]\r\n k3 = [ dt * k for k in flow(x_temp) ]\r\n x_temp = [ x[i] + k3[i] for i in range(n) ]\r\n k4 = [ dt * k for k in flow(x_temp) ]\r\n for i in range(n):\r\n x[i] += (k1[i] + 2.0 * k2[i] + 2.0 * k3[i] + k4[i]) / 6.0", "def cal_f_RK(yt, dyt, f, df, int_INV_D_pre, vw_div_vw0, fcn_D, cond_GT):\n phi_b = cond_GT['phi_bulk']\n ed = cond_GT['epsilon_d']\n\n y_new = yt + dyt\n f_new = f + df\n int_INV_D = int_INV_D_pre\n if df != 0.: # it is related with half-step for RK4 method\n int_INV_D += (dyt/2.)*(1./fcn_D(f, cond_GT) + 1./fcn_D(f_new, cond_GT))\n return (-1./ed)*(vw_div_vw0/fcn_D(f_new, cond_GT))*(f_new - phi_b*(1. - exp(-(vw_div_vw0/ed)*int_INV_D)))", "def RK4(f, u0, t0, t_max, dt, args=()):\n u = np.array(u0)\n t = np.array(t0)\n u_all = [u0]\n t_all = [t0]\n while t+dt < t_max:\n k1 = dt*f(t, u, *args)\n k2 = dt*f(t + 0.5*dt, u + 0.5*k1, *args)\n k3 = dt*f(t + 0.5*dt, u + 0.5*k2, *args)\n k4 = dt*f(t + dt, u + k3, *args)\n u = u + (1/6)*(k1 + 2*k2 + 2*k3 + k4)\n u_all.append(u)\n t = t + dt\n t_all.append(t)\n if u[3] <= 0:\n break # terminate at ground\n return np.array(u_all), np.array(t_all)", "def rk4_sde(self, x, rv_n):\n a21 = 2.71644396264860\n a31 = - 6.95653259006152\n a32 = 0.78313689457981\n a41 = 0.0\n a42 = 0.48257353309214\n a43 = 0.26171080165848\n a51 = 0.47012396888046\n a52 = 0.36597075368373\n a53 = 0.08906615686702\n a54 = 0.07483912056879\n\n q1 = 2.12709852335625\n q2 = 2.73245878238737\n q3 = 11.22760917474960\n q4 = 13.36199560336697\n\n n = self.mp.params[0]; k = self.mp.params[1];\n gamma = self.mp.params[2]; dt = self.mp.params[3];\n\n if x.get_shape()[1] > 1:\n evolve_fun = self.evolve_system\n else:\n evolve_fun = self.evolve\n\n x1 = x\n k1 = dt * evolve_fun(x1, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x2 = x1 + a21 * k1\n k2 = dt * evolve_fun(x2, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x3 = x1 + a31 * k1 + a32 * k2\n k3 = dt * evolve_fun(x3, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x4 = x1 + a41 * k1 + a42 * k2\n k4 = dt * evolve_fun(x4, n, k, gamma) + tf.sqrt(dt) * x * rv_n\n\n x_new = x1 + a51 * k1 + a52 * k2 + a53 * k3 + a54 * k4\n\n return tf.cast(x_new, tf.float32)", "def RK45_method(RHS, theta_0, omega_0, t_1, dt):\n initialValues = [theta_0,omega_0]\n\n timeSpan = [0,t_1 + dt]\n\n timeArr = np.arange(0,t_1+dt,dt)\n \n solution = DestoryerOfOrdinaryDifferentialEquations(RHS,timeSpan,initialValues,method=\"RK45\",t_eval=timeArr)\n 
\n theta = solution.y[0, : ]\n omega = solution.y[1, : ]\n times = solution.t\n \n \n return theta, omega, times", "def rk4(self, t, h,G) :\r\n k1 = h*self.calc_diff_eqn(t, self.quant_vec,G,self.mass_vec)\r\n k2 = h*self.calc_diff_eqn(t + 0.5*h , self.quant_vec + 0.5*k1 ,G, self.mass_vec)\r\n k3 = h*self.calc_diff_eqn(t + 0.5*h , self.quant_vec + 0.5*k2 ,G, self.mass_vec)\r\n k4 = h*self.calc_diff_eqn(t + h , self.quant_vec + k3 ,G, self.mass_vec)\r\n y_new = self.quant_vec + ((k1 + 2*k2 + 2*k3 + k4)/6)\r\n return y_new", "def _Delta_rk4(dt_func, dt_func_data, integrator_time, dop, H, Lk, tstep):\n k1 = dt_func(dop, H, Lk, dt_func_data, integrator_time)\n k2 = dt_func(dop + 0.5*tstep*k1, H, Lk, dt_func_data, integrator_time)\n k3 = dt_func(dop + 0.5*tstep*k2, H, Lk, dt_func_data, integrator_time)\n k4 = dt_func(dop + tstep*k3, H, Lk, dt_func_data, integrator_time)\n return tstep*(k1/6. + k2/3. + k3/3. + k4/6.)" ]
[ "0.7320876", "0.70679027", "0.6769503", "0.6722183", "0.67205286", "0.6682625", "0.6498219", "0.64427054", "0.63156617", "0.6256246", "0.6184781", "0.6130225", "0.61295813", "0.6067873", "0.60653573", "0.60502803", "0.59177953", "0.59087306", "0.59054327", "0.588712", "0.587222", "0.5866698", "0.58452797", "0.5837622", "0.5740804", "0.5713184", "0.5710483", "0.56799924", "0.565063", "0.5626387" ]
0.7259235
1
Pixel L1 loss within the hole / mask
def loss_hole(self, mask, y_true, y_pred): return self.l1((1-mask) * y_true, (1-mask) * y_pred)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def lapsharp(image, maskret = False):\n #padded_image = np.pad(img, (1, 1), mode = 'symmetric')\n # lap is linear therefore;\n # lap f(x,y) = f(x + 1, y) + f(x - 1, y) + f(x, y + 1) + f(x, y - 1) - 4f(x,y)...\n #--------------------\n c = -1 # Depends on kernel\n # make zero kernal\n lapmask = np.zeros((3, 3))\n \n # add values to kernel\n lapmask[0,0] = 1\n lapmask[0,1] = 1\n lapmask[0,2] = 1\n\n lapmask[1,0] = 1\n lapmask[1,1] = -8\n lapmask[1,2] = 1\n\n lapmask[2,0] = 1\n lapmask[2,1] = 1\n lapmask[2,2] = 1\n #--------------------\n mask = convolve2d(image, lapmask, mode = 'same')\n result = image + c*mask\n\n # Map values to 0-255\n g1 = image - np.min(image)\n g = g1/np.max(g1) *255\n g = g.astype('uint8')\n\n if maskret == True:\n return g, mask\n else:\n return g.astype('uint8')", "def l1_loss(y_true, y_pred, y_mask):\n y_shape = tf.shape(y_true)\n border = 3\n max_pixels_shifts = 2*border\n size_image = HR_SIZE\n size_croped_image = size_image - max_pixels_shifts\n clear_pixels = size_croped_image*size_croped_image\n cropped_predictions = y_pred[:, border:size_image -\n border, border:size_image-border]\n\n X = []\n for i in range(max_pixels_shifts+1): # range(7)\n for j in range(max_pixels_shifts+1): # range(7)\n cropped_labels = y_true[:, i:i+(size_image-max_pixels_shifts),\n j:j+(size_image-max_pixels_shifts)]\n cropped_y_mask = y_mask[:, i:i+(size_image-max_pixels_shifts),\n j:j+(size_image-max_pixels_shifts)]\n\n cropped_y_mask = tf.cast(cropped_y_mask, tf.float32)\n\n cropped_predictions_masked = tf.cast(\n cropped_predictions, tf.float32)*cropped_y_mask\n cropped_labels_masked = cropped_labels*cropped_y_mask\n\n total_pixels_masked = tf.reduce_sum(cropped_y_mask, axis=[1, 2])\n\n # bias brightness\n b = (1.0/total_pixels_masked)*tf.reduce_sum(\n tf.subtract(cropped_labels_masked, cropped_predictions_masked),\n axis=[1, 2])\n\n b = tf.reshape(b, [y_shape[0], 1, 1, 1])\n\n corrected_cropped_predictions = cropped_predictions_masked+b\n corrected_cropped_predictions = corrected_cropped_predictions*cropped_y_mask\n\n l1_loss = (1.0/total_pixels_masked)*tf.reduce_sum(\n tf.abs(\n tf.subtract(cropped_labels_masked,\n corrected_cropped_predictions)\n ), axis=[1, 2]\n )\n X.append(l1_loss)\n X = tf.stack(X)\n min_l1 = tf.reduce_min(X, axis=0)\n\n return min_l1", "def masked_l1_loss(prediction, target, mask):\n abs_error = F.l1_loss(prediction, target, reduction='none')\n loss = weighted_mean(abs_error, mask)\n return loss", "def L1(im, ref, reduce=True):\n return torch.mean(torch.abs(im - ref))", "def sharp_ground(X):\n return img_conv(X, kernel_sharp)", "def __hsl_threshold(input, hue, sat, lum):\r\n out = cv2.cvtColor(input, cv2.COLOR_BGR2HLS)\r\n return cv2.inRange(out, (hue[0], lum[0], sat[0]), (hue[1], lum[1], sat[1]))", "def __hsl_threshold(input, hue, sat, lum):\n out = cv2.cvtColor(input, cv2.COLOR_BGR2HLS)\n return cv2.inRange(out, (hue[0], lum[0], sat[0]), (hue[1], lum[1], sat[1]))", "def luminance(self):\n \n return (self.r + self.g + self.b) // 3", "def ledaps(image):\n cmask = image.select('QA')\n\n valid_data_mask = tools.compute_bits(cmask, 1, 1, 'valid_data')\n cloud_mask = tools.compute_bits(cmask, 2, 2, 'cloud')\n snow_mask = tools.compute_bits(cmask, 4, 4, 'snow')\n\n good_pix = cloud_mask.eq(0).And(valid_data_mask.eq(0)).And(snow_mask.eq(0))\n result = image.updateMask(good_pix)\n\n return result", "def split_necessity(self):\n return max(self._color_var_rel) * self.n_pix\n # return reduce(int.__mul__, (l-u for u,l in self.bounds)) * self.n_pix", "def 
get_sharpness(img):\n return cv2.Laplacian(img, cv2.CV_64F).var()", "def Mask(self) -> int:", "def sanitize_mask(orig_x, orig_y, mask):\n contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)\n\n # Draw contours:\n cv2.drawContours(mask, contours, 0, (0, 255, 0), 2)\n # Calculate image moments of the detected contour\n num_objects = (len(contours))\n #threshold\n threshold = 3\n\n center_list = []\n # print(num_objects)\n if num_objects > 1:\n for item in range(num_objects):\n M = cv2.moments(contours[item])\n try:\n center_x = round(M['m10'] / M['m00'])\n center_y = round(M['m01'] / M['m00'])\n center_list.append([center_y , center_x ])\n except:\n pass\n\n # initialize retmask\n retmask = mask\n if num_objects > 1:\n for x, y in center_list:\n if orig_x - threshold <= x <= orig_x + threshold and orig_y - threshold <= y <= orig_y + threshold:\n pass\n else:\n def dfs_removal(px , py, mask):\n R = len(mask)\n C = len(mask[0])\n if mask[px][py ] != 255: \n return\n mask[px][py] = 0\n if 0 <= px - 1 and mask[px - 1][py ] == 255: dfs_removal(px - 1 , py , mask)\n if px + 1 < R and mask[px + 1][py ] == 255: dfs_removal(px + 1 , py , mask)\n if 0 <= py - 1 and mask[px][py - 1] == 255: dfs_removal(px, py -1 , mask)\n if py + 1 < C and mask[px][py + 1] == 255: dfs_removal(px, py + 1 , mask)\n\n dfs_removal(x,y, mask)\n\n return retmask", "def binarize(img, s_thres=(170, 255), l_thres=(50, 255), sobel_thres=(30, 80)):\n hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)\n\n clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))\n hls[:, :, 1] = clahe.apply(hls[:, :, 1])\n\n l_image = hls[:, :, 1]\n l_blur = cv2.GaussianBlur(l_image, (0, 0), 9)\n l_image = cv2.addWeighted(l_image, 1, l_blur, -1, 0)\n l_image = cv2.normalize(l_image, np.zeros_like(l_image), 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)\n l_binary = np.zeros_like(l_image)\n l_binary[(l_image >= l_thres[0]) & (l_image <= l_thres[1])] = 1\n\n # Sobel x\n # gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n # gray = hls[:, :, 1]\n # sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0) # Take the derivative in x\n # abs_sobelx = np.absolute(sobelx) # Absolute x derivative to accentuate lines away from horizontal\n # scaled_sobel = np.uint8(255 * abs_sobelx / np.max(abs_sobelx))\n # sxbinary = np.zeros_like(scaled_sobel)\n # sxbinary[(scaled_sobel >= sobel_thres[0]) & (scaled_sobel <= sobel_thres[1])] = 1\n # sxbinary = s_binary\n\n s_channel = hls[:, :, 2]\n s_channel = cv2.normalize(s_channel, np.zeros_like(s_channel), 0, 255, cv2.NORM_MINMAX, cv2.CV_8UC1)\n s_binary = np.zeros_like(s_channel)\n s_binary[(s_channel >= s_thres[0]) & (s_channel <= s_thres[1])] = 1\n\n # Combine the two binary thresholds\n combined_binary = np.zeros_like(s_binary)\n combined_binary[(s_binary == 1) | (l_binary == 1)] = 1\n\n # we filter out the lines with too many active pixels\n combined_binary_rows = combined_binary.sum(1)\n combined_binary[combined_binary_rows > (combined_binary.shape[1] / 2)] = 0\n\n return combined_binary", "def mask(self):", "def sharp_laplace1(img):\n\n # Shapening the image with laplacian involves adding the image concolved\n # with the laplacian back to the original image. 
Since laplace operator\n # can generate negative values we need to use a int type image\n img = np.asarray(img, dtype=np.int)\n\n # Perform the operation\n sharp = img - ndi.laplace(img)\n\n # Clip, cast and return the result\n return np.asarray(np.clip(sharp, 0, 255), dtype=np.uint8)", "def masked_l2(preds, actuals, mask):\n loss = tf.nn.l2(preds, actuals)\n mask = tf.cast(mask, dtype=tf.float32)\n mask /= tf.reduce_mean(mask)\n loss *= mask\n return tf.reduce_mean(loss)", "def calc_one_img_loss(self, y_pred, y_true):\n if len(y_true) == 0:\n return 0.0\n\n pred_box = y_pred[:4]\n pred_label = y_pred[4:]\n\n # Pos\n pos_box_mask = self.get_pos_mask(y_true)\n pos_mask = pos_box_mask[:, 0]\n true_label = pos_box_mask[:, 1]\n\n # loc loss\n hat_box = torch.Tensor(self.get_hat(pos_box_mask)).to(pred_box.device)\n pred_pos_box = pred_box[:, pos_mask]\n loc_loss = self.smooth_l1(pred_pos_box, hat_box).mean()\n\n # conf loss\n conf_pos = -self.log_softmax(pred_label[:, pos_mask])\n conf_pos = conf_pos[true_label, range(len(true_label))]\n\n neg_mask = np.array(np.ones(pred_box.shape[-1], dtype=np.bool))\n neg_mask[np.array(pos_mask, dtype=np.int32)] = False\n conf_neg = -self.log_softmax(pred_label[:, neg_mask])\n conf_neg, _ = torch.topk(conf_neg[0], len(pos_mask) * self.scale_neg)\n\n conf_loss = conf_pos.mean() + conf_neg.mean()\n return loc_loss + conf_loss", "def detection(test_img):\n # TODO: Step 2 : Your Detection code should go here.\n no_row = len(test_img)\n no_col = len(test_img[0])\n thresh = 100\n label_img = np.zeros((no_row, no_col))\n uf = []\n \n #first pass\n l = 1\n for i in range(0, no_row):\n for j in range(0, no_col):\n if test_img[i,j] < 255 - thresh:\n if i == 0 and j == 0:\n label_img[i,j] = l\n l = l+1\n elif i == 0 and j != 0:\n if label_img[i,j-1] == 0:\n label_img[i,j] = l\n l = l+1\n else:\n label_img[i,j] = label_img[i,j-1]\n elif i != 0 and j == 0:\n if label_img[i-1,j] == 0:\n label_img[i,j] = l\n l = l+1\n else:\n label_img[i,j] = label_img[i-1,j]\n else:\n if label_img[i-1,j] == 0 and label_img[i,j-1] == 0:\n label_img[i,j] = l\n l = l+1\n elif label_img[i-1,j] == 0 and label_img[i,j-1] != 0:\n label_img[i,j] = label_img[i,j-1]\n elif label_img[i-1,j] != 0 and label_img[i,j-1] == 0:\n label_img[i,j] = label_img[i-1,j]\n else:\n if label_img[i,j-1] == label_img[i-1,j]:\n label_img[i,j] = label_img[i,j-1]\n else:\n label_img[i,j] = min(label_img[i-1,j],label_img[i,j-1])\n uf.append([min(label_img[i,j-1],label_img[i-1,j]),max(label_img[i,j-1],label_img[i-1,j])])\n l = l - 1\n \n #2nd pass\n def ufds(x,l):\n b = []\n for i in range(1,l+1):\n b.append([i])\n for j in x:\n i1 = 0\n i2 = 0\n for k in range(0,len(b)):\n if j[0] in b[k]:\n i1 = k\n if j[1] in b[k]:\n i2 = k\n if i1 != i2:\n b[i1] = b[i1] + b[i2]\n del b[i2]\n return b\n \n bman = ufds(uf,l)\n \n #3rd pass\n uf_arr = np.zeros(l)\n for i in range(0,len(uf_arr)):\n for j in bman:\n if i+1 in j:\n uf_arr[i] = min(j) \n \n fin_img = np.zeros((no_row, no_col))\n for i in range(0, no_row):\n for j in range(0, no_col):\n if label_img[i,j] != 0:\n fin_img[i,j] = uf_arr[int(label_img[i,j] - 1)]\n \n all_label = []\n '''\n for i in bman:\n all_label.append(min(i))\n ''' \n for i in range(0, no_row):\n for j in range(0, no_col):\n if fin_img[i,j] !=0 and fin_img[i,j] not in all_label:\n all_label.append(fin_img[i,j])\n \n # main image\n k_img = np.zeros((no_row, no_col))\n for i in range(0, no_row):\n for j in range(0, no_col):\n if fin_img[i,j] != 0:\n k_img[i,j] = (all_label.index(fin_img[i,j]) + 1)\n \n k_list 
= []\n for i in range(1,len(all_label)+1):\n x = None\n y = None\n hx = None\n hy = None\n for j in range(0,no_row):\n for k in range(0,no_col):\n if k_img[j,k] == i:\n if y == None and x == None:\n y = j\n hy = j\n x = k\n hx = k\n else:\n if y > j:\n y = j\n if hy < j:\n hy = j\n if x > k:\n x = k\n if hx < k:\n hx = k\n k_list.append({\"bbox\": [x, y, hx-x, hy-y]})\n \n return k_list\n \n #raise NotImplementedError", "def determine_goodpixels(logLam, lamRangeTemp, z, width=800):\n# -----[OII]----- Hdelta Hgamma Hbeta -----[OIII]----- [OI] -----[NII]----- Halpha -----[SII]-----\n lines = np.array([3726.03, 3728.82, 4101.76, 4340.47, 4861.33, 4958.92, 5006.84, 6300.30, 6548.03, 6583.41, 6562.80, 6716.47, 6730.85])\n dv = np.full_like(lines, width) # width/2 of masked gas emission region in km/s\n c = 299792.458 # speed of light in km/s\n\n flag = False\n for line, dvj in zip(lines, dv):\n flag |= (np.exp(logLam) > line*(1 + z)*(1 - dvj/c)) \\\n & (np.exp(logLam) < line*(1 + z)*(1 + dvj/c))\n\n flag |= np.exp(logLam) > lamRangeTemp[1]*(1 + z)*(1 - 900/c) # Mask edges of\n flag |= np.exp(logLam) < lamRangeTemp[0]*(1 + z)*(1 + 900/c) # stellar library\n\n return np.flatnonzero(~flag)", "def mask_image(image):\n pass", "def preprocess(image):\n return image - MEAN_PIXEL", "def check_for_white(img):\n return white_percentage(img, 220, 0.8)", "def check_for_white(img):\n return white_percentage(img, 220, 0.8)", "def preprocess(image):\n return (image / 255) * 2 - 1", "def paint_shadow(img, sheared_img, l):\n img_h = img.shape[0] # source image data height\n img_w = img.shape[1] # source image data width\n\n # white data\n new_img = np.full((img_h, img_w, 3), 255, dtype='uint8')\n\n for i in range(img_h):\n row_offset = int((img_h - i)*l) if l>=0 else int(i*l)\n for j in range(img_w):\n # if in source img data is not white paint source image\n if(not np.all(img[i, j] >= (240, 240, 240))):\n new_img[i, j] = img[i, j]\n # else if in sheared img we have non white\n elif(j > row_offset and not np.all(sheared_img[i, j] >= (230, 230, 230))):\n new_img[i, j] = (100, 100, 100)\n else:\n new_img[i, j] = img[i, j]\n return new_img", "def find_tfl_lights(image: np.ndarray):\n kernel = np.array(\n [[0, 0, 0],\n [0, 0, 0],\n [0, 1, 0],\n [1, 3, 1],\n [0, 1, 0]])\n\n kernel = kernel - kernel.mean()\n\n red_image = image.copy()\n red_image = red_image[:, :, 0]\n _, red_image = cv2.threshold(red_image, 200, 255, cv2.THRESH_BINARY)\n output = cv2.filter2D(red_image, -1, kernel)\n output_copy = output.copy()\n output = ndimage.maximum_filter(output, size=30)\n output = output - output_copy\n mask = ((output == 0) & (output_copy > 0))\n red_points = np.where(mask)\n positions = []\n final_red_points = []\n for point1 in range(len(red_points[0])):\n point = (red_points[0][point1], red_points[1][point1])\n pixel = image[point[0], point[1]]\n if (pixel[1] < 170 or pixel[2] < 120) and pixel[0] >= 200:\n final_red_points.append(point)\n final_red_points = filter_points(final_red_points)\n positions += final_red_points\n auxilary = ['r'] * len(positions)\n red_x = [val[1] for val in final_red_points]\n red_y = [val[0] for val in final_red_points]\n green_image = image.copy()\n green_image = green_image[:, :, 1]\n _, green_image = cv2.threshold(green_image, 190, 255, cv2.THRESH_BINARY)\n output = cv2.filter2D(green_image, -1, kernel)\n output_copy = output.copy()\n output = ndimage.maximum_filter(output, size=30)\n output = output - output_copy\n mask = ((output == 0) & (output_copy > 0))\n green_points = 
np.where(mask)\n final_green_points = []\n for point1 in range(len(green_points[0])):\n point = (green_points[0][point1], green_points[1][point1])\n pixel = image[point[0], point[1]]\n if pixel[0] <= 180 and pixel[1] >= 220 and pixel[2] >= 160:\n final_green_points.append(point)\n\n final_green_points = filter_points(final_green_points)\n positions += final_green_points\n auxilary += ['g'] * len(final_green_points)\n green_x = [val[1] for val in final_green_points]\n green_y = [val[0] for val in final_green_points]\n print(f\"There are {len(green_x) + len(red_x)} points\")\n return positions, auxilary", "def dither_img(img,num_pxl,bw_threshold=128):\n\n img_copy = np.copy(img)\n img_copy[img_copy >= bw_threshold] = 255\n img_copy[img_copy < bw_threshold] = 0\n\n h = img_copy.shape[0]\n w = img_copy.shape[1]\n\n coordinates_0 = np.where(img_copy == 0)\n coordinates_0 = tuple(zip(coordinates_0[0],coordinates_0[1]))\n\n coordinates_255 = np.where(img_copy == 255)\n coordinates_255 = tuple(zip(coordinates_255[0],coordinates_255[1]))\n\n if num_pxl == 0:\n return img_copy\n\n selected_coordinates_0 = random.sample(coordinates_0,min(num_pxl,min(len(coordinates_0),len(coordinates_255))))\n selected_coordinates_255 = random.sample(coordinates_255,min(num_pxl,min(len(coordinates_0),len(coordinates_255))))\n\n selected_coordinates_0 = tuple(zip(*selected_coordinates_0))\n selected_coordinates_255 = tuple(zip(*selected_coordinates_255))\n\n img_copy[selected_coordinates_0[0],selected_coordinates_0[1]] = 255\n img_copy[selected_coordinates_255[0],selected_coordinates_255[1]] = 0\n\n return img_copy", "def process(img):\n img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) \n x_t = cv2.resize(img, (112, 160), interpolation=cv2.INTER_AREA)\n x_t = np.nan_to_num(x_t)\n x_t = cv2.Laplacian(x_t,cv2.CV_8U)\n\n return x_t.astype(np.uint8)", "def make_lungmask(img, display=False):\n row_size= img.shape[0]\n col_size = img.shape[1]\n \n mean = np.mean(img)\n std = np.std(img)\n img = img-mean\n img = img/std\n\n # uses hounsfield values near lungs to normalize images\n\n middle = img[int(col_size/5):int(col_size/5*4),int(row_size/5):int(row_size/5*4)] \n mean = np.mean(middle) \n max = np.max(img)\n min = np.min(img)\n img[img==max]=mean\n img[img==min]=mean\n \n # uses kmeans to separate foreground (soft tissue / bone) and background (lung/air)\n\n kmeans = KMeans(n_clusters=2).fit(np.reshape(middle,[np.prod(middle.shape),1]))\n centers = sorted(kmeans.cluster_centers_.flatten())\n threshold = np.mean(centers)\n thresh_img = np.where(img<threshold,1.0,0.0)\n\n # performs erosion and dilation\n\n eroded = morphology.erosion(thresh_img,np.ones([3,3]))\n dilation = morphology.dilation(eroded,np.ones([8,8]))\n\n labels = measure.label(dilation) # Different labels are displayed in different colors\n label_vals = np.unique(labels)\n regions = measure.regionprops(labels)\n good_labels = []\n for prop in regions:\n B = prop.bbox\n if B[2]-B[0]<row_size/10*9 and B[3]-B[1]<col_size/10*9 and B[0]>row_size/5 and B[2]<col_size/5*4:\n good_labels.append(prop.label)\n mask = np.ndarray([row_size,col_size],dtype=np.int8)\n mask[:] = 0\n\n # makes mask\n\n for N in good_labels:\n mask = mask + np.where(labels==N,1,0)\n mask = morphology.dilation(mask,np.ones([10,10])) # one last dilation\n final = mask * img\n \n # shows and saves output\n\n plt.imshow(final)\n im = Image.fromarray(final*128)\n im = im.convert(\"L\")\n im.save(S)\n \n return" ]
[ "0.65443623", "0.6356551", "0.6331319", "0.60069346", "0.59986883", "0.5970008", "0.5963153", "0.59551394", "0.58635867", "0.5861597", "0.58614784", "0.58338666", "0.5818547", "0.5816668", "0.58154553", "0.5807206", "0.57979506", "0.5765049", "0.57532686", "0.57518965", "0.5751064", "0.5741342", "0.57369417", "0.57369417", "0.5733889", "0.5727057", "0.57186353", "0.56798834", "0.5652946", "0.56382775" ]
0.68490756
0
Perceptual loss based on VGG16, see. eq. 3 in paper
def loss_perceptual(self, vgg_out, vgg_gt, vgg_comp): loss = 0 for o, c, g in zip(vgg_out, vgg_comp, vgg_gt): loss += self.l1(o, g) + self.l1(c, g) return loss
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def vae_loss(x, t_decoded):\r\n return K.mean(reconstruction_loss(x, t_decoded))", "def transformed_outcome_loss(tau_pred, y_true, g, prob_treatment):\n # Transformed outcome\n y_trans = (g - prob_treatment) * y_true / (prob_treatment * (1-prob_treatment))\n loss = np.mean(((y_trans - tau_pred)**2))\n return loss", "def loss_total(self, mask):\n\n def loss(y_true, y_pred):\n\n # Compute predicted image with non-hole pixels set to ground truth\n y_comp = mask * y_true + (1-mask) * y_pred\n\n # Compute the vgg features. \n if self.vgg_device:\n with tf.device(self.vgg_device):\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n else:\n vgg_out = self.vgg(y_pred)\n vgg_gt = self.vgg(y_true)\n vgg_comp = self.vgg(y_comp)\n \n # Compute loss components\n l1 = self.loss_valid(mask, y_true, y_pred)\n l2 = self.loss_hole(mask, y_true, y_pred)\n l3 = self.loss_perceptual(vgg_out, vgg_gt, vgg_comp)\n l4 = self.loss_tv(mask, y_comp)\n l5 = - 0.5 * K.sum(1 + self.z_log_var -self.cl - K.square(self.z_mean)/K.exp(self.cl) - K.exp(self.z_log_var)/K.exp(self.cl))\n # Return loss function\n return l1 + 6*l2 + 0.05*l3 + 0.1*l4 +l5 \n return loss", "def loss_vgg(style_images, content_image, output_images, vggfile):\n c_layers = C_WEIGHTS.keys()\n s_layers = S_WEIGHTS.keys()\n vgg16_filters = load_vgg(vggfile)\n vgg16 = nn_build.Network(\n VGG16DESC, 'vgg16', initial=vgg16_filters, process=True)\n c_net = vgg16.layers(content_image, c_layers)\n\n c_loss = 0.\n s_loss = 0.\n tv_loss = 0.\n for style in style_images:\n s_net = vgg16.layers(style_images[style], s_layers)\n o_net = vgg16.layers(output_images[style], set(c_layers+s_layers))\n for layer in c_layers:\n _, h, w, c = c_net[layer].get_shape().as_list()\n c_loss += C_WEIGHTS[layer]*tf.nn.l2_loss(\n o_net[layer]-c_net[layer])/(h*w*c)\n for layer in s_layers:\n bs, _, _, c = o_net[layer].get_shape().as_list()\n s_loss += S_WEIGHTS[layer]*tf.nn.l2_loss(\n Gram(o_net[layer], bs) - Gram(s_net[layer], bs))\n tv_loss += TV_WEIGHTS*(\n tf.nn.l2_loss(output_images[style][:,1:,:,:]\n - output_images[style][:,:-1,:,:])\n + tf.nn.l2_loss(output_images[style][:,:,1:,:]\n - output_images[style][:,:,:-1,:]))\n style_num = len(style_images)\n return c_loss/style_num, s_loss/style_num, tv_loss/style_num", "def compute_loss(self):", "def tv_loss(img, tv_weight):\n # Your implementation should be vectorized and not require any loops!\n # *****START OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****\n\n pass\n\n # *****END OF YOUR CODE (DO NOT DELETE/MODIFY THIS LINE)*****", "def tv_loss(x, name='tv_loss'):\n raise NotImplementedError(\"Please use tensorflow total_variation loss.\")", "def mse_loss1_rgb(y_true,y_pred):\n y_true = tensor_ycbcr2rgb(y_true)/255.\n return ((tf.keras.losses.MSE(tf.expand_dims(y_true, axis=0),tf.expand_dims(y_pred, axis=0))))", "def mse_loss1_rgb_col(y_true,y_pred):\n y_pred = tf_rgb2ycbcr(y_pred)/255.\n \n y_c_pred,cb_c_pred,cr_c_pred=tf.split(y_pred, 3 , axis=-1)\n y_c_true,cb_c_true,cr_c_true=tf.split(y_true, 3 , axis=-1) \n \n return ((tf.keras.losses.MSE(tf.expand_dims(cb_c_pred, axis=0),tf.expand_dims(cb_c_true, axis=0))) + (tf.keras.losses.MSE(tf.expand_dims(cr_c_pred, axis=0),tf.expand_dims(cr_c_true, axis=0))))", "def loss_fn(y_true,y_pred): \n loss = tf.nn.softmax_cross_entropy_with_logits_v2(y_true,\n y_pred,\n axis=-1,\n )\n loss = tf.reduce_mean(loss,name=\"loss\")\n return loss", "def ovo_crossentropy_loss(y_true, y_pred):\n # Bei OvO wird als Aktivierungsfunktion 'tanh' verwendet. 
Diese produziert Werte aus (-1, 1)\n # Auf Wertebereich [0,1] hochskalieren (eigentlich möchte man (0,1) erreichen um später im Logarithmus\n # keine undefinierten Werte zu erhalten, aber wegen numerischen Problemen sind auch 0 und 1 denkbare Werte)\n y_true_scaled = (y_true + 1.0) / 2.0\n y_pred_scaled = (y_pred + 1.0) / 2.0\n\n # Wertebereich von y_pred_scaled von [0,1] auf [0.00001, 0.99999] einschränken wegen Logarithmen. Näherung an (0,1)\n\n zeroes = tf.zeros_like(y_pred_scaled) # Tensor mit gleicher Dimension wie 'y_pred_scaled' bestehend aus nur 0en\n # Alle kleineren Werte als 0.00001 in 'y_pred_scaled' auf 0.00001 setzen (untere Schranke für Wertebereich)\n y_pred_scaled = tf.where(y_pred_scaled < 0.00001, zeroes + 0.00001, y_pred_scaled)\n # Alle größeren Werte als 0.99999 in 'y_pred_scaled' auf 0.99999 setzen (obere Schranke für Wertebereich)\n y_pred_scaled = tf.where(y_pred_scaled > 0.99999, zeroes + 0.99999, y_pred_scaled)\n\n # J_{OvO} aus Pawara et al. anwenden\n log_function = tf.log if tf.__version__ == \"1.13.1\" else tf.math.log # flexibel für neue / alte Version\n loss = - tf.reduce_mean(\n y_true_scaled * log_function(y_pred_scaled) + (1 - y_true_scaled) * log_function(1 - y_pred_scaled))\n return loss", "def hpm_loss(self, x, y, t, Ex_u, Ex_v):\n\n x = x.view(-1)\n y = y.view(-1)\n t = t.view(-1)\n\n u, v, f_u, f_v = self.net_pde(x, y, t)\n\n Ex_u = Ex_u.view(-1)\n Ex_v = Ex_v.view(-1)\n\n hpmLoss = torch.mean(f_u ** 2) + torch.mean(f_v ** 2) + torch.mean((u - Ex_u) ** 2) + torch.mean((v - Ex_v) ** 2) \n return hpmLoss", "def mse_loss1_rgb_soloY(y_true,y_pred):\n y_pred = tf_rgb2ycbcr(y_pred)/255.\n y_c_pred,cb_c_pred,cr_c_pred=tf.split(y_pred, 3 , axis=-1)\n y_c_true,cb_c_true,cr_c_true=tf.split(y_true, 3 , axis=-1) \n \n return (tf.keras.losses.MSE(tf.expand_dims(y_c_pred, axis=0),tf.expand_dims(y_c_true, axis=0)) )", "def genLoss(self, *data):\r\n _, (x_unlab, _) = data\r\n z = self.getInputNoise(self.hypers['ul_BS'])\r\n fake_logits = self.D(self.G(z))\r\n g_losses = -1*logOneMinusSoftmax(fake_logits)[:,self.D.numClasses-1]\r\n return torch.mean(g_losses)", "def loss(loss_name):\n \n def contrastive_loss(y_true, y_pred, margin = 1):\n \"\"\"Implementation of the triplet loss function\n\n\n Contrastive loss = 0.5 * mean( (1-true_value) * square(distance) + true_value * square( max(margin-distance, 0) ))\n\n Args:\n y_true (int): true label, positive pair (same class) -> 0, \n negative pair (different class) -> 1\n \n y_pred (list): python list containing two objects in a pair of tensors:\n left : the encodings for one image data in a pair\n right : the encodings for the other image data in a pair\n margin (float, optional): m > 0 determines how far the embeddings of \n a negative pair should be pushed apart. 
Defaults to 1.\n\n\n Returns:\n loss (float): real number, value of the loss\n \"\"\"\n\n left = y_pred[0]\n right = y_pred[1]\n\n distance = tf.math.sqrt(tf.math.reduce_sum(tf.math.square(left - right), axis=-1))\n\n loss_positive = tf.math.square(distance)\n loss_negative = tf.math.square(tf.maximum(0., margin - distance))\n \n loss = y_true * loss_negative + (1 - y_true) * loss_positive\n loss = 0.5 * tf.math.reduce_mean(loss)\n\n return loss\n\n def triplet_loss(y_true, y_pred, margin = 1):\n \"\"\"Implementation of the triplet loss function\n\n Arguments:\n y_true : true labels, required when you define a loss in Keras, \n not applied in this function.\n\n y_pred (list): python list containing three objects:\n anchor : the encodings for the anchor data\n positive : the encodings for the positive data (similar to anchor)\n negative : the encodings for the negative data (different from anchor)\n \n margin (float, optional): m > 0 determines how far the embeddings of \n a negative data should be pushed apart. Defaults to 1.\n\n Returns:\n loss (float): real number, value of the loss\n \"\"\"\n\n anchor = y_pred[0]\n positive = y_pred[1]\n negative = y_pred[2]\n\n # squared distance between the anchor and the positive\n pos_dist = tf.math.reduce_sum(tf.math.square(anchor - positive), axis=-1)\n\n # squared distance between the anchor and the negative\n neg_dist = tf.math.reduce_sum(tf.math.square(anchor - negative), axis=-1)\n\n # compute loss\n basic_loss = margin + pos_dist - neg_dist\n loss = tf.math.maximum(basic_loss,0.0)\n loss = tf.math.reduce_mean(loss)\n return loss\n\n \n if loss_name == 'contrastive_loss':\n return contrastive_loss\n \n if loss_name == 'triplet_loss':\n return triplet_loss", "def cross_entropy(y_observed, p):\n\n pass", "def loss(data, y_pred):\n # TODO: Try using points other than the training data points for the divergence calculation.\n y_true = data[:,:2]\n p1 = data[:,2:5]\n p2 = data[:,5:8]\n p3 = data[:,8:11]\n p4 = data[:,11:14]\n\n ### Calculate divergence using model predictions:\n\n # Step 1: Use the model to calculate predicted wind field in the surrounding points p1, p2, p3 and p4.\n y_pred_p1 = model(p1)\n y_pred_p2 = model(p2)\n y_pred_p3 = model(p3)\n y_pred_p4 = model(p4)\n\n # Step 2: Calculate the partial derivatives with a three-point centered difference.\n scale_x = self.scaler_data.scale_[0] #scale-factor for x\n scale_y = self.scaler_data.scale_[1] #scale-factor for y\n\n dudx = (y_pred_p1[:, 0] - y_pred_p3[:, 0]) / (p1[:,0] - p3[:,0]) # <- pj = transformed data\n dvdy = (y_pred_p2[:, 1] - y_pred_p4[:, 1]) / (p2[:,1] - p4[:,1]) # <- pj = transformed data\n\n # Step 3: Calculate the divergence.\n divergence = ( dudx / scale_x + dvdy / scale_y ) * np.mean([scale_x, scale_y])\n #tf.print(K.mean(K.abs(divergence)))\n\n # Step 4: Calculate and return total loss.\n return K.mean(K.square(y_true - y_pred)) + gamma*K.mean(K.square(divergence))", "def svm_loss(x, y):\n x = np.squeeze(x)\n N = x.shape[0]\n yt = y\n yt[y==0]=-1\n tmp = 1-yt*x\n mask = np.ones_like(tmp)\n mask[tmp<=0] = 0\n tmp = tmp*mask\n loss = np.sum(tmp)/N\n \n dx = -yt*mask/N\n # dx = np.reshape(dx,[dx.shape[0],1])\n return loss, dx", "def tversky_loss(yhat, ytrue):\n return torch.mean(1 - tversky_index(yhat, ytrue))", "def loss(self, X, y):\n\n # Initialize the loss to zero.\n loss = 0.0\n num_classes = self.W.shape[0] # C = num_classes\n num_train = X.shape[0]\n \n exp_a = np.zeros((num_classes,num_train))\n # ================================================================ 
#\n # YOUR CODE HERE:\n # Calculate the normalized softmax loss. Store it as the variable loss.\n # (That is, calculate the sum of the losses of all the training \n # set margins, and then normalize the loss by the number of \n # training examples.)\n # ================================================================ #\n \n \n for i in np.arange(num_train):\n \n Loss = 0.0\n\n class_scores = np.dot(self.W,X[i,:].T) # calculating class scores (C x 1 vector)\n class_scores -= np.max(class_scores) # considering the possible issue for numerical instability and account for it\n\n exp_a[:,i] = np.exp(class_scores) # turning class scores to probabilities (C x 1 vector), without normalization\n\n Loss -= np.log(exp_a[y[i],i]/np.sum(exp_a[:,i]))\n \n\n #p[:,i] = exp_a[:,i]/np.sum(exp_a[:,i]) # p now is a valid probability matrix\n #print(p[:,i])\n\n loss += Loss \n #print(Loss,i) \n \n pass\n loss /= num_train\n # ================================================================ #\n # END YOUR CODE HERE\n # ================================================================ #\n\n return loss", "def loss_fn(gr_truth, pred):\n return 100 * dice_loss(pred, gr_truth) + softmax_weighted_loss(pred, gr_truth)", "def loss_fn(params):\n logits = models.ProgramTransformer(config).apply(\n {'params': params},\n inputs,\n outputs,\n programs,\n rngs={'dropout': train_rng})\n loss, weight_sum = compute_weighted_cross_entropy(logits, programs, weights)\n mean_loss = loss / weight_sum\n return mean_loss, logits", "def compute_loss(self, features, mode, params, precomputed):\n raise NotImplementedError(\"Model does not implement loss.\")", "def calculate_loss(self, pred, gold, smoothing=False):\n gold = gold.contiguous().view(-1)\n if smoothing:\n epsilon = 0.1\n n_class = pred.size(1)\n one_hot = torch.zeros_like(pred).scatter(1, gold.view(-1, 1), 1)\n one_hot = one_hot * (1 - epsilon) + \\\n (1 - one_hot) * epsilon / (n_class - 1)\n\n log_prb = F.log_softmax(pred, dim=1)\n # create non-padding mask with torch.ne()\n non_pad_mask = gold.ne(self.constants.PAD)\n loss = -(one_hot * log_prb).sum(dim=1)\n # losses are averaged later\n loss = loss.masked_select(non_pad_mask).sum()\n else:\n loss = F.cross_entropy(\n pred, gold, ignore_index=self.constants.PAD, reduction='sum')\n return loss", "def backward_G(self):\n # Calculate regularzation loss to make transformed feature and target image feature in the same latent space\n self.loss_reg_gen = self.loss_reg * self.opt.lambda_regularization\n\n # Calculate l1 loss \n loss_app_gen = self.L1loss(self.img_gen, self.input_P2)\n self.loss_app_gen = loss_app_gen * self.opt.lambda_rec \n \n # parsing loss\n label_P2 = self.label_P2.squeeze(1).long()\n #print(self.input_SPL2.min(), self.input_SPL2.max(), self.parsav.min(), self.parsav.max())\n self.loss_par = self.parLoss(self.parsav,label_P2)# * 20. 
\n self.loss_par1 = self.L1loss(self.parsav, self.input_SPL2) * 100 \n\n # Calculate GAN loss\n base_function._freeze(self.net_D)\n D_fake = self.net_D(self.img_gen)\n self.loss_ad_gen = self.GANloss(D_fake, True, False) * self.opt.lambda_g\n\n # Calculate perceptual loss\n loss_content_gen, loss_style_gen = self.Vggloss(self.img_gen, self.input_P2) \n self.loss_style_gen = loss_style_gen*self.opt.lambda_style\n self.loss_content_gen = loss_content_gen*self.opt.lambda_content\n\n total_loss = 0\n\n for name in self.loss_names:\n if name != 'dis_img_gen':\n #print(getattr(self, \"loss_\" + name))\n total_loss += getattr(self, \"loss_\" + name)\n total_loss.backward()", "def loss(W_vect, X, T):\n # log_prior = - 0.5 * L2_reg * jnp.dot(W_vect, W_vect)\n return jnp.mean((predictions(W_vect, X) - T)**2) + 0.5*jnp.log(2*jnp.pi)", "def prec_loss(self, y_pred):\n vol_shape = y_pred.get_shape().as_list()[1:-1]\n ndims = len(vol_shape)\n \n sm = 0\n for i in range(ndims):\n d = i + 1\n # permute dimensions to put the ith dimension first\n r = [d, *range(d), *range(d + 1, ndims + 2)]\n y = K.permute_dimensions(y_pred, r)\n df = y[1:, ...] - y[:-1, ...]\n sm += K.mean(df * df)\n\n return 0.5 * sm / ndims", "def div_loss(gamma, model):\n\n def loss(data, y_pred):\n # TODO: Try using points other than the training data points for the divergence calculation.\n \"\"\"Punish non-zero divergence. Each input of size N (batch size) is expanded into\n 4 sets of surrounding points, p1, p2, p3, and p4, where the elements pj_i (j = 1,2,3,4 and\n i = 1,2,3...N) are defined according to:\n\n y\n | p2_i\n |\n | p1_i P_i p3_i\n |\n ------------- x p4_i\n\n\n The sets p1, p2, p3, and p4 are used in estimating the divergence for each point P_i. The partial\n derivatives are calculated with a three-point centered difference.\n\n The extra points are \"smuggled\" into the loss function in the data argument:\n\n data.head() =\n < y_true > <------ p1 -------> <------- p2 ------> <------- p3 ------> <--------p4 ------>\n | u | v | x1 | y1 | h | x2 | y2 | h | x3 | y3 | h | x4 | y4 | h |\n \"\"\"\n y_true = data[:,:2]\n p1 = data[:,2:5]\n p2 = data[:,5:8]\n p3 = data[:,8:11]\n p4 = data[:,11:14]\n\n ### Calculate divergence using model predictions:\n\n # Step 1: Use the model to calculate predicted wind field in the surrounding points p1, p2, p3 and p4.\n y_pred_p1 = model(p1)\n y_pred_p2 = model(p2)\n y_pred_p3 = model(p3)\n y_pred_p4 = model(p4)\n\n # Step 2: Calculate the partial derivatives with a three-point centered difference.\n scale_x = self.scaler_data.scale_[0] #scale-factor for x\n scale_y = self.scaler_data.scale_[1] #scale-factor for y\n\n dudx = (y_pred_p1[:, 0] - y_pred_p3[:, 0]) / (p1[:,0] - p3[:,0]) # <- pj = transformed data\n dvdy = (y_pred_p2[:, 1] - y_pred_p4[:, 1]) / (p2[:,1] - p4[:,1]) # <- pj = transformed data\n\n # Step 3: Calculate the divergence.\n divergence = ( dudx / scale_x + dvdy / scale_y ) * np.mean([scale_x, scale_y])\n #tf.print(K.mean(K.abs(divergence)))\n\n # Step 4: Calculate and return total loss.\n return K.mean(K.square(y_true - y_pred)) + gamma*K.mean(K.square(divergence))\n return loss", "def loss_fun(model: GPModel, params: dict) -> float:\n py = model.module.call(params, train_ds['index_points'])\n return -py.log_prob(train_ds['y'])", "def loss_fn(self, targets, outputs, model):" ]
[ "0.66081715", "0.65532786", "0.6524559", "0.6420034", "0.63748574", "0.6323795", "0.6241175", "0.62063944", "0.6180593", "0.61634547", "0.61377925", "0.6124796", "0.6107711", "0.6096048", "0.6095119", "0.60922927", "0.60557693", "0.60514146", "0.6050098", "0.60289675", "0.6025815", "0.60202575", "0.60173357", "0.60069627", "0.5971377", "0.5960961", "0.59432614", "0.59371525", "0.5936926", "0.5925429" ]
0.7599071
0
Sets pandas to display really big data frames.
def big_dataframe_setup(): # pragma: no cover pd.set_option("display.max_colwidth", sys.maxsize) pd.set_option("max_colwidth", sys.maxsize) # height has been deprecated. # pd.set_option('display.height', sys.maxsize) pd.set_option("display.max_rows", sys.maxsize) pd.set_option("display.max_columns", sys.maxsize) pd.set_option("display.width", sys.maxsize) pd.set_option("display.colheader_justify", "center") pd.set_option("display.column_space", sys.maxsize) pd.set_option("display.max_seq_items", sys.maxsize) pd.set_option("display.expand_frame_repr", True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def display_df(df):\n with pd.option_context(\"display.max_rows\", 1000, \"display.max_columns\", 100):\n display(df.head(10))", "def display_all(df):\n with pd.option_context(\"display.max_rows\", 1000):\n with pd.option_context(\"display.max_columns\", 1000):\n display(df)", "def display_all(df):\n with pd.option_context(\"display.max_rows\", 1000, \"display.max_columns\", 1000):\n display(df)", "def dataframe_displayer(df):\n\n #On paramètre les options d'affichage du module pandas\n pd.set_option('display.max_rows', None)\n pd.set_option('display.max_columns', None)\n pd.set_option('display.width', None)\n pd.set_option('display.max_colwidth', -1)\n\n print(df)", "def print_full(df):\n pandas.set_option('display.max_rows', len(df))\n print df\n pandas.reset_option('display.max_rows')", "def show_dataframe(self, df, **kwargs):\n show_index = False\n if 'show_index' in kwargs:\n show_index = kwargs['show_index']\n\n exceed_limit = len(df) > self.max_result\n header_buf = StringIO(\"\")\n if show_index:\n idx_name = str(df.index.name) if df.index.name is not None else \"\"\n header_buf.write(self.normalizeColumn(idx_name) + \"\\t\")\n header_buf.write(self.normalizeColumn(str(df.columns[0])))\n for col in df.columns[1:]:\n header_buf.write(\"\\t\")\n header_buf.write(self.normalizeColumn(str(col)))\n header_buf.write(\"\\n\")\n\n body_buf = StringIO(\"\")\n rows = df.head(self.max_result).values if exceed_limit else df.values\n rowNumber = len(rows)\n index = df.index.values\n for idx, row in zip(index, rows):\n if show_index:\n body_buf.write(\"%html <strong>{}</strong>\".format(idx))\n body_buf.write(\"\\t\")\n body_buf.write(self.normalizeColumn(str(row[0])))\n for cell in row[1:]:\n body_buf.write(\"\\t\")\n body_buf.write(self.normalizeColumn(str(cell)))\n # don't print '\\n' after the last row\n rowNumber -=1\n if rowNumber != 0:\n body_buf.write(\"\\n\")\n body_buf.seek(0)\n header_buf.seek(0)\n print(\"%table \" + header_buf.read() + body_buf.read())\n body_buf.close()\n header_buf.close()\n if exceed_limit:\n print(\"\\n%html <font color=red>Results are limited by {}.</font>\".format(self.max_result))", "def df_print(df):\n with pd.option_context('display.max_rows', None, 'display.max_columns', 3):\n print(df)", "def print_full(x):\r\n pd.set_option('display.max_rows', len(x))\r\n pd.set_option('display.max_columns', 1000)\r\n pd.set_option('display.expand_frame_repr', False)\r\n print(x)\r\n pd.reset_option('display.max_rows')\r\n pd.reset_option('display.max_columns')\r\n pd.reset_option('display.expand_frame_repr')", "def debug_print_dataframe(data, num_rows=2, debug=False):\n if debug:\n with pandas.option_context('display.max_rows', None, 'display.max_columns',\n None):\n print(data[:num_rows])", "def glass_pandas(self):\n # pandas.set_option('display.width', 120)\n # TODO timeit (git_implementation) vs (my_implementation)\n # * df = pd.DataFrame(json.loads(r.text))\n # * df = df.set_index('t')\n # * df.index = pd.to_datetime(df.index, unit='s')\n # * df = df.sort_index()\n # * s = df.v\n # * s.name = '_'.join(url.split('/')[-2:])\n # * return s\n # for elem in self.loaded:\n # _metric, _data = elem[1]['_metrics'], elem[1]['_data']\n # try:\n # frame_keys = ['t'] + list(_data[0]['o'].keys())\n # framed = pandas.DataFrame(\n # data=[{k: (_data[iters]['t'] if k in 't' else _data[iters]['o'][k])\n # for k in frame_keys} for iters in range(len(_data))],\n # columns=frame_keys)\n # except KeyError:\n # framed = pandas.DataFrame(_data)\n # framed.set_index('t', inplace=True)\n 
# framed.index = pandas.to_datetime(\n # framed.index.to_flat_index(), unit='s', infer_datetime_format=True)\n # framed.sort_index(inplace=True)\n # framed.name = _metric\n # print(framed.name)\n # print(framed)", "def enable():\n pd.options.display.html.table_schema = True", "def set_display_options(rows, columns): # 3rd: pandas option\n pd.set_option('display.max_rows', rows)\n pd.set_option('display.max_columns', columns)\n return rows, columns", "def show_data(df):\n printmd(str(\"The Data contains **\" + str(df.shape[0])+ '** rows.'))\n printmd(\"*__Sample of the data :__*\")\n display(df.head(n=5))\n print(\"\")\n print(\"\")", "def disable():\n pd.options.display.html.table_schema = False", "def disp(df):\n display(HTML(df.to_html(index=False)))", "def pd_set_display(max_col=True, max_row=True, col_wrap=False):\n if max_col:\n pd.set_option(\"max_columns\", None) # Showing only two columns\n if max_row:\n pd.set_option(\"max_rows\", None)\n\n if not col_wrap:\n pd.set_option('display.expand_frame_repr', False)", "def pandas_write_buffer(self, df):\n\n buffer = io.StringIO()\n df.to_markdown(buffer)\n print(buffer.getvalue())\n if self.file_logging:\n with open(self.log_filename, mode='a') as f:\n print(buffer.getvalue(), file=f)\n buffer.close()", "def display_df_info(df, df_name, max_rows=None, max_columns=None):\n # Head\n display(HTML('<h4>{name}</h4>'.format(\n name=df_name)))\n with pd.option_context('display.max_rows', max_rows, 'display.max_columns', max_columns):\n display(df)\n\n # Attributes\n display(HTML(\"<h4>Data attributes</h4>\"))\n display_df = pd.DataFrame.from_dict(\n {'Null counts': df.isnull().sum(), 'Data types': df.dtypes, 'Unique values': df.nunique()})\n display(display_df)", "def df():\n fs.df()", "def print_dataframe_content(dataframe):\n\n dataframe_len = len(dataframe.columns)\n dataframe_columns_index = list(range(0, dataframe_len))\n dataframe_column_index_len = len(dataframe_columns_index)\n\n if dataframe.empty:\n print('Empty dataframe.')\n else:\n should_continue_process = True\n current_process_batch_size = 0\n\n while should_continue_process:\n slide_object = slice(current_process_batch_size,\n current_process_batch_size+MAX_ROWS_TO_PRINT)\n dataframe_index_columns = dataframe_columns_index[slide_object]\n\n current_process_batch_size = current_process_batch_size+MAX_ROWS_TO_PRINT\n\n dataframe_buffer = dataframe.iloc[:,\n dataframe_index_columns].copy()\n print(tabulate(dataframe_buffer, headers='keys', tablefmt='psql'))\n\n if current_process_batch_size >= dataframe_column_index_len:\n should_continue_process = False", "def print_dataframe(df):\n print (\"\")\n if df.shape[0] > 20:\n print (df.head())\n print (df.tail())\n else: \n print (df)", "def make_dataframes(folders, file_stem):\n\n print \"Making one big dataframe...\"\n df_orig = load_df(folders, file_stem, n_files=500)\n # df_orig = load_df(folders, \"output\")\n # df_orig = load_df(folders, \"output_ma1Lt11\")\n # df_orig = load_df(folders, \"output_good\")\n\n print len(df_orig.index), 'entries in dataframe'\n\n # Drop columns to save space\n drop_cols = [\n 'h1u', 'h1d', 'h1b', 'h1V', 'h1G', 'h1A',\n 'h2u', 'h2d', 'h2b', 'h2V', 'h2G', 'h2A',\n 'Brh3gg', 'Brh3tautau', 'Brh3bb', 'Brh3ww',\n 'Brh3zz', 'Brh3gammagamma', 'Brh3zgamma',\n 'Brh3h1h1', 'Brh3h2h2', 'Brh3h1h2',\n 'Brh3a1a1', 'Brh3a1z',\n # 'bsgamma', 'bsmumu', 'btaunu', 'delms', 'delmd']\n ]\n\n for col in drop_cols:\n if col in df_orig.columns.values:\n df_orig.drop(col, inplace=True, axis=1)\n print \"After dropping 
columns:\", df_orig.columns.values, len(df_orig.columns.values), \"columns\"\n\n # Remove any duplicate entries\n df_orig.drop_duplicates(inplace=True)\n\n # Load up the glu-glu cross sections for 13 TeV\n print \"Adding in cross-sections...\"\n # cs = pd.read_csv(\"parton_lumi_ratio.csv\")\n cs = pd.read_csv(\"YR3_cross_sections.csv\")\n masses = cs[\"MH [GeV]\"]\n mass_len = len(masses)\n xsec_ggf13 = cs[\"ggF 13TeV Cross Section [pb]\"]\n xsec_vbf13 = cs[\"VBF 13TeV Cross Section [pb]\"]\n # xsec_wh13 = cs[\"WH 13TeV Cross Section [pb]\"]\n # xsec_zh13 = cs[\"ZH 13TeV Cross Section [pb]\"]\n xsec_ggf8 = cs[\"ggF 8TeV Cross Section [pb]\"]\n xsec_vbf8 = cs[\"VBF 8TeV Cross Section [pb]\"]\n\n def find_closest_mass_ind(mass):\n pos = bisect_left(masses, mass)\n if pos == mass_len:\n return mass_len - 1\n return pos\n\n print 'Storing nearest-mass indices'\n df_orig['mass_ind_h1'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh1']), axis=1)\n df_orig['mass_ind_h2'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh2']), axis=1)\n df_orig['mass_ind_h3'] = df_orig.apply(lambda row: find_closest_mass_ind(row['mh3']), axis=1)\n\n # ALL XSEC STORED ARE CORRECTLY SCALED BY REDUCED COUPLING\n print \"Storing 13 TeV gg xsec\"\n df_orig[\"xsec_ggf13_h1\"] = df_orig['h1ggrc2'] * xsec_ggf13[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_ggf13_h2\"] = df_orig['h2ggrc2'] * xsec_ggf13[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_ggf13_h3\"] = df_orig['h3ggrc2'] * xsec_ggf13[df_orig['mass_ind_h3']].values\n\n print \"Storing 13 TeV vbf xsec\"\n df_orig[\"xsec_vbf13_h1\"] = df_orig['h1vvrc2'] * xsec_vbf13[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_vbf13_h2\"] = df_orig['h2vvrc2'] * xsec_vbf13[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_vbf13_h3\"] = df_orig['h3vvrc2'] * xsec_vbf13[df_orig['mass_ind_h3']].values\n\n print \"Storing 8 TeV ggf xsec\"\n df_orig[\"xsec_ggf8_h1\"] = df_orig['h1ggrc2'] * xsec_ggf8[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_ggf8_h2\"] = df_orig['h2ggrc2'] * xsec_ggf8[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_ggf8_h3\"] = df_orig['h3ggrc2'] * xsec_ggf8[df_orig['mass_ind_h3']].values\n\n print \"Storing 8 TeV vbf xsec\"\n df_orig[\"xsec_vbf8_h1\"] = df_orig['h1vvrc2'] * xsec_vbf8[df_orig['mass_ind_h1']].values\n df_orig[\"xsec_vbf8_h2\"] = df_orig['h2vvrc2'] * xsec_vbf8[df_orig['mass_ind_h2']].values\n df_orig[\"xsec_vbf8_h3\"] = df_orig['h3vvrc2'] * xsec_vbf8[df_orig['mass_ind_h3']].values\n\n # Now add in individual channel xsec\n store_channel_xsec(df_orig)\n print df_orig.columns.values\n\n # Make some subsets here:\n print \"Making subsets...\"\n\n # Points passing all experimental constraints chosen\n df_pass_all = subset_pass_constraints(df_orig)\n # df_pass_all = None\n\n # subset with 2m_tau < ma1 < 10\n df_ma1Lt10 = None\n # df_ma1Lt10 = subset_var(df_pass_all, 3.554, 10.5, \"ma1\")\n\n mhmin, mhmax = 122.1, 128.1\n # subset with h1 as h_125\n # df_h1SM = subset_var(df_pass_all, mhmin, mhmax, \"mh1\")\n df_h1SM = None\n\n # subset with h2 as h_125\n # df_h2SM = subset_var(df_pass_all, mhmin, mhmax, \"mh2\")\n df_h2SM = None\n\n n_orig = len(df_orig.index)\n\n def percent_str(numerator, denominator):\n return \"%.3f %% \" % (100*numerator/float(denominator))\n\n print \"Running over\", n_orig, \"points\"\n if isinstance(df_pass_all, pd.DataFrame):\n n_pass_all = len(df_pass_all.index)\n print n_pass_all, \"points passing all constraints (= %s)\" % percent_str(n_pass_all, n_orig)\n # print len(df_ma1Lt10.index), \"of these have 
2m_tau < ma1 < 10 GeV (= %s)\" % percent_str(len(df_ma1Lt10.index), n_pass_all)\n # print len(df_h1SM.index), \"points in the h1 = h(125) subset (= %s)\" % percent_str(len(df_h1SM.index), n_pass_all)\n # print len(df_h2SM.index), \"points in the h2 = h(125) subset (= %s)\" % percent_str(len(df_h2SM.index), n_pass_all)\n print \"\"\n\n return df_orig, df_pass_all, df_ma1Lt10, df_h1SM, df_h2SM", "def show_df_ui(df,transpose=False,default=\"Hide\",message=\"Show dataframe: \"):\n def make_btn(val):\n btn_widget=widgets.Button(\n value=False,\n description=val,\n disabled=False,\n button_style='',\n layout=widgets.Layout(width=\"80px\"),\n )\n return btn_widget\n \n def show_head():\n if not transpose:\n display(df.head(10))\n else:\n display(df.head(10).transpose())\n def show_tail():\n if not transpose:\n display(df.tail(10))\n else:\n display(df.tail(10).transpose())\n def show_full():\n if not transpose:\n display(df)\n else:\n display(df.transpose())\n def show_random():\n if not transpose:\n display(Frames.smart_sample(df,10))\n else:\n display(Frames.smart_sample(df,10).transpose())\n def hide_output():\n pass\n \n def refresh():\n Notebook.clear()\n Widgets.show_df_ui(df,transpose=transpose,message=message)\n \n def show_head_refresh(b):\n refresh()\n show_head()\n def show_tail_refresh(b):\n refresh()\n show_tail()\n def show_full_refresh(b):\n refresh()\n show_full()\n def show_random_refresh(b):\n refresh()\n show_random()\n def hide_output_refresh(b):\n refresh()\n \n behaviors={\n \"Hide\": hide_output,\n \"Head\": show_head,\n \"Tail\": show_tail,\n \"Random\": show_random,\n \"Full\": show_full\n }\n \n btn_head=make_btn(\"Head\")\n btn_random=make_btn(\"Random\")\n btn_tail=make_btn(\"Tail\")\n btn_full=make_btn(\"Full\")\n btn_hide=make_btn(\"Hide\")\n \n btn_head.on_click(show_head_refresh)\n btn_tail.on_click(show_tail_refresh)\n btn_full.on_click(show_full_refresh)\n btn_random.on_click(show_random_refresh)\n btn_hide.on_click(hide_output_refresh)\n \n ui_group=widgets.HBox([\n widgets.Label(value=message),\n btn_head,\n btn_random,\n btn_tail,\n btn_full,\n btn_hide,\n ])\n display(ui_group)\n if default in behaviors:\n behaviors[default]()", "def mdisplay(dfs: List[DataFrame], names:List[str]=[]):\n \n html_str = ''\n if names:\n html_str += ('<tr>' + \n ''.join(f'<td style=\"text-align:center\">{name}</td>' for name in names) + \n '</tr>')\n html_str += ('<tr>' + \n ''.join(f'<td style=\"vertical-align:top\"> {df.to_html(index=False)}</td>' \n for df in dfs) + \n '</tr>')\n html_str = f'<table>{html_str}</table>'\n html_str = html_str.replace('table','table style=\"display:inline\"')\n display_html(html_str, raw=True)", "def show(obj):\n if isinstance(obj, pd.Series):\n df = pd.DataFrame(obj)\n return df\n elif hasattr(obj, '__dict__'):\n return pd.DataFrame(pd.Series(obj.__dict__),\n columns=['value'])\n else:\n return obj", "def print_data_table_length(document_label, data_frame, debug=False):\n print('{}\\n'.format(document_label), len(data_frame))\n debug_print_dataframe(data_frame, debug=debug)", "def df_to_string(df):\n with pd.option_context('display.max_rows', None, 'display.max_columns', 3):\n return df.to_string()", "def sizeof(self):\n return DataFrameDefault.register(pandas.DataFrame.__sizeof__)(self)", "def __call__(self, df, size=None, **kwargs):\n return super().__call__(df, size=size, **kwargs)", "def meta_df(self):\n return NotImplemented" ]
[ "0.7420032", "0.73733944", "0.73476887", "0.71943045", "0.7025156", "0.6742269", "0.6694681", "0.66209984", "0.65980023", "0.6392972", "0.6373981", "0.6253341", "0.6170743", "0.616121", "0.6146324", "0.6051942", "0.6036005", "0.5962156", "0.59404975", "0.5803421", "0.5718344", "0.5715986", "0.5672375", "0.5646242", "0.5630978", "0.55911493", "0.55889344", "0.55523986", "0.5543984", "0.55153745" ]
0.8036398
0
Return a nicely formatted HTML code string for the given dataframe. Arguments
def df_to_html(df, percentage_columns=None): # pragma: no cover big_dataframe_setup() try: res = "<br><h2> {} </h2>".format(df.name) except AttributeError: res = "" df.style.set_properties(**{"text-align": "center"}) res += df.to_html( formatters=_formatters_dict( input_df=df, percentage_columns=percentage_columns ) ) res += "<br>" return res
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def disp(df):\n display(HTML(df.to_html(index=False)))", "def df_to_html(df, img_formatter=images_formatter):\n pd.set_option(\"display.max_colwidth\", -1)\n pd.set_option(\"display.max_columns\", -1)\n cond_formatter = lambda imgs: images_formatter(imgs, col=1)\n html_table = df.to_html(\n formatters={\n \"cond_imgs\": cond_formatter,\n \"out_imgs\": img_formatter,\n \"feat_imgs\": img_formatter,\n \"cond_feat\": img_formatter,\n \"ini_imgs\": img_formatter,\n },\n escape=False,\n border=0,\n )\n html = \"\"\"\n <html>\n <style>\n td{{\n border: 1px solid #444444;\n padding: 5px;\n }}\n table {{ \n border-spacing: 0px;\n border-collapse: separate;\n }}\n tr:nth-child(even) {{\n background: #f2f2f2; \n }}\n \n </style>\n\n <body>\n {}\n\n <br><br> <br><br> <br><br> <br><br> <br><br> \n </body>\n </html>\n \"\"\".format(\n html_table\n )\n return html", "def display_side_by_side(*args):\r\n\r\n html_string = ''\r\n for df in args:\r\n html_string += df.to_html(index=False, header=True)\r\n display_html(html_string.replace('table',\r\n 'table style=\"display:inline\"'), raw=True)", "def _repr_html_(self) -> str:\n return self.all(pandas=True)._repr_html_() # type: ignore", "def as_html(self, max_rows=0):\n if not max_rows or max_rows > self.num_rows:\n max_rows = self.num_rows\n omitted = max(0, self.num_rows - max_rows)\n labels = self.column_labels\n lines = [\n (0, '<table border=\"1\" class=\"dataframe\">'),\n (1, '<thead>'),\n (2, '<tr>'),\n (3, ' '.join('<th>' + label + '</th>' for label in labels)),\n (2, '</tr>'),\n (1, '</thead>'),\n (1, '<tbody>'),\n ]\n fmts = [self._formats.get(k, self.format_column(k, v[:max_rows])) for\n k, v in self._columns.items()]\n for row in itertools.islice(self.rows, max_rows):\n lines += [\n (2, '<tr>'),\n (3, ' '.join('<td>' + fmt(v) + '</td>' for v, fmt in zip(row, fmts))),\n (2, '</tr>'),\n (1, '</tbody>'),\n ]\n lines.append((0, '</table>'))\n if omitted:\n lines.append((0, '<p>... 
({} rows omitted)</p'.format(omitted)))\n return '\\n'.join(4 * indent * ' ' + text for indent, text in lines)", "def display_side_by_side(*args:\"pandas.DataFrame, pandas.Series\", drop_index:\"bool\"=False)-> \"None\":\n from IPython.display import display_html\n \n strHtml = ''\n for df in args:\n \n if isinstance(df, pandas.Series):\n df = df.to_frame()\n if drop_index:\n df.reset_index(drop=True, inplace=True)\n \n strHtml += df.to_html()\n display_html(strHtml.replace('table','table style=\"display:inline\"'), raw = True)", "def mdisplay(dfs: List[DataFrame], names:List[str]=[]):\n \n html_str = ''\n if names:\n html_str += ('<tr>' + \n ''.join(f'<td style=\"text-align:center\">{name}</td>' for name in names) + \n '</tr>')\n html_str += ('<tr>' + \n ''.join(f'<td style=\"vertical-align:top\"> {df.to_html(index=False)}</td>' \n for df in dfs) + \n '</tr>')\n html_str = f'<table>{html_str}</table>'\n html_str = html_str.replace('table','table style=\"display:inline\"')\n display_html(html_str, raw=True)", "def df2html(df, name=None, dom=\"Brt\", show_index=False, pageLength=15):\n\n if name is None:\n name = uuid.uuid1().time_low\n # looks like datatable does not like ID made of numbers, even in string\n # so we convert to ABCDEFGH values\n name = \"\".join([chr(65 + int(x)) for x in str(name)])\n\n datatable = DataTable(df, name, index=show_index)\n datatable.datatable.datatable_options = {\n \"pageLength\": pageLength,\n \"scrollCollapse\": \"false\",\n \"dom\": dom,\n \"buttons\": [\"copy\", \"csv\"],\n }\n\n # identify links (columns ending in _links)\n df.columns = [str(x) for x in df.columns]\n for column in df.columns:\n if column.endswith(\"_links\"):\n prefix = column.replace(\"_links\", \"\")\n if prefix in df.columns:\n datatable.datatable.set_links_to_column(column, prefix)\n\n js = datatable.create_javascript_function()\n html = datatable.create_datatable(float_format=\"%.6g\")\n return js + html", "def create_html_report():\r\n\r\n #Sample DataFrame\r\n df = pd.DataFrame(np.random.randn(7,4)\r\n ,columns=['one','two','three','four']\r\n ,index=['a','b','c','d','e','f','g'])\r\n\r\n #Formatting rule\r\n def color_negative_red(val):\r\n color = 'red' if val<0 else 'black'\r\n return f'color: {color}'\r\n\r\n styler = df.style.applymap(color_negative_red)\r\n\r\n #Chart plotting\r\n filename = \"\".join([APP_ROOT, \"\\\\static\\\\images\\\\\" , \"plot.svg\"])\r\n #Plot\r\n ax = df.plot.bar()\r\n fig = ax.get_figure()\r\n fig.savefig(filename)\r\n\r\n #Template handling\r\n env = jinja2.Environment(loader=jinja2.FileSystemLoader(searchpath='./templates/'))\r\n template = env.get_template('template.html')\r\n\r\n filename = \"file:///\" + filename\r\n html = template.render(my_table=styler.render(), img_url=filename)\r\n\r\n return html", "def _construct_html_table(self, df: Table) -> str:\n string = attach_tag_tr('\\n'.join(map(attach_tag_th, df.columns)))\n stringified_df = _stringify_table(df)\n\n for (i, row_elements) in stringified_df.iterrows():\n # Commented out code is a bit sleaker, but restrictive\n #string += '\\n' + attach_tag_tr('\\n'.join(map(attach_tag_td,\n # row_elements)))\n table_content: List = []\n for col, val in row_elements.iteritems():\n if col == 'cost':\n table_content.append(attach_tag_td_rjust(val))\n else:\n table_content.append(attach_tag_td(val))\n\n string += '\\n' + attach_tag_tr('\\n'.join(table_content))\n\n return attach_tag_table(\n attach_tag_caption(f'All Costs of {self.trip_title}')\n + '\\n'\n + attach_tag_tbody(string))", "def 
_repr_html_(self):\n return \"<td><b>{0}</b></td><td>{1}</td>\".format(self.id, self.title)", "def _df_formatter_with_interactive_hint(dataframe, buttons=None):\n key = 'df-' + str(_uuid.uuid4())\n _noninteractive_df_refs[key] = dataframe\n\n # Ensure our last value cache only contains one item.\n _last_noninteractive_df.clear()\n _last_noninteractive_df[key] = dataframe.copy(deep=False)\n\n convert_func = 'convertToInteractive'\n if convert_func not in _output_callbacks:\n _output_callbacks[convert_func] = _output.register_callback(\n convert_func, _convert_to_interactive\n )\n if not buttons:\n buttons = []\n buttons.insert(0, _get_button_html(key))\n return _get_html(dataframe, key, buttons)", "def create_html(self):\n rows = self.check()\n htmlrows = \"\"\n for row in rows:\n data = self._format_row(row)\n htmlrows += data\n \n return self.TEMPLATE.format(content=htmlrows)", "def __str__(self) -> str:\n if self.data is not None:\n list_of_params = []\n for key, data_dict in self.data.to_dict(orient=\"index\").items():\n data_dict[\"index\"] = key\n list_of_params.append(data_dict)\n formated_list_of_params = self.format_params(list_of_params)\n return f\"\\n{tabulate(formated_list_of_params, headers='keys', tablefmt='fancy_grid')}\"\n else:\n return \"Empty DataFrame\"", "def create_html_layout(self):\n page = \"\"\"<!DOCTYPE html>\n <!doctype html>\n <html lang=\"en\">\n <head>\n <meta charset=\"utf-8\">\n <meta name=\"viewport\" content=\"width=device-width, initial-scale=1, shrink-to-fit=no\">\n </head>\n </html>\n <head>\n \t<meta charset=\"UTF-8\">\n </head>\n <body>\n <div class=\"container\">\n <div class=\"row\">\n <div class=\"col-sm\">\n <h4>eda report: Exploratory data analysis</h4>\n </div>\n <div class=\"col-sm\">\n <h3>Inspecting dataframe of size: {size}\n </div>\n </div>\n </div>\n \t<table class=\"table table-hover\" style=\".table\">\n <thead>\n <tr style=\"font-size: 15px;\">\n <th width=\"5%\" align=\"left\" scope=\"col\">Variable Name</th>\n <th width=\"12%\" align=\"left\" scope=\"col\">Data Type</th>\n <th width=\"15%\" align=\"left\" scope=\"col\">Histogram</th>\n <th width=\"11%\" align=\"left\" scope=\"col\">Stats</th>\n <th width=\"7%\" align=\"left\" scope=\"col\">Missing NA</th>\n <th width=\"5%\" align=\"left\" scope=\"col\">Outliers</th>\n </tr>\n </thead>\n <tbody>\"\"\".format(size=self.df.size)\n\n end_page = \"\"\" \n </tbody>\n </table>\n </body>\n \"\"\"\n rows_html = []\n for i, column in enumerate(self.df.columns):\n Summary = ColumnSummary(data=self.df[column])\n datatype = Summary.data_type()\n missing = Summary.missing_values()\n stats = Summary.statistic_summary()\n outliers = Summary.outliers()\n Summary.create_histogram(i)\n html = f\"\"\"\n <tr>\n <td style=\"font-size: 15px;\" width=\"10%\" align=\"left\"> {column}</td>\n <td style=\"font-size: 15px;\"width=\"10%\" align=\"left\"> {datatype}</td>\n <td><img class=\"img-fluid\" src=\"hist_images/histogram{i}.png?{random.randint(0,\n 2e9)}\" style=\"width:800px\"> </td>\n <td style=\"font-size: 15px;\">mean: {stats.mean}<br>\n mode: {stats.mode}<br><br>\n min: {stats.min}<br>\n max: {stats.max}<br><br>\n lower-bound: {stats.lower}<br>\n upper-bound: {stats.upper}<b</td>\n <td style=\"font-size: 15px;\">{missing}</td>\n <td style=\"font-size: 15px;\">{outliers}</td>\n </tr>\n \"\"\"\n rows_html.append(html)\n\n merged_html = page + \"\".join(rows_html) + end_page\n return merged_html", "def patchPandasHTMLrepr(self, **kwargs):\n global defHTMLFormatter_write_cell\n global 
defPandasGetAdjustment\n\n # Two things have to be done:\n # 1. Disable escaping of HTML in order to render img / svg tags\n # 2. Avoid truncation of data frame values that contain HTML content\n\n # The correct patch requires that two private methods in pandas exist. If\n # this is not the case, use a working but suboptimal patch:\n def patch_v1():\n with pd.option_context('display.max_colwidth', -1): # do not truncate\n kwargs['escape'] = False # disable escaping\n return defPandasRendering(self, **kwargs)\n\n try:\n import pandas.io.formats.html # necessary for loading HTMLFormatter\n except:\n # this happens up until at least pandas v0.22\n return patch_v1()\n else:\n if not hasattr(pd.io.formats.html, 'HTMLFormatter') or \\\n not hasattr(pd.io.formats.html.HTMLFormatter, '_write_cell') or \\\n not hasattr(pd.io.formats.format, '_get_adjustment'):\n return patch_v1()\n\n # The \"clean\" patch:\n # 1. Temporarily set escape=False in HTMLFormatter._write_cell\n defHTMLFormatter_write_cell = pd.io.formats.html.HTMLFormatter._write_cell\n\n # 2. Pandas uses TextAdjustment objects to measure the length of texts\n # (e.g. for east asiacopied over from rdkit repo yn languages). We take advantage of this mechanism\n # and replace the original text adjustment object with a custom one.\n # This \"RenderMoleculeAdjustment\" object assigns a length of 0 to a\n # given text if it is valid HTML. And a value having length 0 will not\n # be truncated.\n\n # store original _get_adjustment method\n defPandasGetAdjustment = pd.io.formats.format._get_adjustment\n\n try:\n # patch methods and call original to_html function\n pd.io.formats.format._get_adjustment = _patched_get_adjustment\n pd.io.formats.html.HTMLFormatter._write_cell = _patched_HTMLFormatter_write_cell\n return defPandasRendering(self, **kwargs)\n except:\n pass\n finally:\n # restore original methods\n pd.io.formats.format._get_adjustment = defPandasGetAdjustment\n pd.io.formats.html.HTMLFormatter._write_cell = defHTMLFormatter_write_cell\n\n # If this point is reached, an error occurred in the previous try block.\n # Use old patch:\n return patch_v1()", "def table_to_html(df, ev, html_id=\"\", add_class=\"\"):\n formatters = ev.getColumnFormatters(df)\n\n # apply sortlevel\n df = ev.sortDataFrame(df)\n\n tableclasses = 'ipet-table rb-table-data {}\" width=\"100%'.format(add_class)\n\n htmlstr = df.to_html(border=0,\n na_rep=NONE_DISPLAY, formatters=formatters, justify=\"right\",\n table_id=html_id, classes=tableclasses)\n\n return html.fromstring(htmlstr)", "def render(cls, df: DataFrame, *args, **kwargs):\n from labext.widgets.data_table import DataTable\n dt = DataTable(df, *args, **kwargs)\n display(dt.widget, *dt.get_auxiliary_components())", "def as_html(table): \n if isinstance(table,Table):\n html = \"<table width=\\\"\" + str(table.total_width()) + \"\\\"\" + table.html_attributes + \" ><colgroup>\\n\"\n if table.col_width_dict:\n for i in range(table.no_of_columns()):\n html += \"<col width=\\\"\" + str(table.col_width_percent(i)) + \"%\\\"/>\\n\"\n html += \"</colgroup><tbody>\\n\" \n row = \"<tr>\"\n for c in range(table.no_of_columns()):\n row += \"<th width=\\\"\"+str(table.col_width_percent(c))+\"%\\\">\" + table.cell(0,c) +\"</th>\"\n row += \"</tr>\\n\"\n html += row\n for r in range(1,table.no_of_rows()):\n row = \"<tr>\"\n for c in range(table.no_of_columns()):\n row += \"<td>\" + table.cell(r,c) + \"</td>\"\n row += \"</tr>\\n\"\n html += row\n return mark_safe(html)\n else:\n return table", "def to_html(self) -> str:\n 
if self.coverage < 0:\n coverage_str = '1e308'\n coverage_class = 'na'\n elif self.coverage == 0:\n coverage_str = '0'\n coverage_class = 'zero'\n else:\n coverage_str = str(self.coverage)\n coverage_class = 'all'\n\n sorted_branches = sorted(self.branches.values(), key=lambda s: s.id_)\n branches_html = ''.join(b.to_html() for b in sorted_branches)\n\n return '<tr id=\"line-{2}\" class=\"cov-health-{0}\"><td>{4}</td><td>{1}</td><td>{2}</td><td>{3}</td></tr>\\n'.format(\n coverage_class, coverage_str, self.linenum, escape(self.source),\n branches_html\n )", "def htmlise(s):\n return '<div><pre class=\"tablecell\">' + html.escape(s) + '</pre></div>'", "def show_dataframe(self, df, **kwargs):\n show_index = False\n if 'show_index' in kwargs:\n show_index = kwargs['show_index']\n\n exceed_limit = len(df) > self.max_result\n header_buf = StringIO(\"\")\n if show_index:\n idx_name = str(df.index.name) if df.index.name is not None else \"\"\n header_buf.write(self.normalizeColumn(idx_name) + \"\\t\")\n header_buf.write(self.normalizeColumn(str(df.columns[0])))\n for col in df.columns[1:]:\n header_buf.write(\"\\t\")\n header_buf.write(self.normalizeColumn(str(col)))\n header_buf.write(\"\\n\")\n\n body_buf = StringIO(\"\")\n rows = df.head(self.max_result).values if exceed_limit else df.values\n rowNumber = len(rows)\n index = df.index.values\n for idx, row in zip(index, rows):\n if show_index:\n body_buf.write(\"%html <strong>{}</strong>\".format(idx))\n body_buf.write(\"\\t\")\n body_buf.write(self.normalizeColumn(str(row[0])))\n for cell in row[1:]:\n body_buf.write(\"\\t\")\n body_buf.write(self.normalizeColumn(str(cell)))\n # don't print '\\n' after the last row\n rowNumber -=1\n if rowNumber != 0:\n body_buf.write(\"\\n\")\n body_buf.seek(0)\n header_buf.seek(0)\n print(\"%table \" + header_buf.read() + body_buf.read())\n body_buf.close()\n header_buf.close()\n if exceed_limit:\n print(\"\\n%html <font color=red>Results are limited by {}.</font>\".format(self.max_result))", "def to_html(self) -> str:\n source_name = escape(self.source_name)\n (covered, lines) = self.coverage_stats()\n lines_stats = \"{} / {} ({} lines of code)\".format(covered, lines, len(self.source_code))\n (br_covered, br_count, calls_covered, calls_count) = self.branch_stats()\n branch_stats = \"{} / {}\".format(br_covered, br_count)\n call_stats = \"{} / {}\".format(calls_covered, calls_count)\n (fn_covered, fn_count) = self.function_stats()\n fn_stats = \"{} / {}\".format(fn_covered, fn_count)\n\n self.decode_cpp_function_names()\n\n result = [\"\"\"\n <!DOCTYPE html>\n <html>\n <head>\n <meta charset=\"utf-8\">\n <title>Coverage report of file \"\"\" + source_name + \"\"\"</title>\n <style type=\"text/css\">\n /*<![CDATA[*/\n .cov-health-zero td { color: white; }\n .cov-health-zero a { color: #CCCCFF; }\n .cov-health-zero a:visited { color: #FFCCFF; }\n .cov-health-zero:nth-child(odd) td { background-color: #CC0000; }\n .cov-health-zero:nth-child(even) td { background-color: #DD0000; }\n .cov-health-na td { color: silver; }\n .cov-health-na td:nth-child(2) { visibility: hidden; }\n .branch { cursor: help; }\n .branch-taken { color: silver; }\n .branch-taken:hover { color: black; }\n .branch-not-taken { color: red; }\n .branch-not-taken:hover { color: maroon; }\n #source tbody td:last-child, #funcs tbody td:first-child\n { text-align: left; font-family: monospace; white-space: pre; }\n .sortable { border-collapse: collapse; }\n div { width: 100%; overflow: hidden; }\n .sortable td { text-align: right; padding-left: 
2em; }\n .sortable tbody tr:nth-child(odd) { background-color: #FFFFCC; }\n .sortable tbody tr:nth-child(even) { background-color: #FFFFDD; }\n #source tbody tr:hover td:last-child { font-weight: bold; }\n #source tbody td:first-child { max-width: 7em; font-size: smaller; word-wrap: break-word; }\n #source tbody td:nth-child(2) { font-size: smaller; color: silver; }\n #summary { float: right; border-collapse: collapse; }\n #summary td { border: 1px solid black; }\n caption { font-weight: bold; }\n /*]]>*/\n </style>\n <script src=\"sorttable.js\"></script>\n </head>\n <body>\n <p><a href=\"index.html\">&lArr; Back</a> | Go to line #<input type=\"number\" id=\"goto\" /></p>\n <h1>\"\"\" + source_name + \"\"\"</h1>\n <div>\n <table id=\"summary\">\n <caption>Summary</caption>\n <tr><td>Lines</td><td>\"\"\" + lines_stats + \"\"\"</td></tr>\n <tr><td>Branches</td><td>\"\"\" + branch_stats + \"\"\"</td></tr>\n <tr><td>Calls</td><td>\"\"\" + call_stats + \"\"\"</td></tr>\n <tr><td><a href=\"#functions\">Functions</a></td><td>\"\"\" + fn_stats + \"\"\"</td></tr>\n </ul>\n </table>\n <table class=\"sortable\" id=\"source\">\n <thead><tr><th>Branches</th><th>Cov</th><th>Line</th><th class=\"sorttable_nosort\">Source</th></tr></thead>\n <tbody>\n \"\"\"]\n result.extend(line.to_html() for line in self.source_code)\n result.append(\"\"\"\n </tbody>\n </table>\n </div>\n <h2 id=\"functions\">Functions</h2>\n <div>\n <table class=\"sortable\" id=\"funcs\">\n <thead><tr><th>Function</th><th>Calls</th><th>Ret.</th><th>Blk. Exec.</th></tr></thead>\n <tbody>\"\"\")\n result.extend(func.to_html() for func in self.source_functions)\n result.append(\"\"\"\n </tbody>\n </table>\n </div>\n <script>\n //<![CDATA[\n document.getElementById('goto').onchange = function()\n {\n location = \"#line-\" + this.value;\n }\n //]]>\n </script>\n </body>\n </html>\n \"\"\")\n return '\\n'.join(result)", "def to_html(self) -> str:\n coverage_class = 'zero' if self.called == 0 else 'all'\n return '''<tr id=\"func-{}\" class=\"cov-health-{}\">\n <td><a href=\"#line-{}\">{}</a></td>\n <td>{}</td><td>{}%</td><td>{}%</td>\n </tr>\\n'''.format(\n self.name, coverage_class, self.linenum, self.pretty_name, self.called,\n self.returned, self.blocks\n )", "def _repr_html_(self):\n return (\n f'<b>{self.__class__.__name__}</b>'\n f'<br> <b>defined by:</b> {self._str_meta_()}'\n f'<br> <b>with columns:</b> {self._str_colnames()}'\n f'<br> {len(self)} objects'\n f'<br> {self._html_table()}'\n )", "def dataframe_displayer(df):\n\n #On paramètre les options d'affichage du module pandas\n pd.set_option('display.max_rows', None)\n pd.set_option('display.max_columns', None)\n pd.set_option('display.width', None)\n pd.set_option('display.max_colwidth', -1)\n\n print(df)", "def table_html(table_rows: List[str]) -> str:\n return \"<table>{}</table>\".format(\"\".join(table_rows))", "def print_dataframe(self, df):\n header = [\n '일련번호',\n '학생 id',\n '이름',\n '생년월일',\n '중간고사',\n '기말고사',\n '평균',\n 'Grade'\n ]\n\n header_str = '{:10s}' * len(header)\n print(header_str.format(*header))\n print(df.to_string(header=False, col_space=10))", "def as_html(self):\r\n template = get_template(self.template)\r\n request = build_request()\r\n return template.render(RequestContext(request, {'table': self}))", "def _html_repr(self):\n html = '<table id=%s>' % (self._id,)\n\n for row in range(self.rows):\n html += '<tr>'\n for col in range(self.columns):\n if row == 0 and self.header_row or col == 0 and self.header_column:\n tag = 'th'\n else:\n tag = 'td'\n html 
+= '<%(tag)s id=%(id)s></%(tag)s>' % {\n 'tag': tag,\n 'id': self._get_cell_id(row, col),\n }\n html += '</tr>'\n html += '</table>'\n return html" ]
[ "0.7709254", "0.73659784", "0.71026874", "0.69458956", "0.67958844", "0.676329", "0.67408717", "0.6718637", "0.66966647", "0.6656633", "0.6579785", "0.651067", "0.64621204", "0.64261204", "0.6367933", "0.6338818", "0.63245225", "0.62568015", "0.6219889", "0.6177059", "0.61611307", "0.61477715", "0.6144172", "0.6141057", "0.61234415", "0.61187714", "0.61141455", "0.61124927", "0.6051049", "0.60058373" ]
0.76079047
1
Produce the CSV of a solution
def produce_solution(y): with open('out.csv', 'w', newline='') as csvfile: writer = csv.writer(csvfile, delimiter=',', lineterminator="\n") writer.writerow(['id', 'y']) for i in range(y.shape[0]): writer.writerow([i, y[i]])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def generateSolution(self, cont):\n solnf = self.outdir + \"/tracks_soln.csv\"\n old = os.dup(1)\n sys.stdout.flush()\n os.close(1)\n os.open(solnf, os.O_WRONLY | os.O_CREAT)\n cont.printallSolutions(yetkin=self.yetkin)\n sys.stdout.flush()\n os.close(1)\n os.dup(old)\n os.close(old)", "def csv_output(self):\r\n fh = open(\"output.csv\",'w')\r\n for i in range(len(self.population.columns)):\r\n if i != len(self.population.columns)-1:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.columns[i]))\r\n fh.write(\"\\n\")\r\n\r\n for i in range(len(self.population.data)):\r\n for j in range(len(self.population.data[i])):\r\n if j != len(self.population.data[i])-1:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\",\")\r\n else:\r\n fh.write(str(self.population.data[i][j]))\r\n fh.write(\"\\n\")\r\n fh.close()", "def generate_csv(self, output_file):\n try: # We are going to \"try\" something\n csv_file = open(output_file, 'w+') # open \"output_file\" as a writable file and return a handle called \"csv_file\"\n except OSError as err: # If something goes wrong with the open, we catch the exception\n fatal(\"{0}\".format(err), -1) # exit with something other than 0 so the shell knows something went wrong\n \n writer = csv.writer(csv_file) # create a CSV writing object that's pointing at our open file handle\n\n writer.writerow([\"Question\",\"Answers\"]) # Let's write the top row\n for k in self.questions.keys(): # Let's walk down the directory by key\n # write the \"key\" (which is the question) and then let's take the list of answers and create a comma delmited list.\n # this is likely totally wrong since you could have an answer in it that also has a comma...\n writer.writerow([k, \",\".join(self.questions[k].answers)]) # insert a key (which is the question) and then let's take the array of \n\n csv_file.close() # close the csv_file file handle", "def write_to_csv(self):\n\n dump_list = []\n\n # add rows one by one, each as a list, even if only 1 element\n\n dump_list.append([\"challenge execution ID\",self.ID])\n dump_list.append([\"challenge execution name\",self.name])\n\n dump_list.append([\"challenge definition ID\",self.challenge_def_ID])\n challenge_def_name = get_indexed_item_from_file(self.challenge_def_ID, FILE_CHALLENGE_DEFINITIONS)\n dump_list.append([\"challenge definition name\",challenge_def_name])\n\n if self.start_time != None:\n dump_list.append([\"challenge start time\",self.start_time.strftime(\"%Y-%m-%d %H:%M:%S\")])\n if self.stop_time != None:\n dump_list.append([\"challenge stop time\",self.stop_time.strftime(\"%Y-%m-%d %H:%M:%S\")])\n\n if self.log.length() > 0 :\n dump_list.append([\"Log:\"])\n for item in self.log.get_timestamped_strings():\n dump_list.append([item])\n\n if self.CLI_responses.length() > 0 :\n dump_list.append([\"CLI responses:\"])\n for item in self.CLI_responses.get_timestamped_strings():\n dump_list.append([item])\n\n if self.API_responses.length() > 0 :\n dump_list.append([\"API responses:\"])\n for item in self.API_responses.get_timestamped_strings():\n dump_list.append([item])\n\n try:\n # output CSV file name: challDefExec + ID + start time + .csv\n file_name = \"challDefExec\" + \"{0:0=3d}\".format(self.challenge_def_ID) + \"-\" + self.start_time.strftime(\"%Y-%m-%d-%H-%M-%S\") + \".csv\"\n with open(file_name, \"w\", newline=\"\") as file:\n csv_file_writer = csv.writer(file)\n csv_file_writer.writerows(dump_list)\n except Exception as e:\n print(type(e), e)\n sys.exit()", 
"def create_explanations_csv():\n with open('output/' + dataset_name + '_' + model_name + '.csv', mode='w', newline='') as file:\n writer = csv.writer(file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(\n [\"index\", \"original text\", \"true class\", \"decoded text\", \"black box prediction\",\n \"decision tree prediction\", \"fidelity\", \"exemplars\", \"counter exemplars\", \"top exemplar words\",\n \"top counter exemplar words\"])\n for i in range(len(idx)):\n writer.writerow(\n [idx[i], X_original[i], y_original[i], final_decoded_sentences[i][0], bbpreds[i], dtpreds[i],\n fidelities[i], exemplars[i], counter_exemplars[i], top_exemplar_words_dict_list[i],\n top_counter_exemplar_words_dict_list[i]])", "def output():\n\n if args.top and not args.tfidf and not args.svd:\n most_frequent(vector).to_csv(path_or_buf=\"top{}_vectorfile.csv\".format(args.top))\n\n elif args.top and args.tfidf and not args.svd:\n tfidf_transform(most_frequent(vector)).to_csv(path_or_buf=\"tfidf_top{}.csv\".format(args.top))\n\n elif args.top and args.tfidf and args.svd:\n svd_transform(tfidf_transform(most_frequent(vector)), indexes).to_csv(path_or_buf=\"svd{}_tfidf_topn.csv\".format(args.svd))\n\n elif args.tfidf and not args.top and not args.svd:\n tfidf_transform(vector).to_csv(path_or_buf=\"tfidf.csv\")\n\n elif args.svd and not args.top and not args.tfidf:\n svd_transform(vector, indexes).to_csv(path_or_buf=\"svd{}_vector.csv\".format(args.svd))\n\n elif args.tfidf and args.svd and not args.top:\n svd_transform(tfidf_transform(vector), indexes).to_csv(path_or_buf=\"svd{}_tfidf.csv\".format(args.svd))\n\n else:\n vector.to_csv(path_or_buf=\"vectorfile.csv\")", "def generate_report(self, output_path):\n with open(output_path, 'w', newline='', encoding=\"utf-8\") as csv_fd:\n writer = csv.writer(csv_fd, quoting=csv.QUOTE_NONNUMERIC, doublequote=False, escapechar=\"\\\\\")\n writer.writerow([\"category\", \"level\", \"description\", \"method\", \"parameter\", \"url\", \"body\"])\n writer.writerows(self._vulns)\n writer.writerows(self._anomalies)\n writer.writerows(self._additionals)", "def build_report(rows):\n\n outfile = NamedTemporaryFile(suffix='.csv', delete=False)\n\n with open(outfile.name, 'wb') as csvfile:\n writer = csv.writer(csvfile, delimiter='\\t',\n quotechar='|', quoting=csv.QUOTE_MINIMAL)\n writer.writerow(['Column #1', 'Column #2', 'Column #3'])\n for i in range(int(rows)):\n writer.writerow(['Row #%d' % i, 'from task', 'build_report'])\n\n outfile.close()\n return outfile.name", "def write_csv(self):\n with open(paths.CSV_FILE, 'w', newline='') as csv_file:\n writer = csv.writer(csv_file)\n assg = AssignmentConfig().get_assignment()\n writer.writerow([\"Student\"] + assg.get_test_list() + assg.get_programs_list() +\n [\"normalised_test_score\"] + [\"normalised_prog_score\"] + [\"total\"] + [\"total_rounded\"])\n\n for (submitter, submitter_data) in sorted(self.snapshot['results'].items()):\n total_score = submitter_data[\"normalised_test_score\"] + submitter_data[\"normalised_prog_score\"]\n total_rounded = round(total_score * 2) / 2 # total score rounded to nearest 0.5\n writer.writerow([submitter] +\n [submitter_data[\"tests\"][test] for test in sorted(submitter_data[\"tests\"])] +\n [submitter_data[\"progs\"][prog] for prog in sorted(submitter_data[\"progs\"])] +\n [submitter_data[\"normalised_test_score\"]] +\n [submitter_data[\"normalised_prog_score\"]] +\n [round(total_score, 2)] +\n [total_rounded])", "def create_csv(self):\n try:\n # Convert List of 
Lists to DataFrame and write it to a CSV\n pd.DataFrame(self.data, columns=self.header) \\\n .to_csv(os.path.join(self.file_path, self.file_name), index=False)\n self.successful_run = True\n except:\n # TODO create Exception Handling\n raise", "def write_solution(mm):\n\n m = mm.model\n\n solution_file = \"{0}_sol.csv\".format(mm.filename)\n\n harv_data = []\n harv_data.append([\"Harvest data\"])\n harv_data.append([\"Species\", \"Region\", \"Period\", \"Value\"])\n # write harv variable solution values\n harv = pg.get_variables(m, \"harv\")\n for h in harv:\n name = h.varName.split(\",\")\n species = name[0].split(\"[\")[1]\n region = name[1]\n period = name[-1][:-1]\n harv_data.append(\n [species, region, period, h.X])\n\n age_data = []\n age_data.append([\"Age data\"])\n age_data.append([\"Region\", \"Period\", \"Value\"])\n age = pg.get_variables(m, \"age\")\n for a in age:\n name = a.varName.split(\",\")\n region = name[0].split(\"[\")[1]\n period = name[-1][:-1]\n age_data.append(\n [region, period, a.X])\n\n with open(solution_file, \"w+\") as wrf:\n wf = csv.writer(wrf)\n wf.writerows(harv_data)\n wf.writerows(age_data)", "def make_nptabel_csv(obs_id, module, qa_dir, output_path=''):\n\n logger.info(\n \"Reading param information for {0} of {1}\".format(module, obs_id))\n summary_data = extract_all_beams(obs_id, module, qa_dir)\n logger.info(\n \"Reading param information for {0} of {1}... Done\".format(module, obs_id))\n\n i = 0\n if module == 'transfer':\n while len(summary_data[i]) <= 1:\n i += 1\n if len(summary_data[i]) > 1:\n break\n else:\n while len(summary_data[i]) <= 2:\n i += 1\n if len(summary_data[i]) > 2:\n break\n\n csv_columns = summary_data[i].keys()\n\n csv_columns.sort()\n dict_data = summary_data\n\n # save the file\n if output_path == '':\n csv_file = str(obs_id)+\"_\"+str(module)+\"_summary.csv\"\n else:\n csv_file = os.path.join(output_path, str(\n obs_id)+\"_\"+str(module)+\"_summary.csv\")\n\n try:\n with open(csv_file, 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=csv_columns)\n writer.writeheader()\n for data in dict_data:\n writer.writerow(data)\n except Exception as e:\n logger.warning(\"Creating file {} failed\".format(csv_file))\n logger.exception(e)\n\n # print(\"Created file: \"+str(obs_id)+\"_\"+str(module)+\"_summary.csv\")\n logger.info(\"Creating file: {} ... 
Done\".format(csv_file))", "def write_solution(n,solution, output_file=\"out.csv\", delimiter=','):\n to_print = solution[int(n):-2]\n with open(output_file,'w') as _file:\n for i in range(len(to_print)):\n s = str(i+1) + delimiter + str(to_print[i]) + str(\"\\n\")\n _file.write(s)\n _file.close()", "def to_csv(self):\n if not self._fitted:\n self.fit()\n #self._message(\"Saving results into a csv (comma separated values) file.\")\n v=np.array([list(self.initialConcentration.values()),\n list(self.fitting_error.values()),\n list(self.k.values()),\n list(self.Fb.values()),\n list(self.slope.values())]).T\n k=list(self.initialConcentration.keys())\n d=pd.DataFrame(v,columns=['Initial Concentration','Fitting Error','k','Fb','Slope'],index=k)\n fn=get_valid_fname(self.ID)\n self.csvname=\"%s_initial_concentrations.csv\"%(fn)\n self.fullcsvname=\"%s/%s_initial_concentrations.csv\"%(self.info['resultsdir'],fn)\n self.info['csvname_initialConcentration']=self.csvname\n print(self.csvname)\n d.to_csv('%s/%s'%(self.info['resultsdir'],self.csvname))", "def __create_output_csv(self, df, score_list, elapsed_list):\n df['Similar']=score_list\n df['Elapsed']=elapsed_list\n df.to_csv('Output.csv',index=False)\n return df", "def _save_results(projects, year):\n titles = [p[0] for p in projects]\n descriptions = [p[1] for p in projects]\n students = [p[2] for p in projects]\n orgs = [p[3] for p in projects]\n rows = zip(titles, descriptions, students, orgs)\n\n with open('data/'+year+'.csv', 'wb') as f:\n writer = csv.writer(f)\n for row in rows:\n writer.writerow(row)", "def _write_csv(self):\n\n # add the label to the header\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self.header.append('Date')\n else:\n self.header.append('sample id')\n\n key_list = []\n\n for i, cube in enumerate(self.cube_list):\n if self.input_data.get_value(InputType.TIME_PERIOD) == 'all':\n self._write_sample_with_date(cube, i, key_list)\n else:\n self._write_sample(cube, i, key_list)\n\n output_data_file_path = self._get_full_file_name()\n self._write_data_dict(output_data_file_path, key_list)\n\n return [output_data_file_path]", "def file(self):\n result = []\n completePath = CompletePath(self.path, self.filename) \n with open(completePath.path(), 'w', newline='') as csvfile:\n fieldnames = ['Activity', 'Points']\n writer = csv.DictWriter(csvfile, fieldnames = fieldnames)\n writer.writeheader()\n for i in range ( len( self.groupPriority.rows() ) ):\n tmp = self.groupPriority.rows()[i]\n self.log.info ( \"FinalCSV\", \"file\",\"data {0},{1}\".format( tmp.activity(), tmp.points() ) )\n writer.writerow({'Activity': tmp.activity(), 'Points': tmp.points()})\n self.log.info(\"FinalCSV\", \"file\", \"Elaborated file: {0}\".format ( completePath.path() ) )", "def create_model_csv(self):\n\n self.model_df.to_csv(self.model_output_file)", "def generate_csv(lists, output_file):\n if os.path.isfile(output_file):\n with open(output_file, 'a') as file:\n dataset = tablib.Dataset()\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n file.write(dataset.csv)\n else:\n with open(output_file, 'w+') as fp:\n dataset = tablib.Dataset(headers=['Original ASIN', 'Associated ASIN', 'Title', 'Price', 'Currency Code', 'Relationship'])\n for l in lists:\n dataset.append([l['Original ASIN'], l['Associated ASIN'], l['Title'], l['Price'], l['Currency Code'], l['Relationship']])\n fp.writelines(dataset.csv)", "def outputFunc(filename, resultList):\n 
#assert len(parks) == 3\n \n f = open(filename, 'wt')\n \n try:\n writer = csv.writer(f)\n for i in range(len(resultList)):\n print resultList[0]\n writer.writerow(resultList[i])\n \n finally:\n f.close()", "def outputFunc(filename, parks,roading,private):\n #assert len(parks) == 3\n \n f = open(filename, 'wt')\n \n try:\n writer = csv.writer(f)\n writer.writerow(days)\n writer.writerow(parks)\n writer.writerow(roading)\n writer.writerow(private)\n finally:\n f.close()", "def generate_csv(results, keys, options):\n if results and keys:\n with open(options.output_file, mode=fd_write_options) as fd_output:\n spamwriter = csv.writer(fd_output, delimiter=options.delimiter, quoting=csv.QUOTE_ALL, lineterminator='\\n')\n \n if not(options.skip_header):\n spamwriter.writerow(keys)\n \n for group in results:\n output_line = []\n \n for key in keys:\n if key in group.keys():\n if \"member\" == key:\n output_line.append(\"\\n\".join(group[key].split(\" \")))\n else:\n output_line.append(group[key])\n else:\n output_line.append('')\n \n spamwriter.writerow(output_line)\n if options.newline:\n spamwriter.writerow('')\n \n fd_output.close()\n \n return None", "def generate_csv(inf, outf):\n o = csv.writer(outf)\n o.writerow(COLUMNS)\n for row in reformat_data(inf):\n o.writerow([inf.name] + row)", "def _csvWriter(self):\r\n # Initialize Header\r\n table = []\r\n voltageRow = []\r\n for i in range(len(self._voltages)):\r\n voltageRow.append(self._voltages[i][0])\r\n voltageRow.append(\" \")\r\n if self._vna.isTwoComponents():\r\n voltageRow.append(\" \")\r\n table.append(voltageRow)\r\n \r\n # Fill table with data\r\n # if self._vna.isTwoComponents():\r\n # for i in range(len(self._frequency[0])):\r\n # row = []\r\n # for j in range(len(self._frequency)):\r\n # row.append(self._frequency[j][i])\r\n # row.append(self._intensity[j][2*i])\r\n # row.append(self._intensity[j][2*i + 1])\r\n # table.append(row)\r\n # else: \r\n for i in range(len(self._frequency[0])):\r\n row = []\r\n for j in range(len(self._frequency)):\r\n row.append(self._frequency[j][i])\r\n row.append(self._intensity[j][i])\r\n table.append(row)\r\n\r\n # Write to CSV\r\n filename = 'CSVs/' + self._vna.getDateFormatted() + '.csv'\r\n with open(filename, 'w', newline='') as csvfile:\r\n dataWriter = csv.writer(csvfile, delimiter=',', quoting=csv.QUOTE_MINIMAL)\r\n for i in range(len(table)):\r\n dataWriter.writerow(table[i])", "def generate_csv_output(payslip_data):\n payslip_output = StringIO(newline=None)\n csvFileWriter = csv.writer(payslip_output, delimiter=',')\n\n data = [['Full Name', 'Payment Period', 'Gross Income',\n 'Income Tax', 'Net Income', 'Super']]\n\n for employee in payslip_data:\n data.append([\n employee['full_name'],\n employee['payment_period'],\n str(employee['gross_income']),\n str(employee['income_tax']),\n str(employee['net_income']),\n str(employee['super_amount'])\n ])\n\n csvFileWriter.writerows(data)\n\n return payslip_output", "def generate_csv(table, header):\n with open(\"%s.csv\" % header, \"w\") as csvfile:\n for i in range(len(table)):\n for j in range(len(table[i])):\n if j != len(table[i])-1:\n tmp = table[i][j] + \",\"\n else:\n tmp = table[i][j] + \"\\n\"\n csvfile.write(tmp)", "def outputLevelCsv(self):\n # extract level information from result info\n extract_level = []\n extract_level = [item for item in self._result_info if self._result_info[2][0:5]=='LEVEL']\n if extract_level == []:\n print('No Result of LEVEL')\n return None\n # copy need information\n for i, item in 
enumerate(extract_level):\n self._level_csv_list[i][0] = item[1]\n self._level_csv_list[i][1] = item[2].split('-')[1]\n self._level_csv_list[i][2] = item[2].split('-')[2]\n self._level_csv_list[i][3] = item[4]\n # set csv file name\n csv_file_name = self._filename.rsplit('.', 1)[1] + '.csv'\n # write csv\n with open(csv_file_name, 'w') as csv_file:\n writer = csv.writer(csv_file)\n writer.writerows(self._level_csv_list)", "def write_output(self) -> None:\n self.home.round(2).to_csv(var.indicators_base_cumsum + \"home_\" + str(self.year) + \".csv\")\n self.away.round(2).to_csv(var.indicators_base_cumsum + \"away_\" + str(self.year) + \".csv\")", "def calculated_data_to_csv(transmissivity_calculated, conductivity_calculated,\n confirmed_wells, feature_class_name):\n utm_e = [i[0][0] for i in confirmed_wells]\n utm_n = [i[0][1] for i in confirmed_wells]\n np.set_printoptions(suppress=True) #removes scientific notation\n location = np.array([utm_e, utm_n])\n location = location.transpose()\n transmissivity_calculated = np.array(transmissivity_calculated)\n conductivity_calculated = np.array(conductivity_calculated)\n joined_data = np.concatenate((location, transmissivity_calculated, conductivity_calculated), axis = 1)\n my_df = pd.DataFrame(joined_data)\n header_list = ['UTME', 'UTMN', 'T_min', 'T_raw', 'T_max', 'K_min', 'K_raw', 'K_max', 'Well ID']\n raw_csv_name = f\"{feature_class_name}.csv\"\n my_df.to_csv(raw_csv_name, index = False, header = header_list)\n return my_df, raw_csv_name" ]
[ "0.6973687", "0.68380153", "0.6710647", "0.65229183", "0.6497935", "0.6473903", "0.6452836", "0.64365023", "0.6412565", "0.6315289", "0.6276529", "0.6232242", "0.6230959", "0.62285423", "0.61607337", "0.61379147", "0.6136664", "0.6123499", "0.6109184", "0.609388", "0.6078443", "0.6076977", "0.6073963", "0.6070604", "0.6048073", "0.6035948", "0.603509", "0.6034474", "0.60054535", "0.59891146" ]
0.6963844
1
Build a cached dict with settings.INSTALLED_APPS as keys and the 'templates' directory of each application as values.
def app_templates_dirs(self): app_templates_dirs = {} for app in settings.INSTALLED_APPS: if not six.PY3: fs_encoding = (sys.getfilesystemencoding() or sys.getdefaultencoding()) try: mod = import_module(app) except ImportError as e: # pragma: no cover raise ImproperlyConfigured( # pragma: no cover 'ImportError %s: %s' % ( app, e.args[0])) templates_dir = os.path.join(os.path.dirname(mod.__file__), 'templates') if os.path.isdir(templates_dir): if not six.PY3: templates_dir = templates_dir.decode(fs_encoding) app_templates_dirs[app] = templates_dir if '.' in app: app_templates_dirs[app.split('.')[-1]] = templates_dir return app_templates_dirs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_templates_dirs(self): \n from pkg_resources import resource_filename\n return [ resource_filename(__name__, 'templates') ]\n # return []", "def get_local_app_list():\n\t\tapp_list = [\n\t\t\t{\n\t\t\t\t'name': app,\n\t\t\t\t'dir': os.path.dirname(os.path.abspath(import_module(app).__file__)),\n\t\t\t}\n\t\t\tfor app in settings.INSTALLED_APPS\n\t\t]\n\t\treturn [app for app in app_list if settings.BASE_DIR in app['dir']]", "def _load_installed_applications(self):\n for application in self.settings.get('apps', None) or []:\n path = None\n if isinstance(application, six.string_types):\n application_name = application\n if application.startswith('gordon.contrib.'):\n app_parts = application.split('.')\n path = os.path.join(self.root, 'contrib', app_parts[-1])\n application_name = '_'.join(app_parts[1:])\n settings = {}\n elif isinstance(application, dict):\n application_name = application.keys()[0]\n settings = application.values()[0]\n else:\n raise exceptions.InvalidAppFormatError(application)\n\n with indent(2):\n self.puts(colored.cyan(\"{}:\".format(application_name)))\n\n self.add_application(\n App(\n name=application_name,\n settings=settings,\n project=self,\n path=path\n )\n )", "def get_templates_dirs(self):\n\t\tfrom pkg_resources import resource_filename\n\t\treturn [resource_filename(__name__, 'templates')]", "def _make_mako_template_dirs(settings):\n if settings.ENABLE_COMPREHENSIVE_THEMING:\n themes_dirs = get_theme_base_dirs_from_settings(settings.COMPREHENSIVE_THEME_DIRS)\n for theme in get_themes_unchecked(themes_dirs, settings.PROJECT_ROOT):\n if theme.themes_base_dir not in settings.MAKO_TEMPLATE_DIRS_BASE:\n settings.MAKO_TEMPLATE_DIRS_BASE.insert(0, theme.themes_base_dir)\n return settings.MAKO_TEMPLATE_DIRS_BASE", "def get_templates_dirs(self):\n from pkg_resources import resource_filename\n return [resource_filename(__name__, 'templates')]", "def get_django_template_dirs():\n template_dirs = []\n if 'django.template.loaders.filesystem.load_template_source' in\\\n settings.TEMPLATE_LOADERS or\\\n 'django.template.loaders.filesystem.Loader' in\\\n settings.TEMPLATE_LOADERS:\n template_dirs.extend(settings.TEMPLATE_DIRS)\n if 'django.template.loaders.app_directories.load_template_source' in\\\n settings.TEMPLATE_LOADERS or\\\n 'django.template.loaders.app_directories.Loader' in\\\n settings.TEMPLATE_LOADERS:\n from django.template.loaders.app_directories import app_template_dirs\n template_dirs.extend(app_template_dirs)\n return template_dirs", "def load_template_files(self):\n templates = dict()\n template_path = settings.CUSTOM_VERTO_TEMPLATES\n templates.update(self.read_template_files(template_path))\n if hasattr(self, \"extra_converter_templates_directory\"):\n directory = self.extra_converter_templates_directory\n template_path = os.path.join(template_path, directory)\n templates.update(self.read_template_files(template_path))\n return templates", "def test_app_loader(self):\n\n with mock.patch('template_tree.template_finder.apps', new=self.mock_apps):\n self.assertEqual(\n list(template_finder.templates_for_engine(self.engine_config)),\n [\n ('abc.html', '/tmp/project/project/templates/abc.html'),\n ('my_app/def.html', '/tmp/project/my_app/templates/my_app/def.html'),\n ('your_app/def.html', '/tmp/project/your_app/templates/your_app/def.html'),\n ]\n )", "def test_include_admin_apps(self):\n mock_apps = Apps(collections.OrderedDict([\n ('project', AppConfig('/tmp/project/project/')),\n ('admin', AppConfig('/tmp/project/my_app/')),\n ('your_app', 
AppConfig('/tmp/project/your_app/'))\n ]))\n\n with mock.patch('template_tree.template_finder.apps', new=mock_apps):\n self.assertEqual(\n list(\n template_finder.templates_for_engine(\n self.engine_config,\n []\n )\n ),\n [\n ('abc.html', '/tmp/project/project/templates/abc.html'),\n ('my_app/def.html', '/tmp/project/my_app/templates/my_app/def.html'),\n ('your_app/def.html', '/tmp/project/your_app/templates/your_app/def.html'),\n ]\n )", "def _GetDjangoAppDirs(self):\r\n \r\n manage_file, settings_file = self._FindKeyFiles()\r\n if manage_file is None:\r\n return None\r\n\r\n dirname = os.path.dirname(manage_file)\r\n files = os.listdir(dirname)\r\n appnames = []\r\n for fn in files:\r\n dn = os.path.join(dirname, fn)\r\n if os.path.isdir(dn) and os.path.isfile(os.path.join(dn, 'models.py')) and \\\r\n os.path.isfile(os.path.join(dn, 'views.py')):\r\n appnames.append(dn)\r\n \r\n return appnames", "def get_templates_dirs(self):\n return [resource_filename(__name__, 'templates')]", "def get_templates_dirs(self):\n return [resource_filename(__name__, 'templates')]", "def theme_template_base(context):\n template_path = os.path.join(settings.THEME_NAME, 'theme.html')\n return {'THEME_TEMPLATE': template_path}", "def get_installed_models():\n global _installed_models_cache\n if _installed_models_cache is not None:\n return _installed_models_cache\n _installed_models_cache = []\n for a in settings.INSTALLED_APPS:\n try:\n _installed_models_cache.append(__import__(a + '.models', '', '', ['']))\n except ImportError:\n pass\n return _installed_models_cache", "def get_installed_apps():\n installed_apps = []\n checked = set()\n for app in settings.INSTALLED_APPS:\n if not app.startswith('django.') and not app in checked:\n mod = import_module(app)\n checked.add(app)\n if exists(mod.__file__) and isdir(dirname(mod.__file__)):\n appdir = dirname(mod.__file__)\n installed_apps.append((appdir, mod, mod.__name__.split('.')[-1]))\n return installed_apps", "def test_default_app_exclusion(self):\n mock_apps = Apps(collections.OrderedDict([\n ('project', AppConfig('/tmp/project/project/')),\n ('admin', AppConfig('/tmp/project/my_app/')),\n ('your_app', AppConfig('/tmp/project/your_app/'))\n ]))\n\n with mock.patch('template_tree.template_finder.apps', new=mock_apps):\n self.assertEqual(\n list(\n template_finder.templates_for_engine(self.engine_config)\n ),\n [\n ('abc.html', '/tmp/project/project/templates/abc.html'),\n ('your_app/def.html', '/tmp/project/your_app/templates/your_app/def.html'),\n ]\n )", "def load_app_manifests(self):\n self.app_manifests = []\n apps_lib_path = os.path.join(self.apps_dir_path, \"lib\")\n for app_dir in os.listdir(apps_lib_path):\n if app_dir not in (\"__init__.py\", \"__init__.pyc\"):\n if app_dir.find(\"_v\") > 1:\n app_name = app_dir[:app_dir.find(\"_v\")]\n self.app_manifests.append(json.load(file(os.path.join(self.apps_dir_path, 'lib', app_dir, \"manifest.json\"))))\n log.info(\"Manifest for %s app was loaded\" % (app_dir))\n else:\n log.info(\"Directory %s will be skipped from app loader . 
Doesn't match naming convention .\" % app_dir)", "def get_apps(self):\n return self.apps", "def run():\r\n template_locations = settings.MAKO_TEMPLATES\r\n for namespace, directories in template_locations.items():\r\n clear_lookups(namespace)\r\n for directory in directories:\r\n add_lookup(namespace, directory)", "def get_template_data(self) -> dict:\n template_data = self._get_template_data()\n\n @dataclass\n class FileEntry:\n \"\"\"Provides an entry into manifest object.\"\"\"\n\n name: str\n size: str\n md5: Optional[str]\n\n template_data[\"resource_files\"] = [\n FileEntry(entry.name, convert_size(entry.size), entry.md5)\n for entry in self.resource.get_manifest().entries.values()\n if not entry.name.startswith(\"statistics\")\n and entry.name != \"index.html\"]\n template_data[\"resource_files\"].append(\n FileEntry(\"statistics/\", \"\", \"\"))\n return template_data", "def _build_app_dict(self, request, label=None):\n app_dict = {}\n\n if label:\n models = {\n m: m_a for m, m_a in self._registry.items()\n if m._meta.app_label == label\n }\n else:\n models = self._registry\n\n for model, model_admin in models.items():\n app_label = model._meta.app_label\n\n has_module_perms = model_admin.has_module_permission(request)\n if not has_module_perms:\n continue\n\n perms = model_admin.get_model_perms(request)\n\n # Check whether user has any perm for this module.\n # If so, add the module to the model_list.\n if True not in perms.values():\n continue\n\n info = (app_label, model._meta.model_name)\n model_dict = {\n 'order': model._meta.order if hasattr(model._meta, 'order') else 99,\n 'name': capfirst(model._meta.verbose_name_plural),\n 'object_name': model._meta.object_name,\n 'perms': perms,\n 'admin_url': None,\n 'add_url': None,\n }\n if perms.get('change') or perms.get('view'):\n model_dict['view_only'] = not perms.get('change')\n try:\n model_dict['admin_url'] = reverse('admin:%s_%s_changelist' % info, current_app=self.name)\n except NoReverseMatch:\n pass\n if perms.get('add'):\n try:\n model_dict['add_url'] = reverse('admin:%s_%s_add' % info, current_app=self.name)\n except NoReverseMatch:\n pass\n\n if app_label in app_dict:\n app_dict[app_label]['models'].append(model_dict)\n else:\n config = apps.get_app_config(app_label)\n app_dict[app_label] = {\n 'icon': config.icon if hasattr(config, 'icon') else 'fa fa-circle-o',\n 'order': config.order if hasattr(config, 'order') else 99,\n 'name': config.verbose_name,\n 'app_label': app_label,\n 'app_url': reverse(\n 'admin:app_list',\n kwargs={'app_label': app_label},\n current_app=self.name,\n ),\n 'has_module_perms': has_module_perms,\n 'models': [model_dict],\n }\n\n if label:\n return app_dict.get(label)\n return app_dict", "def get_template_keys(self, template_name, template_dirs=None):\n if not template_dirs:\n template_dirs = settings.TEMPLATE_DIRS\n for template_dir in template_dirs:\n if template_dir.endswith(\"/\"):\n template_dir = template_dir[:-1]\n yield \"/\".join([template_dir, template_name])", "def get_all_theme_template_dirs():\n themes = get_themes()\n template_paths = list()\n\n for theme in themes:\n template_paths.extend(theme.template_dirs)\n\n return template_paths", "def applicationsdetails():\n appdicts = db.hgetall('applications')\n finaldict = OrderedDict()\n for appname in sorted(appdicts):\n instances = json.loads(appdicts.get(appname))\n instance_map = OrderedDict()\n for key in sorted(instances):\n instance_map.__setitem__(key,instances.get(key))\n finaldict.__setitem__(appname,instance_map)\n return 
render_template('robots.html', appdicts=finaldict)", "def get_apps(self) -> List[str]:\n return list(self.config[\"apps\"].keys())", "def _FindKeyFiles(self):\r\n \r\n if self.__fCachedFiles is not None:\r\n return self.__fCachedFiles\r\n \r\n app = wingapi.gApplication\r\n proj = app.GetProject()\r\n files = proj.GetAllFiles()\r\n manage_files = []\r\n settings_files = []\r\n for fn in files:\r\n if os.path.basename(fn) == 'manage.py' and not os.path.dirname(fn).endswith('project_template') and os.path.isfile(fn):\r\n manage_files.append(fn)\r\n elif os.path.basename(fn) == 'settings.py' and not os.path.dirname(fn).endswith('project_template') and os.path.isfile(fn):\r\n settings_files.append(fn)\r\n\r\n pairs = []\r\n for manage_file in manage_files:\r\n for settings_file in settings_files:\r\n manage_dir = os.path.dirname(manage_file)\r\n settings_dir = os.path.dirname(settings_file)\r\n if manage_dir == settings_dir:\r\n pairs.append((manage_file, settings_file))\r\n if len(pairs) > 1:\r\n app.SetStatusMessage(\"Warning: Multiple manage.py/settings.py pairs found in project\")\r\n \r\n if len(pairs) > 0:\r\n self.__fCachedFiles = pairs[0]\r\n else:\r\n self.__fCachedFiles = (None, None)\r\n \r\n return self.__fCachedFiles", "def sync_apps(self):\n pass", "def refresh(self):\n self._themes = {}\n for theme in starchain(ldr(self.app) for ldr in self.loaders):\n if self.valid_app_id(theme.application):\n self.themes[theme.identifier] = theme\n self.register_theme_assets()", "def get_app_list(self):\n return self.get_setting('applications', 'installed_apps')" ]
[ "0.62339866", "0.6075647", "0.60284287", "0.60173535", "0.6008311", "0.59243536", "0.5887615", "0.58762103", "0.58131", "0.57841635", "0.57560146", "0.5703265", "0.5703265", "0.56275016", "0.5590184", "0.5588373", "0.55697465", "0.55501664", "0.55084664", "0.5405607", "0.53680766", "0.53649265", "0.5350998", "0.5349395", "0.53455675", "0.533838", "0.5315657", "0.5305269", "0.5297924", "0.5292161" ]
0.7934003
0
Validates that the given path looks like a valid chip repository checkout.
def ValidateRepoPath(context, parameter, value): if value.startswith('/TEST/'): # Hackish command to allow for unit testing return value for name in ['BUILD.gn', '.gn', os.path.join('scripts', 'bootstrap.sh')]: expected_file = os.path.join(value, name) if not os.path.exists(expected_file): raise click.BadParameter( ("'%s' does not look like a valid repository path: " "%s not found.") % (value, expected_file)) return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def path_validate(path):\n # functionality to be added later\n return path", "def validpath(self, path):\n root = self.realpath(self.root)\n path = self.realpath(path)\n if not self.root.endswith(os.sep):\n root = self.root + os.sep\n if not path.endswith(os.sep):\n path = path + os.sep\n if path[0:len(root)] == root:\n return True\n return False", "def _validate_path(self, path: str, is_file: bool) -> bool:\n is_valid_path = True\n if is_file and not os.path.isfile(path):\n is_valid_path = False\n elif not is_file and not os.path.isdir(path):\n is_valid_path = False\n if is_valid_path:\n logging.info('github_source_interceptor: Located path: ' + path)\n else:\n logging.error('github_source_interceptor: Could not locate path: ' + path)\n\n return is_valid_path", "def verify_path(path):\n if path is None:\n sys.exit('Program terminated. You must specify a correct path.')\n path = Path(path)\n assert path.exists(), f'The specified path was not found: {path}.'\n return path", "def is_valid(path):\n return (\n bool(path)\n and os.path.isabs(path)\n and os.path.exists(path)\n and (not is_apple() or path.endswith(\".dylib\"))\n )", "def verify_git_clean(path):\n\n sys.stdout.write(\" - Checking for uncommitted changes:\")\n result = run_in_component(path, ['git', 'status', '--porcelain=v1'])\n\n lines = [x for x in result.splitlines() if len(x) > 0]\n\n if len(lines) == 0:\n print(\" OKAY\")\n return\n\n print(\" FAILED\")\n\n raise GenericError(\"There are uncommitted changes in the component, please commit or stash them\")", "def check_valid_path(path):\n\n path = os.path.normpath(path)\n if not os.path.exists(path):\n print(f\"{path} doesn't exist\")\n print('Code execution exit')\n sys.exit()", "def checkGit(directory):", "def test_worktree_does_checkout(repository: Repository, path: Path) -> None:\n updatefile(path)\n branch = repository.heads.create(\"branch\")\n\n with repository.worktree(branch) as worktree:\n assert (worktree.path / path.name).is_file()", "def ValidatePath(self, root_path: str) -> bool:\n if 'gold' in root_path:\n return True\n\n return False", "def test_path(tmp_path: Path) -> None:\n path = tmp_path / \"repository\"\n repository = Repository.init(path)\n assert path == repository.path", "def validate_short_path(short_path):", "def is_valid_path(input_path):\n if not os.path.exists(input_path):\n print('\\'{}\\' is not a valid path.'.format(input_path))\n exit(1)\n return input_path", "def svn_fs_check_path(*args):\r\n return _fs.svn_fs_check_path(*args)", "def ValidatePath(self, root_path: str) -> bool:\n if 'silver' in root_path:\n return True\n\n return False", "def valid_tpkg_file(self, path):\n\n\t\tprint(self.config[\"daemon\"][\"rootdir\"] + path)\n\t\tif os.path.exists(self.config[\"daemon\"][\"rootdir\"] + \"/\" + path):\n\t\t\treturn self.fetch_remote_hashcode(path) == self.fetch_local_hashcode(path)\n\t\telse:\n\t\t\tprint(\"Package: \" + path + \" has not been downloaded.\");\n\t\treturn False", "def _can_checkout(wit_path) -> bool:\n\n current_id = _get_head(wit_path)\n changes_to_be_committed = _return_as_string(_get_changes_to_be_committed, wit_path, current_id)\n changes_not_staged_for_commit = _return_as_string(_get_changes_not_staged_for_commit, wit_path)\n if changes_to_be_committed + changes_not_staged_for_commit == '':\n return True\n logging.error(FileNotSavedError('Some files are not saved. 
Try \"status\" command to view them.'))\n return False", "def test_verify_path_7(self):\n result = basic.verify_path(str(self.test_directory1), \"invalid\")\n self.assertFalse(result)", "def validateObjectPath(p):\n if not p.startswith('/'):\n raise MarshallingError('Object paths must begin with a \"/\"')\n if len(p) > 1 and p[-1] == '/':\n raise MarshallingError('Object paths may not end with \"/\"')\n if '//' in p:\n raise MarshallingError('\"//\" is not allowed in object paths\"')\n if invalid_obj_path_re.search(p):\n raise MarshallingError('Invalid characters contained in object path')", "def is_valid_path(path):\n if not os.path.exists(path):\n raise IOError(\"{path} is not a valid path\".format(path=path))\n if not os.access(path, os.R_OK):\n raise OSError(\"{path} is not a readable path\".format(path=path))", "def check_repo(self):\n if not os.path.exists(self.path):\n log.error(\"no dots repository found at '{}'\".format(self.path))\n if not os.path.exists(self.files_path):\n log.error(\"corrupted repository, the 'files' subfolder is missing\")\n if not os.path.exists(self.enc_files_path):\n log.error(\"corrupted repository, the 'encrypted' subfolder is missing\")\n if not os.path.exists(os.path.join(self.path, '.git')):\n log.error(\"corrupted repository, folder exists but is not versioned\")\n self.git_repo = Repo(self.path)", "def validate_path_cmd(txt):\n \n nf = \"([-+]?([0-9]*\\.[0-9]+|[0-9]+))\" # Expression matching a number\n \n # Expressions for validating svg path commands (the \"d\" attribute)\n expr = (\n \"^[lLmMtT](\\s+%s){2}$\" % (nf), # Line, move and smooth quadratic beizer commands\n \"^[hHvV]\\s+%s$\" % (nf), # Horisontal and vertical commands\n \"^[cC](\\s+%s){6}$\" % (nf), # Cubic beizer command\n \"^[qQsS](\\s+%s){4}$\" % (nf), # Quadratic and Smooth beizer command\n \"^[aA](\\s+%s){3}\\s[0-1]\\s[0-1](\\s+%s){2}$\" % (nf,nf), # Eliptical arc command\n \"^[zZ]$\") # Close path command\n\n for val in expr:\n m = re.match(val, txt)\n if m:\n return m.group(0)\n \n return False", "def check_path(self,path):\r\n self.__path=path\r\n list_of_file_names={'Structure.txt','test.csv','train.csv'}\r\n if list_of_file_names <= set(os.listdir(self.__path)):#checking if the path have all the requierd files\r\n if os.path.getsize(os.path.join(self.__path,'Structure.txt')) > 0 and os.path.getsize(os.path.join(self.__path,'test.csv')) >0 and os.path.getsize(os.path.join(self.__path,'train.csv'))>0:#checking if the files are not empty\r\n self.view.Build_Button.config(state='active')\r\n self.view.Bins_Entry.configure(state='normal',text='Enter number of bins',font=(\"Calibri\",12),justify=\"center\",exportselection=0)\r\n #sending to the view the error messages\r\n else:\r\n self.view.file_error_handling(\"\",\"One or more of the files is empty\")\r\n else:\r\n self.view.file_error_handling(\"required files are missing\", \"The directory must have Structure.txt,test.csv and train.csv files\")", "def validate_path(self, path):\n return True # Allow anything in path, even spaces\n # pattern = r'(/?[a-zA-Z_][a-zA-Z0-9_]*)+$' # require start with letter\n # pattern = r'(/?[a-zA-Z0-9_]*)+$' # allow start with number\n pattern = r'^([^ ]+)$' # allow anything except spaces\n if path == '' or re.match(pattern, path):\n return\n raise ValueError(\"Invalid path (spaces not allowed):\\n'%s'\" % path)", "def check_path(path, diagnostic):\n if not os.path.exists(path):\n print(f\"Could not find {path}. 
{diagnostic}\")\n sys.exit(1)", "def test_worktree_no_checkout(repository: Repository, path: Path) -> None:\n updatefile(path)\n branch = repository.heads.create(\"branch\")\n\n with repository.worktree(branch, checkout=False) as worktree:\n assert not (worktree.path / path.name).is_file()", "def test_save_with_no_raw_file(self) -> None:\n repository = self.create_repository(tool_name='Git')\n\n form = GitTool.create_repository_form(\n repository=repository,\n data={\n 'path': 'https://github.com/reviewboard/reviewboard',\n 'mirror_path': '[email protected]:reviewboard/reviewboard.git',\n })\n form.full_clean()\n\n self.assertFalse(form.is_valid())\n\n manual_url = get_manual_url() + 'admin/configuration/repositories/git/'\n self.assertEqual(form.errors['path'], [\n 'Remote Git repositories cannot be accessed without a Raw File '\n 'URL Mask. See the <a href=\"%s\">documentation</a> for more '\n 'details.'\n % manual_url,\n ])", "def test_save_with_local_path(self) -> None:\n repository = self.create_repository(tool_name='Git')\n\n form = GitTool.create_repository_form(\n repository=repository,\n data={\n 'path': '/opt/repositories/repo1/.git',\n })\n form.full_clean()\n\n self.assertTrue(form.is_valid())\n\n form.save()\n self.assertEqual(repository.path, '/opt/repositories/repo1/.git')", "def test_path_not_repo(folder):\n\n with pytest.raises(ValueError):\n gitb.pull(folder)", "def check_path(filename):\n return not bool(checkPath(filename))" ]
[ "0.57880497", "0.5727862", "0.57209194", "0.5713962", "0.563717", "0.56051886", "0.5598306", "0.55910486", "0.5558308", "0.5513613", "0.5482505", "0.54587203", "0.54361695", "0.5406059", "0.5382245", "0.531769", "0.53093684", "0.5304615", "0.5287788", "0.5281228", "0.52625215", "0.52344", "0.52308214", "0.52084124", "0.5194242", "0.51934767", "0.5185536", "0.51811856", "0.51739264", "0.51661015" ]
0.6077597
0
Turn on/off the joint lock so you can manipulate the WAM by hand.
def set_wam_joint_hold(hold): msg = HoldRequest() msg.hold = hold wam_hold_service = rospy.ServiceProxy('/wam/hold_joint_pos', Hold) try: resp1 = wam_hold_service(msg) except rospy.ServiceException as exc: print("Service did not process request: " + str(exc))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def turn_on(self, **kwargs):\n self.set_graceful_lock(True)\n self.robot.start_cleaning()", "def ToggleLock(self, event):\n pass", "def turn_on(self):\n self._data.homestatus.setroomThermpoint(\n self._data.home_id, self._room_id, STATE_NETATMO_HOME\n )\n self.update_without_throttle = True\n self.schedule_update_ha_state()", "def lockMeshes():\n setLockOnMeshes(2)", "def lock (self):\n self.locked = True\n self._changed = False", "def lightning_turnon(self):\n self.turnOn()", "def unlockMeshes():\n setLockOnMeshes(0)", "def turnOn(self):\n self.off = False\n self.turnOnAnimation()", "def force_switch_on(self):\n self.turn_on_modem()", "def enable_lock(self, lock_on=True):\n if lock_on:\n self.write('ScanM_Mode=2') #Search\n time.sleep(10)\n self.write('ScanM_Mode=3') #Lock, its unclear from manual if\n #this is redundant. i.e. autolocks\n #at end of search\n if not self.query_lock_status():\n raise ac_excepts.CouplingkError('Not meeting threshold power',\n self.enable_lock)\n if not lock_on:\n self.write('ScanM_Mode=0') #Off", "def turn_on(self):\n self._remote.power(1)", "def setLockOnMeshes(lock):\n meshes = getMeshes()\n for mesh in meshes:\n try:\n mesh.overrideEnabled.set(1)\n mesh.overrideDisplayType.set(lock)\n except RuntimeError as error:\n pm.warning('Can\\'t set lock on mesh! ' + str(error))", "def action_lock(self):\n self.state = 'locked'", "def set_lock_status(use_lock):\r\n get_lock.lock_is_enabled = use_lock", "def turn_on(self):\n self._lms.query(self._id, 'power', '1')\n self.update_ha_state()", "def setJ04Zero(self):\n self.robot.set_joint({'j0':0})\n self.robot.set_joint({'j4':0})\n self.robot.save_config()", "def turn_off(self):\n if self._module_type == NA_VALVE:\n self._data.homestatus.setroomThermpoint(\n self._data.home_id,\n self._room_id,\n STATE_NETATMO_MANUAL,\n DEFAULT_MIN_TEMP,\n )\n elif self.hvac_mode != HVAC_MODE_OFF:\n self._data.homestatus.setroomThermpoint(\n self._data.home_id, self._room_id, STATE_NETATMO_OFF\n )\n self.update_without_throttle = True\n self.schedule_update_ha_state()", "def switch_on(self,name):\n self.circles[name].switch_on()\n self.cursor.execute(\"\"\"UPDATE sensors_powersensor SET state=1 WHERE target=%s\"\"\", (name,))", "def turn_on(self):\n self._interrupt_flash()\n if not self.on:\n GPIO.output(self.pin, GPIO.HIGH)\n self.on = True", "def applyLock(self, pkmn):\n pkmn.actionLock = ActionLock(pkmn, \\\n pkmn.lastAction, self.turns-1)", "def turn_on(self):\n self._state = True\n self.write_state(bytes([9]))\n self.schedule_update_ha_state()", "def lock_gate(self):\n self.fsm_gate.clear()", "def f_unlock(self):\n self._locked = False", "def turnOffMotors(self) -> None:\n mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)\n mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)", "def unlock_door_interlock(self):\n if self.detector_distance_hwobj.getPosition() < 340:\n self.detector_distance_hwobj.move(500)\n gevent.sleep(1)\n\n if not self.use_door_interlock:\n logging.getLogger().info('Door interlock is disabled')\n return\n\n if self.door_interlock_state:\n gevent.spawn(self.unlock_doors_thread)\n else:\n logging.getLogger().info('Door is Interlocked')", "def turn_on(self, **kwargs: Any) -> None:\n self._set_light(ON_STATE)", "def lockAtTarget(self, initial_call):\n if initial_call:\n self.chassis.setBrakeMode()\n if not self.isAligned():\n self.next_state(\"driveToTarget\")\n else:\n self.chassis.stop()", "def turn_on(self):\n 
GPIO.output(self.gpio, True) # turn on light", "def on(self):\n if not self._is_on:\n self._pwms.enable(self._pin_index, self._frequency)\n self._is_on = True", "def turnOffMotors(self):\n self.mh.getMotor(1).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(2).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(3).run(Adafruit_MotorHAT.RELEASE)\n self.mh.getMotor(4).run(Adafruit_MotorHAT.RELEASE)" ]
[ "0.6700339", "0.6349209", "0.6309134", "0.6272996", "0.61883575", "0.6179182", "0.61776733", "0.61142284", "0.6089046", "0.6087645", "0.60502875", "0.603042", "0.60217047", "0.59520024", "0.59362584", "0.59181535", "0.58999264", "0.5894002", "0.5885579", "0.58486074", "0.5844748", "0.58357406", "0.5835067", "0.5808107", "0.5787806", "0.57860553", "0.5773019", "0.57729745", "0.57703394", "0.57694125" ]
0.63843507
1
Create a trajectory from start_position to end_position. The trajectory is the linear interpolation from start to end. It will last duration_of_trajectory seconds. Be careful to pick your start/end points such that the hand doesn't rotate into the arm.
def create_joint_trajectory(start_position, end_position, duration_of_trajectory, frequency_of_trajectory): frequency_of_ros_messages = frequency_of_trajectory # in Hz. number_of_way_points = duration_of_trajectory * frequency_of_ros_messages number_of_joints = start_position.__len__() trajectory = np.zeros((number_of_joints, number_of_way_points)) for i in xrange(number_of_joints): trajectory[i] = np.linspace(start_position[i], end_position[i], number_of_way_points) trajectory = trajectory.T.copy() vel_lims = np.diff(trajectory, axis=0) #Because this is discrete differentiation, # the last value is missing: len(vel_lims) = len(trajectory) - 1 # so we just repeat the last calculated velocity. vel_lims = np.append(vel_lims, [[x for x in vel_lims[-1,:]]], axis = 0) vel_lims = vel_lims * frequency_of_trajectory vel_lims = np.absolute(vel_lims) if vel_lims.all() > 1.0: raise ValueError("One or more of the values in the specified velocities" "Exceed 1 rad / second. The robot won't like this." "Adjust the trajectory so that each point can be " "reached without exceeding this limit.") return trajectory, vel_lims
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_trajectory(self, NextwpPosition, NextwpOrientation):\n d = np.linalg.norm(self.CurrentPosition - NextwpPosition)\n inter_segment_distance = 1\n self.no_of_segments = 1+int(d//inter_segment_distance)\n \n\n # enter sequence of waypoints: no of points should be self.no_of_segments+1\n x_wp = np.linspace(self.CurrentPosition[0], NextwpPosition[0], self.no_of_segments+1)\n y_wp = np.linspace(self.CurrentPosition[1], NextwpPosition[1], self.no_of_segments+1)\n z_wp = np.linspace(self.CurrentPosition[2], NextwpPosition[2], self.no_of_segments+1)\n \n # add intial and final condiions vel, acc, jerk\n x_ic = np.array([0, 0, 0])\n x_fc = np.array([0, 0, 0])\n x0 = np.array([x_wp[0], x_ic[0], x_ic[1], x_ic[2]])\n xT = np.array([x_wp[-1], x_fc[0], x_fc[1], x_fc[2]])\n\n y_ic = np.array([0, 0, 0])\n y_fc = np.array([0, 0, 0])\n y0 = np.array([y_wp[0], y_ic[0], y_ic[1], y_ic[2]])\n yT = np.array([y_wp[-1], y_fc[0], y_fc[1], y_fc[2]])\n \n z_ic = np.array([0, 0, 0])\n z_fc = np.array([0, 0, 0])\n z0 = np.array([z_wp[0], z_ic[0], z_ic[1], z_ic[2]])\n zT = np.array([z_wp[-1], z_fc[0], z_fc[1], z_fc[2]])\n\n path = [np.sqrt((x_wp[i]-x_wp[i-1])**2 + (y_wp[i]-y_wp[i-1])**2 + (z_wp[i]-z_wp[i-1])**2) for i in range(1, self.no_of_segments+1, 1)]\n\n \n T = []; T.insert(0, 0)\n T.insert(1, T[-1] + path[0]/self.reduced_speed)\n for i in range(1, len(path)-1, 1):\n T.append(T[-1] + path[i]/self.average_speed)\n T.insert(len(T)+1, T[-1]+path[-1]/self.reduced_speed) \n\n\n\n\n #T = []; T.insert(0, 0) # insert 0 at 0 position\n #for i in range(self.no_of_segments): \n # T.append(T[-1]+path[i]/self.average_speed)\n\n r = self.r\n N = 1 + self.N # because number of terms in a polynomial = degree+1\n\n QQ = []; AA_inv = []\n\n for i in range(self.no_of_segments): \n q = self.construct_Q(N, r, T[i], T[i+1])\n a = self.construct_A(N, r, T[i], T[i+1])\n a_inv = scipy.linalg.pinv(a)\n QQ = block_diag(QQ, q)\n AA_inv = block_diag(AA_inv, a_inv)\n \n order = 2*r*self.no_of_segments\n R = np.dot(AA_inv.T, np.dot(QQ, AA_inv))\n \n bx = self.construct_b(x0, xT)\n by = self.construct_b(y0, yT)\n bz = self.construct_b(z0, zT)\n\n m = Model(\"qp\")\n order = 2*r*self.no_of_segments\n dx = m.addVars(order, lb = -GRB.INFINITY, ub = GRB.INFINITY, vtype=GRB.CONTINUOUS, name=\"dx\")\n dy = m.addVars(order, lb = -GRB.INFINITY, ub = GRB.INFINITY, vtype=GRB.CONTINUOUS, name=\"dy\") \n dz = m.addVars(order, lb = -GRB.INFINITY, ub = GRB.INFINITY, vtype=GRB.CONTINUOUS, name=\"dz\") \n\n # making objective using quicksum, takes a lot of time \n #obj1 = quicksum(dx[i] * quicksum(R[i][j] * dx[j] for j in range(order)) for i in range(order))\n #obj2 = quicksum(dy[i] * quicksum(R[i][j] * dy[j] for j in range(order)) for i in range(order))\n #obj3 = quicksum(dz[i] * quicksum(R[i][j] * dz[j] for j in range(order)) for i in range(order))\n \n # using LinExpr for the second expression is significantly faster \n obj1 = quicksum(dx[i] * LinExpr([(R[i][j], dx[j]) for j in range(order)]) for i in range(order))\n obj2 = quicksum(dy[i] * LinExpr([(R[i][j], dy[j]) for j in range(order)]) for i in range(order))\n obj3 = quicksum(dz[i] * LinExpr([(R[i][j], dz[j]) for j in range(order)]) for i in range(order))\n obj = obj1 + obj2 + obj3\n j = 0\n for i in range(order): \n if i < r: \n m.addConstr(dx[i] == bx[i])\n m.addConstr(dy[i] == by[i])\n m.addConstr(dz[i] == bz[i])\n elif i >= order-r: \n m.addConstr(dx[i] == bx[r+j])\n m.addConstr(dy[i] == by[r+j])\n m.addConstr(dz[i] == bz[r+j])\n j += 1\n \n c = 1 # counter\n for i in range(r, order-2*r, 
2*r): \n #m.addConstr(dx[i] == self.x_wp[c])\n #m.addConstr(dy[i] == self.y_wp[c])\n #m.addConstr(dz[i] == self.z_wp[c])\n m.addConstr(dx[i] <= x_wp[c] + 0.2)\n m.addConstr(dx[i] >= x_wp[c] - 0.2)\n m.addConstr(dy[i] <= y_wp[c] + 0.2)\n m.addConstr(dy[i] >= y_wp[c] - 0.2)\n m.addConstr(dz[i] <= z_wp[c] + 0.2)\n m.addConstr(dz[i] >= z_wp[c] - 0.2)\n c = c+1\n for j in range(r): \n m.addConstr(dx[i+j] == dx[i+j+r])\n m.addConstr(dy[i+j] == dy[i+j+r])\n m.addConstr(dz[i+j] == dz[i+j+r])\n #if j ==2: \n # m.addConstr(dx[i+j] == 2.0)\n\n m.setObjective(obj, GRB.MINIMIZE)\n #m.write('model.lp')\n m.setParam('OutputFlag', 0)\n m.setParam('PSDtol', 1e-1)\n m.optimize()\n\n\n runtime = m.Runtime\n\n\n x_coeff = [dx[i].X for i in range(order)]\n y_coeff = [dy[i].X for i in range(order)]\n z_coeff = [dz[i].X for i in range(order)]\n\n Dx = np.asarray(x_coeff)[np.newaxis].T\n Dy = np.asarray(y_coeff)[np.newaxis].T \n Dz = np.asarray(z_coeff)[np.newaxis].T \n pcx = np.dot(AA_inv, Dx); pcy = np.dot(AA_inv, Dy); pcz = np.dot(AA_inv, Dz)\n\n\n poly_coeff_x = pcx.T.ravel().tolist()\n poly_coeff_y = pcy.T.ravel().tolist()\n poly_coeff_z = pcz.T.ravel().tolist()\n\n return poly_coeff_x, poly_coeff_y, poly_coeff_z, T, time.time()\n #self.publish(poly_coeff_x, poly_coeff_y, poly_coeff_z)", "def joint_trajectory(theta_start, theta_end, Tf, N, method):\n\n N = int(N)\n timegap = Tf / (N - 1.0) # N points, N-1 line segments\n traj = np.zeros((len(theta_start), N)) # intitialize the trajectory matrix, 1D joint vars, 2D each time instance\n\n # for each line segment, from 0 to T, calculate the corresponding s value (0to1)\n for i in range(N):\n if method == 3:\n s = cubic_time_scaling(Tf, timegap * i)\n else:\n s = quintic_time_scaling(Tf, timegap * i)\n traj[:, i] = s * np.array(theta_end) + (1 - s) * np.array(theta_start) # xi = x_start + (0.whatever fraction s)(x_end-x_start)\n traj = np.array(traj).T\n return traj", "def cartesian_trajectory(X_start, X_end, Tf, N, method):\n\n N = int(N)\n timegap = Tf / (N - 1.0)\n traj = [[None]] * N\n R_start, p_start = ch3.transf_matrix_to_Rp(X_start)\n R_end, p_end = ch3.transf_matrix_to_Rp(X_end)\n\n R_start_end = np.dot(np.array(R_start).T,R_end)\n for i in range(N):\n if method == 3:\n s = cubic_time_scaling(Tf, timegap * i)\n else:\n s = quintic_time_scaling(Tf, timegap * i)\n\n R_start_end_fractioned = ch3.so3_to_rotation_matrix(ch3.rotation_matrix_to_so3(R_start_end) * s)\n R_start_s = np.dot(R_start, R_start_end_fractioned)\n\n p_start_s = s * np.array(p_end) + (1 - s) * np.array(p_start) #p_start + s(p_end-p_start)\n traj[i] = np.r_[np.c_[R_start_s, p_start_s], \\\n [[ 0, 0, 0, 1]]]\n\n \n return traj", "def target(self, time, points, dt, num_way):\n start_index = min(int(time / dt), num_way - 1)\n end_index = min(start_index + 1, num_way - 1)\n start_point = points[start_index]\n end_point = points[end_index]\n fraction = float(time % dt) / dt\n return linear_interpolation_two_points(start_point, end_point, fraction).reshape(3)", "def plot_joint_angles(t_start,t_stop):\n [time,\n ankle_l_trajectory,\n ankle_r_trajectory,\n foot_l_contact,\n foot_r_contact,\n muscle_lh_activations,\n muscle_rh_activations,\n muscle_lh_forces,\n muscle_rh_forces,\n joint_lh_positions,\n joint_rh_positions] = load_data()\n \n index_start = np.where(time == t_start)[0][0]\n index_end = np.where(time == t_stop)[0][0]\n \n time_plot = time[index_start:index_end+1]\n joint_lh_positions = joint_lh_positions[index_start:index_end+1,:]\n joint_rh_positions = 
joint_rh_positions[index_start:index_end+1,:]\n\n # Example to plot joint trajectories.\n # Feel free to change or use your own plot tools\n plt.figure()\n plt.subplot(3,1,1)\n plt.plot(time_plot, np.rad2deg(joint_lh_positions[:, 0]))\n plt.plot(time_plot, np.rad2deg(joint_rh_positions[:, 0]))\n plt.ylabel('Hip Angle [deg]')\n plt.legend(['Left','Right'],loc='upper right')\n plt.grid('on')\n plt.subplot(3,1,2)\n plt.plot(time_plot, np.rad2deg(joint_lh_positions[:, 1]))\n plt.plot(time_plot, np.rad2deg(joint_rh_positions[:, 1]))\n plt.ylabel('Knee Angle [deg]')\n plt.legend(['Left','Right'],loc='upper right')\n plt.grid('on')\n plt.subplot(3,1,3)\n plt.plot(time_plot, np.rad2deg(joint_lh_positions[:, 2]))\n plt.plot(time_plot, np.rad2deg(joint_rh_positions[:, 2]))\n plt.grid('on')\n plt.ylabel('Ankle Angle [deg]')\n plt.legend(['Left','Right'],loc='upper right')\n plt.xlabel('Time [s]')\n\n return", "def generate_trajectory(t, v, waypoints, coeff_x, coeff_y, coeff_z):\n global yaw\n global current_heading\n yawdot = 0.0\n pos = np.zeros(3)\n acc = np.zeros(3)\n vel = np.zeros(3)\n jerk = np.zeros(3)\n snap = np.zeros(3)\n yawddot = 0.0\n\n # distance vector array, represents each segment's distance\n distance = waypoints[0:-1] - waypoints[1:]\n # T is now each segment's travel time\n T = (1.0 / v) * np.sqrt(distance[:,0]**2 + distance[:,1]**2 + distance[:,2]**2)\n # accumulated time\n S = np.zeros(len(T) + 1)\n S[1:] = np.cumsum(T)\n\n # find which segment current t belongs to\n t_index = np.where(t >= S)[0][-1]\n\n # prepare the next desired state\n if t == 0:\n pos = waypoints[0]\n t0 = get_poly_cc(8, 1, 0)\n\n # get X-Y plane project of velocity vector ( this vector is tangent to curve )\n v_proj = np.array([coeff_x[0:8].dot(t0), coeff_y[0:8].dot(t0)])\n if(LA.norm(v_proj) == 0.0):\n # if velocity vector is of zero magnitude there should be no change in heading!\n pass\n else:\n current_heading = v_proj/LA.norm(v_proj) * (1.0 / T[0])\n \n\n # stay hover at the last waypoint position\n elif t > S[-1]:\n pos = waypoints[-1]\n else:\n # scaled time\n scale = (t - S[t_index]) / T[t_index]\n start = 8 * t_index\n end = 8 * (t_index + 1)\n\n t0 = get_poly_cc(8, 0, scale)\n pos = np.array([coeff_x[start:end].dot(t0), coeff_y[start:end].dot(t0), coeff_z[start:end].dot(t0)])\n\n t1 = get_poly_cc(8, 1, scale)\n # chain rule applied\n vel = np.array([coeff_x[start:end].dot(t1), coeff_y[start:end].dot(t1), coeff_z[start:end].dot(t1)]) * (1.0 / T[t_index])\n\n t2 = get_poly_cc(8, 2, scale)\n # chain rule applied\n acc = np.array([coeff_x[start:end].dot(t2), coeff_y[start:end].dot(t2), coeff_z[start:end].dot(t2)]) * (1.0 / T[t_index]**2)\n\n t3 = get_poly_cc(8, 3, scale)\n # apply chain rule\n jerk = np.array([coeff_x[start:end].dot(t3), coeff_y[start:end].dot(t3), coeff_z[start:end].dot(t3)]) * (1.0 / T[t_index]**3)\n\n t4 = get_poly_cc(8, 4, scale)\n # apply chain rule\n snap = np.array([coeff_x[start:end].dot(t4), coeff_y[start:end].dot(t4), coeff_z[start:end].dot(t4)]) * (1.0 / T[t_index]**4)\n\n # calculate desired yaw and yaw rate\n\n v_proj = np.array([vel[0], vel[1]])\n\n if( LA.norm(v_proj) == 0.0):\n # if velocity vector is zero, again there should be no change in heading\n next_heading = current_heading\n else:\n next_heading = v_proj/LA.norm(v_proj)\n\n \"\"\"\n try :\n #current_heading = v_proj/LA.norm(v_proj) #* (1.0 / T[0]) #np.array([coeff_x[0:8].dot(t0), coeff_y[0:8].dot(t0)]) * (1.0 / T[0])\n next_heading = v_proj/LA.norm(v_proj)\n except ZeroDivisionError:\n # velocity vector 
magnitude was zero so there should be no change in heading!\n next_heading = current_heading\n \"\"\" \n\n # angle between current vector with the next heading vector\n # from a * b = |a|*|b|cos(angle)\n delta_psi = np.arccos(np.dot(current_heading, next_heading) / (LA.norm(current_heading)*LA.norm(next_heading)))\n # cross product allow us to determine rotating direction\n norm_v = np.cross(current_heading,next_heading)\n\n if norm_v > 0:\n yaw += delta_psi\n elif norm_v < 0:\n yaw -= delta_psi\n else:\n # normv = 0! if there is no change in yaw, do not modify it!\n pass\n\n # dirty hack, quadcopter's yaw range represented by quaternion is [-pi, pi]\n while yaw > np.pi:\n yaw = yaw - 2*np.pi\n\n # print next_heading, current_heading, \"yaw\", yaw*180/np.pi, 'pos', pos\n current_heading = next_heading\n #print(current_heading)\n yawdot = delta_psi / 0.005 # dt is control period\n max_yawdot = 5.0 #rad/s\n if(abs(yawdot) > max_yawdot):\n yawdot = (yawdot/abs(yawdot))*max_yawdot # make it 5rad/s with appropriate direction\n \n yaw = np.sin(2*t)*0.0\n yawdot = 2*np.cos(2*t)*0.0\n yawddot = -4*np.sin(2*t)*0.0\n return DesiredState(pos, vel, acc, jerk, snap, yaw, yawdot, yawddot)", "def screw_trajectory(X_start, X_end, Tf, N, method):\n N = int(N)\n timegap = Tf / (N - 1.0)\n traj = [[None]] * N\n\n X_start_end = np.dot(ch3.transf_matrix_inverse(X_start), X_end)\n for i in range(N):\n if method == 3:\n s = cubic_time_scaling(Tf, timegap * i)\n else:\n s = quintic_time_scaling(Tf, timegap * i)\n\n fractioned_X_start_end = ch3.se3_to_transf_matrix(ch3.transf_matrix_to_se3(X_start_end) * s) # applying s to the se3 matrix instead of the transf matrix, it works\n traj[i] = np.dot(X_start, fractioned_X_start_end)\n return traj", "def get_grasp_joint_trajectory(self, start_joints, target_pose, n_steps=40, ignore_orientation=False, link_name=None):\n link_name = link_name if link_name is not None else self.tool_frame\n \n assert len(start_joints) == len(self.joint_indices)\n assert target_pose.frame.count('base_link') == 1\n self.sim.update()\n \n # set active manipulator and start joint positions\n self.robot.SetDOFValues(start_joints, self.joint_indices)\n \n # initialize trajopt inputs\n rave_pose = tfx.pose(self.sim.transform_from_to(target_pose.matrix, target_pose.frame, 'world'))\n quat = rave_pose.orientation\n xyz = rave_pose.position\n quat_target = [quat.w, quat.x, quat.y, quat.z]\n xyz_target = [xyz.x, xyz.y, xyz.z]\n rave_mat = rave.matrixFromPose(np.r_[quat_target, xyz_target])\n \n# init_joint_target = None\n init_joint_target = self.sim.ik_for_link(rave_pose.matrix, self.manip, link_name, 0)\n if init_joint_target is not None:\n init_joint_target = self._closer_joint_angles(init_joint_target, start_joints)\n \n init_traj = self.ik_point(start_joints, xyz, n_steps=n_steps, link_name=link_name)\n \n request = self._get_grasp_trajopt_request(xyz_target, quat_target, n_steps,\n ignore_orientation=ignore_orientation, link_name=link_name, init_traj=init_traj)\n \n # convert dictionary into json-formatted string\n s = json.dumps(request) \n # create object that stores optimization problem\n prob = trajoptpy.ConstructProblem(s, self.sim.env)\n \n # TODO: worth doing?\n# tool_link = self.robot.GetLink(link_name)\n# def point_at(x):\n# self.robot.SetDOFValues(x, self.joint_indices, False)\n# T = tool_link.GetTransform()\n# local_dir = xyz.array - T[:3,3]\n# return T[1:3,:3].dot(local_dir)\n# \n# for t in xrange(int(0.8*n_steps), n_steps-1):\n# #prob.AddConstraint(point_at, [(t,j) for j in 
xrange(len(self.joint_indices))], \"EQ\", \"POINT_AT_%i\"%t)\n# prob.AddErrorCost(point_at, [(t,j) for j in xrange(len(self.joint_indices))], \"ABS\", \"POINT_AT_%i\"%t)\n\n # do optimization\n result = trajoptpy.OptimizeProblem(prob)\n \n prob.SetRobotActiveDOFs() # set robot DOFs to DOFs in optimization problem\n #num_upsampled_collisions = len(traj_collisions(result.GetTraj(), self.robot, n=100))\n num_upsampled_collisions = self._num_collisions(result.GetTraj())\n print('Number of collisions: {0}'.format(num_upsampled_collisions))\n self.robot.SetDOFValues(start_joints, self.joint_indices)\n if num_upsampled_collisions > 2:\n #if not traj_is_safe(result.GetTraj()[:], self.robot): # Check that trajectory is collision free\n return None\n else:\n return result.GetTraj()", "def create_and_send_wam_trajectory(wam_start, wam_end, duration, frequency=250):\n\n joint_traj, joint_vels = create_joint_trajectory(wam_start, wam_end,\n duration, frequency)\n send_joint_trajectory(joint_traj, joint_vels, frequency)", "def trajectory_point(self, t, jointspace):\n point = JointTrajectoryPoint()\n delta_t = .01\n if jointspace:\n x_t, x_t_1, x_t_2 = None, None, None\n ik_attempts = 0\n theta_t = theta_t_1 = theta_t_2 = None\n while theta_t_2 is None:\n theta_t_2 = self.get_ik(self.target_position(t-2*delta_t))\n while theta_t_1 is None:\n theta_t_1 = self.get_ik(self.target_position(t-delta_t))\n while theta_t is None:\n theta_t = self.get_ik(self.target_position(t))\n \n # we said you shouldn't simply take a finite difference when creating\n # the path, why do you think we're doing that here?\n point.positions = theta_t\n # print 'theta_t: {0}, {1}'.format(theta_t.shape, theta_t)\n # print 'theta_t_1: {0}, {1}'.format(theta_t_1.shape, theta_t_1)\n\n vel_t_prev = (theta_t_1 - theta_t_2) / delta_t\n vel_t_curr = (theta_t - theta_t_1) / delta_t\n vel_t_avg = (vel_t_prev + vel_t_curr) / 2.\n\n point.velocities = vel_t_avg #(theta_t - theta_t_1) / delta_t\n point.accelerations = (theta_t - 2*theta_t_1 + theta_t_2) / (2*delta_t)\n # if t >= 3.:\n # import pdb; pdb.set_trace()\n else:\n point.positions = self.target_position(t)\n point.velocities = self.target_velocity(t)\n point.accelerations = self.target_acceleration(t)\n point.time_from_start = rospy.Duration.from_sec(t)\n return point", "def create_csc_trajectory(self, origin, destination, rot_orig='l', rot_dest='l'):\n\n assert len(origin) == 4\n assert len(destination) == 4\n\n assert rot_orig.lower() == 'l' or rot_orig.lower() == 'r'\n assert rot_dest.lower() == 'l' or rot_dest.lower() == 'r'\n\n epsa = 1 if rot_orig.lower() == 'l' else -1\n epsb = 1 if rot_dest.lower() == 'l' else -1\n\n r = self.min_turn_radius\n\n po = origin[0:2]\n po_ang = origin[3]\n po_vec = (np.cos(po_ang), np.sin(po_ang))\n pd = destination[0:2]\n pd_ang = destination[3]\n pd_vec = (np.cos(pd_ang), np.sin(pd_ang))\n\n # po_vec and pd_vec are rotated ±90º in order to find the direction of the circle center\n co = po + epsa * r * np.array([-np.sin(po_ang), np.cos(po_ang)]) # po_vec is rotated ±90º\n cd = pd + epsb * r * np.array([-np.sin(pd_ang), np.cos(pd_ang)]) # pd_vec is rotated ±90º\n\n # plt.plot([co[0], cd[0]], [co[1], cd[1]], 'x')\n # plt.plot([po[0], pd[0]], [po[1], pd[1]], 'o')\n\n alpha = 0 #\n ell = 0 # Half the length of the straight part\n if np.sign(epsa) != np.sign(epsb): # LSR or RSL\n ell2 = 0.25 * np.linalg.norm(cd - co)**2 - r**2\n if ell2 < 0:\n return np.inf, None\n ell = np.sqrt(ell2)\n alpha = -epsa * np.arctan2(ell, r)\n\n if epsa * epsb == 1: # RSR 
or LSL\n ell = 0.5 * np.linalg.norm(cd - co)\n alpha = -epsa * np.pi / 2\n\n rot = np.array([[np.cos(alpha), -np.sin(alpha)], [np.sin(alpha), np.cos(alpha)]])\n do = co + (r / (np.linalg.norm(cd - co))) * rot @ (cd - co)\n dd = cd + epsa * epsb * (do - co)\n betao = DubinsUAV2D._sawtooth(DubinsUAV2D._angle(po - co, do - co), epsa)\n betad = DubinsUAV2D._sawtooth(DubinsUAV2D._angle(dd - cd, pd - cd), epsb)\n\n L = r * (abs(betad) + abs(betao) + 2 * ell)\n\n # DubinsUAV2D._draw_arc(co, po, betao,'black')\n # DubinsUAV2D._draw_arc(cd, pd, -betad,'blue')\n # plt.plot([do[0], dd[0]],[do[1], dd[1]], 'r')\n\n # returns:\n # L: lenght of the trajectory\n # r: radius of both circumferences\n # co: center of origin circumference\n # epsa: rotation orientation of origin circumference (1 = left or -1 = right\n # do: switching point between origin circ and line\n # betao: angle of do\n # cd: center of destination circumference\n # epsb: rotation orientation of destination circumference (1 = left or -1 = right\n # dd: switching point between line and destination circ\n # betad: angle of dd\n return L, r, co, epsa, do, betao, cd, epsb, dd, betad", "def accelerando(self, start, end):\n new_melody = self.copy()\n L = len(new_melody)\n for i in range(len(new_melody)):\n local_tempo = int(end * (i / L) + start * ((L - i)/L))\n new_melody.notes[i] = new_melody.notes[i].set_tempo(local_tempo)\n return new_melody", "def trajectory_point(self, t, jointspace):\n point = JointTrajectoryPoint()\n delta_t = .01\n if jointspace:\n x_t, x_t_1, x_t_2 = None, None, None\n ik_attempts = 0\n theta_t_2 = self.get_ik(self.target_position(t-2*delta_t))\n theta_t_1 = self.get_ik(self.target_position(t-delta_t))\n theta_t = self.get_ik(self.target_position(t))\n # print(self.target_position(t))\n #theta_t = np.array(theta_t)\n # print(theta_t)\n \n # we said you shouldn't simply take a finite difference when creating\n # the path, why do you think we're doing that here? 
cause you're mean\n\n point.positions = theta_t\n point.velocities = (theta_t - theta_t_1) / delta_t\n point.accelerations = (theta_t - 2*theta_t_1 + theta_t_2) / (2*delta_t)\n\n else:\n point.positions = self.target_position(t)\n point.velocities = self.target_velocity(t)\n point.accelerations = self.target_acceleration(t)\n point.time_from_start = rospy.Duration.from_sec(t)\n return point", "def parabolicPath(\n self, startTransform: Transformation, endTransform: Transformation, zdiff: float, sidediff: float, rotdiff: float, ratio: float\n ) -> Transformation:\n\n step_time = self.torsoStepTime()\n distance_between_step = Transformation.distance(startTransform, endTransform)\n if distance_between_step == 0.0:\n delta = 0.001\n angle = startTransform.orientation_euler[2]\n delta_tr = [np.cos(angle) * delta, np.sin(angle) * delta, 0]\n endTransform = deepcopy(endTransform)\n endTransform.position = endTransform.position + delta_tr\n distance_between_step = Transformation.distance(startTransform, endTransform)\n\n assert distance_between_step != 0.0\n height_per_step = np.linalg.norm([zdiff, sidediff])\n\n h = height_per_step\n a = distance_between_step / 2\n\n # Using Newton Approximation Method\n # https://math.stackexchange.com/questions/3129154/divide-a-parabola-in-segments-of-equal-length\n L = distance_between_step\n aa = 4 * h / L\n\n f = lambda x: x * np.sqrt(1 + (x**2)) + np.arcsinh(x) # f = @(x) x * sqrt(1+x^2) + asinh(x);\n s = ratio\n J = lambda X: 2 * np.sqrt(1 + (X**2)) # J = @(X) 2 * sqrt(1+X^2);\n r = lambda X: f(X) - (1 - (2 * s)) * f(aa) # r = @(X) f(X) - (1-2*s)*f(aa);\n\n X = 0\n while np.abs(r(X)) > 0.0001:\n X = X - r(X) / J(X)\n\n if aa == 0:\n dist = ratio * L\n else:\n dist = 0.5 * (1 - X / aa) * L\n\n # Calculate intermediate transform\n position_time = dist / distance_between_step * step_time\n if position_time < 0:\n position_time = 0\n\n ratio = position_time / step_time\n if ratio < 0:\n ratio = 0\n elif ratio > 1:\n ratio = 1\n\n # Interpolate between the two H-transforms\n t1 = Transformation.transformation_weighted_average(startTransform, endTransform, ratio)\n\n x = (-a) + dist\n y = h * (1 - (x**2) / (a**2))\n\n zdelta = np.cos(np.arctan2(sidediff, zdiff)) * y\n ydelta = np.sin(np.arctan2(sidediff, zdiff)) * y\n if rotdiff != 0:\n thetadelta = y / height_per_step * rotdiff\n else:\n thetadelta = 0\n\n t2 = Transformation(\n position=[0, ydelta, zdelta],\n quaternion=Transformation.get_quaternion_from_axis_angle(vector=[1, 0, 0], angle=thetadelta),\n )\n position = t1 @ t2\n return position", "def smooth_linear_ramp(t, kinematic_parameters):\n ramp_stage_acceleration = kinematic_parameters[0]\n ramp_start_time = kinematic_parameters[1]\n i_ramp_end_time = kinematic_parameters[2]\n steady_end_time = kinematic_parameters[3]\n end_ramp_end_time = kinematic_parameters[4]\n smooth_factor = kinematic_parameters[5]\n ramp_mode = kinematic_parameters[6]\n ramp_constant_time = kinematic_parameters[7]\n pitch_mode = kinematic_parameters[8]\n pitch_time = kinematic_parameters[9]\n pitch_delay_time_fraction = kinematic_parameters[10]\n pitch_acceleration = kinematic_parameters[11]\n pitch_acc_time_fraction = kinematic_parameters[12]\n section_location = kinematic_parameters[13]\n bstroke = kinematic_parameters[14]\n\n def logcosh(x):\n # s always has real part >= 0\n s = np.sign(x) * x\n p = np.exp(-2 * s)\n return s + np.log1p(p) - np.log(2)\n\n def omega(x):\n \"\"\"linear ramp rotation speed function\"\"\"\n # if ramp_start_time - ramp_constant_time <= x <= 
end_ramp_end_time + ramp_constant_time:\n # f_t0 = smooth_factor * (x - ramp_start_time)\n # f_t1 = smooth_factor * (x - i_ramp_end_time)\n # if ramp_mode == 'with_end_acc':\n # f_t2 = smooth_factor * (x - steady_end_time)\n # f_t3 = smooth_factor * (x - end_ramp_end_time)\n # elif ramp_mode == 'no_end_acc':\n # f_t2 = smooth_factor * ramp_start_time\n # f_t3 = smooth_factor * i_ramp_end_time\n\n # omegax = (ramp_stage_acceleration / 2) / smooth_factor * (\n # logcosh(f_t0) - logcosh(f_t1) + logcosh(f_t3) - logcosh(f_t2))\n # else:\n # if bstroke == 'yes' and x <= 2 * (end_ramp_end_time +\n # ramp_constant_time):\n # x -= end_ramp_end_time + ramp_constant_time\n # f_t0 = smooth_factor * (x - ramp_start_time)\n # f_t1 = smooth_factor * (x - i_ramp_end_time)\n # if ramp_mode == 'with_end_acc':\n # f_t2 = smooth_factor * (x - steady_end_time)\n # f_t3 = smooth_factor * (x - end_ramp_end_time)\n # elif ramp_mode == 'no_end_acc':\n # f_t2 = smooth_factor * ramp_start_time\n # f_t3 = smooth_factor * i_ramp_end_time\n\n # omegax = -(ramp_stage_acceleration / 2) / smooth_factor * (\n # logcosh(f_t0) - logcosh(f_t1) + logcosh(f_t3) -\n # logcosh(f_t2))\n # else:\n # omegax = 0\n\n if bstroke == 'no':\n f_t0 = smooth_factor * (x - ramp_start_time)\n f_t1 = smooth_factor * (x - i_ramp_end_time)\n if ramp_mode == 'with_end_acc':\n f_t2 = smooth_factor * (x - steady_end_time)\n f_t3 = smooth_factor * (x - end_ramp_end_time)\n elif ramp_mode == 'no_end_acc':\n f_t2 = smooth_factor * ramp_start_time\n f_t3 = smooth_factor * i_ramp_end_time\n\n omegax = (ramp_stage_acceleration / 2) / smooth_factor * (\n logcosh(f_t0) - logcosh(f_t1) + logcosh(f_t3) - logcosh(f_t2))\n\n else:\n if x <= end_ramp_end_time + ramp_constant_time:\n f_t0 = smooth_factor * (x - ramp_start_time)\n f_t1 = smooth_factor * (x - i_ramp_end_time)\n if ramp_mode == 'with_end_acc':\n f_t2 = smooth_factor * (x - steady_end_time)\n f_t3 = smooth_factor * (x - end_ramp_end_time)\n elif ramp_mode == 'no_end_acc':\n f_t2 = smooth_factor * ramp_start_time\n f_t3 = smooth_factor * i_ramp_end_time\n\n omegax = (ramp_stage_acceleration /\n 2) / smooth_factor * (logcosh(f_t0) - logcosh(f_t1) +\n logcosh(f_t3) - logcosh(f_t2))\n\n else:\n x -= end_ramp_end_time + ramp_constant_time\n f_t0 = smooth_factor * (x - ramp_start_time)\n f_t1 = smooth_factor * (x - i_ramp_end_time)\n if ramp_mode == 'with_end_acc':\n f_t2 = smooth_factor * (x - steady_end_time)\n f_t3 = smooth_factor * (x - end_ramp_end_time)\n elif ramp_mode == 'no_end_acc':\n f_t2 = smooth_factor * ramp_start_time\n f_t3 = smooth_factor * i_ramp_end_time\n\n omegax = -(ramp_stage_acceleration / 2) / smooth_factor * (\n logcosh(f_t0) - logcosh(f_t1) + logcosh(f_t3) -\n logcosh(f_t2))\n\n return omegax\n\n steady_rotation_omega = omega((i_ramp_end_time + steady_end_time) / 2)\n omega_print = steady_rotation_omega * np.pi / 180\n print('steady revolving omega = %s' % omega_print)\n\n dphi_data = []\n for ti in t:\n dphi_data.append(omega(ti))\n dphi_spl = UnivariateSpline(t, dphi_data, s=0)\n\n def ddphi(x):\n \"\"\"flapping angular acceleration function\"\"\"\n return dphi_spl.derivatives(x)[1]\n\n ramp_angle = dphi_spl.integral(0, i_ramp_end_time)\n print('initial linear ramp angle = %s' % ramp_angle)\n\n if ramp_mode == 'with_end_acc':\n end_ramp_angle = dphi_spl.integral(\n steady_end_time, end_ramp_end_time + ramp_constant_time)\n print('end linear ramp angle = %s' % end_ramp_angle)\n\n stroke_angle = dphi_spl.integral(0, end_ramp_end_time + ramp_constant_time)\n st_dist = 
np.abs(stroke_angle) * np.pi / 180 * section_location\n print('2d wing travel distance = %s' % st_dist)\n\n def phi(x):\n \"\"\"rotation angle function\"\"\"\n return dphi_spl.integral(0, x)\n\n #--pitching motion functions--\n if pitch_mode == 'with_end_pitch':\n pitch_delay_time = (pitch_time +\n 2 * ramp_constant_time) * pitch_delay_time_fraction\n pitch_acc_time = pitch_time * pitch_acc_time_fraction / 2\n\n pitch_start_time = end_ramp_end_time - pitch_time + pitch_delay_time\n p_acc_end_time = pitch_start_time + pitch_acc_time\n pitch_end_time = pitch_start_time + pitch_time\n p_decc_start_time = pitch_end_time - pitch_acc_time\n\n def dalf(x):\n \"\"\"linear ramp pitch speed function\"\"\"\n # if pitch_start_time - ramp_constant_time <= x <= pitch_end_time + ramp_constant_time:\n # f_t0 = smooth_factor * (x - pitch_start_time)\n # f_t1 = smooth_factor * (x - p_acc_end_time)\n # f_t2 = smooth_factor * (x - p_decc_start_time)\n # f_t3 = smooth_factor * (x - pitch_end_time)\n\n # dalfx = (pitch_acceleration /\n # 2) / smooth_factor * (logcosh(f_t0) - logcosh(f_t1) +\n # logcosh(f_t3) - logcosh(f_t2))\n # else:\n # dalfx = 0\n f_t0 = smooth_factor * (x - pitch_start_time)\n f_t1 = smooth_factor * (x - p_acc_end_time)\n f_t2 = smooth_factor * (x - p_decc_start_time)\n f_t3 = smooth_factor * (x - pitch_end_time)\n\n dalfx = (pitch_acceleration / 2) / smooth_factor * (\n logcosh(f_t0) - logcosh(f_t1) + logcosh(f_t3) - logcosh(f_t2))\n return dalfx\n\n dalf_data = []\n for ti in t:\n dalf_data.append(dalf(ti))\n dalf_spl = UnivariateSpline(t, dalf_data, s=0)\n\n pitch_angle = dalf_spl.integral(pitch_start_time - ramp_constant_time,\n pitch_end_time + ramp_constant_time)\n\n print('wing pitch angle = %s' % np.abs(pitch_angle))\n\n steady_pitching_omega = dalf((pitch_start_time + pitch_end_time) / 2)\n omega_print = steady_pitching_omega * np.pi / 180\n print('steady wing pitch omega = %s\\n' % omega_print)\n\n def ddalf(x):\n \"\"\"flapping angular acceleration function\"\"\"\n return dalf_spl.derivatives(x)[1]\n\n def alf(x):\n \"\"\"rotation angle function\"\"\"\n return dalf_spl.integral(0, x)\n\n kinematic_angles = []\n for ti in t:\n if pitch_mode == 'no_end_pitch':\n kinematic_anglesi = [-phi(ti), 0, -omega(ti), 0, -ddphi(ti), 0]\n elif pitch_mode == 'with_end_pitch':\n kinematic_anglesi = [\n -phi(ti), -alf(ti), -omega(ti), -dalf(ti), -ddphi(ti),\n -ddalf(ti)\n ]\n kinematic_angles.append(kinematic_anglesi)\n\n return kinematic_angles", "def plot_trajectories_XYZ(t_start,t_stop):\n \n time, ankle_l_trajectory, ankle_r_trajectory,foot_l_contact,foot_r_contact,muscle_lh_activations, muscle_rh_activations,muscle_lh_forces,muscle_rh_forces,joint_lh_positions,joint_rh_positions = load_data()\n \n index_start = np.where(time == t_start)[0][0]\n index_end = np.where(time == t_stop)[0][0]\n \n time = time[index_start:index_end+1]\n ankle_l_trajectory = ankle_l_trajectory[index_start:index_end+1,:]\n ankle_r_trajectory = ankle_r_trajectory[index_start:index_end+1,:]\n \n #time=np.linspace(1,len(ankle_l_trajectory[:,0]),len(ankle_l_trajectory[:,0]));\n \n plt.figure('Trajectories')\n plt.subplot(311)\n plt.plot(time,ankle_l_trajectory[:,0])\n plt.plot(time,ankle_r_trajectory[:,0])\n #plt.title('Trajectory of the X component')\n plt.xlabel('Time [s]')\n plt.ylabel('X Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n plt.subplot(312)\n plt.plot(time,ankle_l_trajectory[:,1])\n plt.plot(time,ankle_r_trajectory[:,1])\n #plt.title('Trajectory of the Y component')\n 
plt.xlabel('Time [s]')\n plt.ylabel('Y Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n plt.subplot(313)\n plt.plot(time,ankle_l_trajectory[:,2])\n plt.plot(time,ankle_r_trajectory[:,2])\n #plt.title('Trajectory of the Z component')\n plt.xlabel('Time [s]')\n plt.ylabel('Z Position [cm]')\n plt.legend(['Left ankle','Right ankle'],loc='upper right')\n \n# plt.suptitle('Decomposition of the trajectories of the hind feet')\n return", "def __init__(self, total_time, kin, limb, start_pos, ar_tag_pos):\n #raise NotImplementedError\n MotionPath.__init__(self, limb, kin, total_time)\n start_pos = np.array([abs(start_pos[0]), abs(start_pos[1]), abs(start_pos[2])])\n\n\n #The ar tracking function was not working properly so we added the block below \n #to correct it.\n if start_pos[1]>start_pos[0]:\n a = start_pos[0]\n b = start_pos[1]\n start_pos[0] = b\n start_pos[1] = a\n print(\"### Modified start_pos ###\")\n\n\n ar_tag_pos = np.array([abs(ar_tag_pos[0]), abs(ar_tag_pos[1]), abs(ar_tag_pos[2])])\n if ar_tag_pos[1]>ar_tag_pos[0]:\n a = ar_tag_pos[0]\n b = ar_tag_pos[1]\n ar_tag_pos[0] = b\n ar_tag_pos[1] = a\n self.start_pos = start_pos\n\n print(\"!!!!!!!!!!!!!!!!start_pos!!!!!!!!!!!!!!!!\")\n print(start_pos)\n print(\"!!!!!!!!!ar_tag_pos in LinearPath!!!!!!!!!\")\n print(ar_tag_pos)\n ar_tag_pos[2] = start_pos[2]\n self.ar_tag_pos = ar_tag_pos\n self.points_generated = []", "def make_times(self, start, end, delta):\n assert type(start) is float or type(start) is int, \\\n 'Start time not specified with float'\n assert type(end) is float or type(end) is int, \\\n 'End time not specified with a number'\n assert type(delta) is float or type(delta) is int, \\\n 'Time increment not specified with a number'\n assert end >= start, 'End time is before start time'\n self.target_times = []\n step = start\n while step <= end:\n self.target_times.append(step)\n step += delta", "def tilt(self, new_end):\n values = np.array(self._values)\n start = values[0]\n rise, run = 1.0 * (new_end - start), values[-1] - start\n new_slope = rise / run if run else rise / (len(values) - 1)\n self._values = (start + (new_slope * (values - start))).tolist()", "def get_trajectory(h, launch_speed, launch_angle):\n\n v0 = launch_speed\n theta = mp.radians(launch_angle)\n\n N = 100000\n tgrid, dt = np.linspace(0, 15000, N, retstep=True)\n tr = np.empty((N,2))\n v = np.empty((N,2))\n # Initial rocket position, velocity and acceleration\n tr[0] = 0, R + h\n v[0] = v0 * np.sin(theta), v0 * np.cos(theta)\n a = calc_a(tr[0])\n\n for i, t in enumerate(tgrid[1:]):\n # Calculate the rocket's next position based on its instantaneous velocity.\n \n r = tr[i] + v[i] * dt \n \n if np.hypot(*r) < R:\n # Our rocket crashed.\n break\n # Update the rocket's position, velocity and acceleration.\n tr[i+1] = r\n v[i+1] = v[i] + a*dt\n a = calc_a(tr[i+1])\n return tr[:i+1]", "def interp_cubic(p0, p1, t_abs):\n\tT = (p1.time_from_start - p0.time_from_start).to_sec()\n\tt = t_abs - p0.time_from_start.to_sec()\n\tq = [0] * 6\n\tqdot = [0] * 6\n\tqddot = [0] * 6\n\tfor i in range(len(p0.positions)):\n\t\ta = p0.positions[i]\n\t\tb = p0.velocities[i]\n\t\tc = (-3 * p0.positions[i] + 3 * p1.positions[i] - 2 * T * p0.velocities[i] - T * p1.velocities[i]) / T**2\n\t\td = (2 * p0.positions[i] - 2 * p1.positions[i] + T * p0.velocities[i] + T * p1.velocities[i]) / T**3\n\n\t\tq[i] = a + b * t + c * t**2 + d * t**3\n\t\tqdot[i] = b + 2 * c * t + 3 * d * t**2\n\t\tqddot[i] = 2 * c + 6 * d * t\n\treturn 
JointTrajectoryPoint(positions=q, velocities=qdot, accelerations=qddot, time_from_start=rospy.Duration(t_abs))", "def makeSpline(self, waypointTrajectory: Trajectory, loop: bool=False) -> None:\n if loop and waypointTrajectory.milestones[-1] != waypointTrajectory.milestones[0]:\n print(waypointTrajectory.milestones[-1],\"!=\",waypointTrajectory.milestones[0])\n raise ValueError(\"Asking for a loop trajectory but the endpoints don't match up\")\n velocities = []\n t = waypointTrajectory\n d = len(t.milestones[0])\n third = 1.0/3.0\n if len(t.milestones)==1:\n velocities.append([0]*d)\n elif len(t.milestones)==2:\n if loop:\n v = [0.0]*d\n velocities = [v,v]\n else:\n s = (1.0/(t.times[1]-t.times[0]) if (t.times[1]-t.times[0]) != 0 else 0)\n v = vectorops.mul(self.geodesic.difference(t.milestones[1],t.milestones[0]),s) \n velocities.append(v)\n v2 = vectorops.mul(self.geodesic.difference(t.milestones[0],t.milestones[1]),-s) \n velocities.append(v2)\n else:\n N = len(waypointTrajectory.milestones)\n if loop:\n timeiter = zip([-2]+list(range(N-1)),range(0,N),list(range(1,N))+[1])\n else:\n timeiter = zip(range(0,N-2),range(1,N-1),range(2,N))\n for p,i,n in timeiter:\n if p < 0: dtp = t.times[-1] - t.times[-2]\n else: dtp = t.times[i] - t.times[p]\n if n <= i: dtn = t.times[1]-t.times[0]\n else: dtn = t.times[n]-t.times[i]\n assert dtp >= 0 and dtn >= 0\n s2 = (1.0/dtn if dtn != 0 else 0)\n v2 = vectorops.mul(self.geodesic.difference(t.milestones[n],t.milestones[i]),s2)\n s1 = (1.0/dtp if dtp != 0 else 0)\n v1 = vectorops.mul(self.geodesic.difference(t.milestones[p],t.milestones[i]),-s1)\n v = vectorops.mul(vectorops.add(v1,v2),0.5)\n velocities.append(v)\n if not loop:\n #start velocity as linear\n v0 = vectorops.mul(self.geodesic.difference(t.milestones[1],t.milestones[0]),1.0/(t.times[1]-t.times[0]))\n #terminal velocity as quadratic\n vn = vectorops.mul(self.geodesic.difference(t.milestones[-2],t.milestones[-1]),-1.0/(t.times[-1]-t.times[-2]))\n velocities = [v0]+velocities+[vn]\n else:\n assert len(velocities) == N\n GeodesicHermiteTrajectory.__init__(self,self.geodesic,waypointTrajectory.times[:],waypointTrajectory.milestones,velocities)", "def plot_joint_angles_with_contact(t_start,t_stop):\n [time,\n ankle_l_trajectory,\n ankle_r_trajectory,\n foot_l_contact,\n foot_r_contact,\n muscle_lh_activations,\n muscle_rh_activations,\n muscle_lh_forces,\n muscle_rh_forces,\n joint_lh_positions,\n joint_rh_positions] = load_data()\n \n index_start = np.where(time == t_start)[0][0]\n index_end = np.where(time == t_stop)[0][0]\n \n time_plot = time[index_start:index_end+1]\n joint_lh_positions = joint_lh_positions[index_start:index_end+1,:]\n joint_rh_positions = joint_rh_positions[index_start:index_end+1,:]\n \n gait = foot_r_contact[index_start:index_end+1,:]\n foot_l_contact = foot_l_contact[index_start:index_end+1,:]\n \n# gait = np.hstack((foot_r_contact, foot_l_contact))\n\n # Example to plot joint trajectories.\n # Feel free to change or use your own plot tools\n plt.figure()\n ax = plt.subplot(3,1,1)\n# plt.plot(time_plot, np.rad2deg(joint_lh_positions[:, 0]))\n plt.plot(time_plot, np.rad2deg(joint_rh_positions[:, 0]),'r')\n plt.ylabel('Hip Angle [deg]')\n# plt.legend(['Left','Right'],loc='upper right')\n plt.grid('on')\n for t, g in enumerate(gait):\n for l, gait_l in enumerate(g):\n if gait_l:\n add_patch(ax, time_plot[t], -300, width=0.01, height=150)\n ax = plt.subplot(3,1,2)\n# plt.plot(time_plot, np.rad2deg(joint_lh_positions[:, 1]))\n plt.plot(time_plot, 
np.rad2deg(joint_rh_positions[:, 1]),'r')\n plt.ylabel('Knee Angle [deg]')\n# plt.legend(['Left','Right'],loc='upper right')\n plt.grid('on')\n for t, g in enumerate(gait):\n for l, gait_l in enumerate(g):\n if gait_l:\n add_patch(ax, time_plot[t], -600, width=0.01, height=150)\n ax = plt.subplot(3,1,3)\n# plt.plot(time_plot, np.rad2deg(joint_lh_positions[:, 2]))\n plt.plot(time_plot, np.rad2deg(joint_rh_positions[:, 2]),'r')\n plt.grid('on')\n for t, g in enumerate(gait):\n for l, gait_l in enumerate(g):\n if gait_l:\n add_patch(ax, time_plot[t], -300, width=0.01, height=150)\n plt.ylabel('Ankle Angle [deg]')\n# plt.legend(['Left','Right'],loc='upper right')\n plt.xlabel('Time [s]')\n\n return", "def executeTrajectory():\n driveStraight(1, 0.6)\n rotate(0.25)\n driveStraight(1, .45)\n rotate(-0.25)", "def trajectory(self):\n return Trajectory.createFromTuples(self.points)", "def compute_trajectory(\n traj_type: str,\n origin: Position,\n goal: Position,\n step: int,\n origin_qd: float = 0.002,\n goal_qd: float = 0.002,\n) -> rtb.tools.trajectory.Trajectory:\n # print(f\"{origin=}\\n{goal=}\\n\")\n\n trajs = []\n for origin_q, goal_q in zip(origin.xyzrpy, goal.xyzrpy):\n\n if traj_type == \"lspb\":\n traj_func = rtb.tools.trajectory.lspb\n args = [origin_q, goal_q, step]\n elif traj_type == \"tpoly\":\n traj_func = rtb.tools.trajectory.tpoly\n args = [origin_q, goal_q, step, origin_qd, goal_qd]\n\n if origin_q == goal_q:\n trajs.append(\n Trajectory(\n traj_type,\n step,\n [origin_q] * step,\n [0] * step,\n [0] * step,\n istime=False,\n )\n )\n else:\n trajs.append(traj_func(*args))\n\n x = trajs[0].t\n y = np.array([tg.s for tg in trajs]).T\n yd = np.array([tg.sd for tg in trajs]).T\n ydd = np.array([tg.sdd for tg in trajs]).T\n\n istime = trajs[0].istime\n\n return Trajectory(\"mtraj\", x, y, yd, ydd, istime)", "def compute_trajectory():\n pass", "def makeSpline(self,\n waypointTrajectory: Trajectory,\n preventOvershoot: bool = True,\n loop: bool = False\n ) -> None:\n if loop and waypointTrajectory.milestones[-1] != waypointTrajectory.milestones[0]:\n raise ValueError(\"Asking for a loop trajectory but the endpoints don't match up\")\n velocities = []\n t = waypointTrajectory\n d = len(t.milestones[0])\n if len(t.milestones)==1:\n velocities.append([0]*d)\n elif len(t.milestones)==2:\n if loop:\n v = [0]*d\n else:\n s = (1.0/(t.times[1]-t.times[0]) if (t.times[1]-t.times[0]) != 0 else 0)\n v = vectorops.mul(vectorops.sub(t.milestones[1],t.milestones[0]),s) \n velocities.append(v)\n velocities.append(v)\n else:\n third = 1.0/3.0\n N = len(waypointTrajectory.milestones)\n if loop:\n timeiter = zip([-2]+list(range(N-1)),range(0,N),list(range(1,N))+[1])\n else:\n timeiter = zip(range(0,N-2),range(1,N-1),range(2,N))\n for p,i,n in timeiter:\n if p < 0:\n dtp = t.times[-1] - t.times[-2]\n else:\n dtp = t.times[i] - t.times[p]\n if n <= i:\n dtn = t.times[1]-t.times[0]\n else:\n dtn = t.times[n]-t.times[i]\n assert dtp >= 0 and dtn >= 0\n s = (1.0/(dtp+dtn) if (dtp+dtn) != 0 else 0)\n v = vectorops.mul(vectorops.sub(t.milestones[n],t.milestones[p]),s)\n if preventOvershoot:\n for j,(x,a,b) in enumerate(zip(t.milestones[i],t.milestones[p],t.milestones[n])):\n if x <= min(a,b):\n v[j] = 0.0\n elif x >= max(a,b):\n v[j] = 0.0\n elif v[j] < 0 and x - v[j]*third*dtp >= a:\n v[j] = 3.0/dtp*(x-a)\n elif v[j] > 0 and x - v[j]*third*dtp <= a:\n v[j] = 3.0/dtp*(x-a)\n elif v[j] < 0 and x + v[j]*third*dtn < b:\n v[j] = 3.0/dtn*(b-x)\n elif v[j] > 0 and x + v[j]*third*dtn > b:\n v[j] = 3.0/dtn*(b-x)\n \n 
velocities.append(v)\n if not loop:\n #start velocity as quadratic\n x2 = vectorops.madd(t.milestones[1],velocities[0],-third*(t.times[1]-t.times[0]))\n x1 = vectorops.madd(x2,vectorops.sub(t.milestones[1],t.milestones[0]),-third)\n v0 = vectorops.mul(vectorops.sub(x1,t.milestones[0]),3.0/(t.times[1]-t.times[0]))\n #terminal velocity as quadratic\n xn_2 = vectorops.madd(t.milestones[-2],velocities[-1],third*(t.times[-1]-t.times[-2]))\n xn_1 = vectorops.madd(xn_2,vectorops.sub(t.milestones[-1],t.milestones[-2]),third)\n vn = vectorops.mul(vectorops.sub(t.milestones[-1],xn_1),3.0/(t.times[-1]-t.times[-2]))\n velocities = [v0]+velocities+[vn]\n self.__init__(waypointTrajectory.times[:],waypointTrajectory.milestones,velocities)", "def _lerp(self, start_value, end_value):\n # @todo: can probably replace this with np.interp(self.step_lerp_pcts, [0, 1], [start_value, end_value])\n return (1.0-self.step_lerp_pcts)*start_value + self.step_lerp_pcts*end_value", "def line_points(start, end):\n # Setup initial conditions\n x1, y1 = start.astuple()\n x2, y2 = end.astuple()\n dx = x2 - x1\n dy = y2 - y1\n \n # Determine how steep the line is\n is_steep = abs(dy) > abs(dx)\n \n # Rotate line\n if is_steep:\n x1, y1 = y1, x1\n x2, y2 = y2, x2\n \n # Swap start and end points if necessary and store swap state\n swapped = False\n if x1 > x2:\n x1, x2 = x2, x1\n y1, y2 = y2, y1\n swapped = True\n \n # Recalculate differentials\n dx = x2 - x1\n dy = y2 - y1\n \n # Calculate error\n error = int(dx / 2.0)\n ystep = 1 if y1 < y2 else -1\n \n # Iterate over bounding box generating points between start and end\n y = y1\n points = []\n for x in range(x1, x2 + 1):\n coord = Int2(y, x) if is_steep else Int2(x, y)\n points.append(coord)\n error -= abs(dy)\n if error < 0:\n y += ystep\n error += dx\n \n # Reverse the list if the coordinates were swapped\n if swapped:\n points.reverse()\n return points" ]
[ "0.630769", "0.62118566", "0.6039857", "0.59671867", "0.59664977", "0.58373964", "0.578141", "0.57333755", "0.57225364", "0.5712431", "0.5712421", "0.57060003", "0.57044417", "0.569924", "0.5694327", "0.5621275", "0.561819", "0.56162274", "0.56119204", "0.56117755", "0.55802137", "0.5559946", "0.55585223", "0.55566347", "0.5539639", "0.55095327", "0.5509335", "0.5504307", "0.5391452", "0.53893185" ]
0.7083624
0
This is used to send a trajectory to the WAM arm at a given frequency.
def send_joint_trajectory(trajectory, velocities, frequency=250):
    pub = rospy.Publisher("/wam/jnt_pos_cmd", RTJointPos, queue_size=10)
    #If wam_node is running, it will be connected to this publisher.
    #Mostly this loop is here because you want to make sure the publisher
    #gets set up before it starts sending information.
    while pub.get_num_connections() < 1:
        print "Waiting on the publisher to go up."
        rospy.sleep(0.5)

    trajectory_length = trajectory.__len__()
    finished = False
    traj_row = 0
    message_for_service = RTJointPos()

    r = rospy.Rate(frequency)

    while not rospy.is_shutdown() and not finished:
        message_for_service.joints = trajectory[traj_row]
        message_for_service.rate_limits = velocities[traj_row]
        traj_row += 1
        pub.publish(message_for_service)
        if traj_row == trajectory_length - 1:
            finished = True
        r.sleep()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def send_speed(self, linear_speed, angular_speed):\n ### Makes a new Twist message\n msg_cmd_vel = Twist()\n \t# Linear velocity\n \tmsg_cmd_vel.linear.x = linear_speed\n \tmsg_cmd_vel.linear.y = 0.0\n \tmsg_cmd_vel.linear.z = 0.0\n \t# Angular velocity\n \tmsg_cmd_vel.angular.x = 0.0\n \tmsg_cmd_vel.angular.y = 0.0\n \tmsg_cmd_vel.angular.z = angular_speed\n ### Publishes the message\n self.cmd_vel.publish(msg_cmd_vel)", "def create_and_send_wam_trajectory(wam_start, wam_end, duration, frequency=250):\n\n joint_traj, joint_vels = create_joint_trajectory(wam_start, wam_end,\n duration, frequency)\n send_joint_trajectory(joint_traj, joint_vels, frequency)", "def __stream_triggered(self):\n # Call this every time period\n thread = Timer(self.stream_time, self.__stream_triggered)\n thread.start()\n self.__threads.append(thread)\n\n if len(self.__spike_buffer) > 2:\n speed = self.__get_speed()\n print(speed)\n self.__stream_send(speed)", "def __stream_send(self, speed):\n print(speed)\n self.conn.send(str(speed).zfill(18).encode())", "def send(self, seq_number, *voltage_list):\r\n\r\n timestamp = time.perf_counter()\r\n volt_list = list()\r\n for volt in voltage_list:\r\n volt_list.append(volt)\r\n try:\r\n self.sock.sendto(struct.pack(packet.H2R_PACKET_FORMAT, seq_number, packet.time2int(timestamp), *volt_list),\r\n (self.robot_ip, self.robot_port))\r\n\r\n self.tx_cntr.inc()\r\n\r\n self.tastx_ks[seq_number + 1] = timestamp\r\n\r\n try:\r\n tsr_k = self.tsr_ks[seq_number]\r\n tsstx_k = self.tsstx_ks[seq_number]\r\n tssrx_k = self.tssrx_ks[seq_number]\r\n tastx_k = self.tastx_ks[seq_number]\r\n tasrx_k = self.tasrx_ks[seq_number]\r\n taw_k = self.taw_ks[seq_number]\r\n\r\n if packet.time2int(tasrx_k) is not 0:\r\n self.tsr_k_logger.timestamp(timestamp=tsr_k, value=tsr_k)\r\n self.tsstx_k_logger.timestamp(timestamp=tsstx_k, value=tsstx_k)\r\n\r\n self.tssrx_k_logger.timestamp(timestamp=tssrx_k, value=tssrx_k)\r\n self.tastx_k_logger.timestamp(timestamp=tastx_k, value=tastx_k)\r\n\r\n self.tasrx_k_logger.timestamp(timestamp=tasrx_k, value=tasrx_k)\r\n self.taw_k_logger.timestamp(timestamp=taw_k, value=taw_k)\r\n\r\n del self.tsr_ks[seq_number - 1]\r\n del self.tsstx_ks[seq_number - 1]\r\n\r\n del self.tasrx_ks[seq_number - 1]\r\n del self.taw_ks[seq_number - 1]\r\n\r\n except KeyError:\r\n logging.debug(\"Packet not found\")\r\n\r\n except socket.error:\r\n logging.error('Tx error')\r\n return", "def send_fft_osc(self):\n self.client.send_message(\"/fft_train\", list(self.fft_bins_y))", "def send_data(self, SPEED, STEER, BRAKE, GEAR):\n GEAR = 2 if SPEED >= 0.0 else 0\n\n # if self.feedbackMsg.AorM == 0:\n # return\n\n if self.doPIControl is True:\n\n current_speed = self.mps2kph(self.feedbackMsg.speed) # kph\n desired_speed = SPEED # kph\n SPEED, BRAKE = self.PIControl(\n currentSpeed=current_speed, desiredSpeed=desired_speed, brake=BRAKE)\n\n SPEED = abs(SPEED) * 10\n if SPEED > 200:\n SPEED = 200\n elif SPEED < 0:\n SPEED = 0\n\n STEER = STEER * 71\n if STEER > 1999:\n STEER = 1999\n if STEER < -1999:\n STEER = -1999\n\n try:\n\n if STEER >= 0:\n self.DATA[8] = int(STEER // 256)\n self.DATA[9] = int(STEER % 256)\n else:\n STEER = -STEER\n self.DATA[8] = int(255 - STEER // 256)\n self.DATA[9] = int(255 - STEER % 256)\n\n self.DATA[5] = GEAR # GEAR\n self.DATA[6] = int(SPEED // 256)\n self.DATA[7] = int(SPEED % 256)\n self.DATA[10] = BRAKE # BREAK\n self.DATA[11] = self.ALIVE\n\n self.ser.write((self.DATA))\n\n self.ALIVE = self.ALIVE + 1\n if self.ALIVE == 256:\n self.ALIVE = 0\n\n except 
Exception as ex:\n print(ex)", "def send_list(self, frequencies=[1e9,2e9,3e9,4e9], powers=[-10,-5,-2,0], dwell=0, delay=0):\r\n _debug('api.send_list()')\r\n \r\n # Handle integers or lists for either frequencies or powers\r\n if not _s.fun.is_iterable(frequencies): frequencies = [frequencies]\r\n if not _s.fun.is_iterable(powers): powers = [powers]\r\n \r\n # Handle numpy arrays\r\n if not type(frequencies) == 'list': frequencies = list(frequencies)\r\n if not type(powers) == 'list': powers = list(powers)\r\n \r\n # Handle length-1 arrays:\r\n if len(frequencies) == 1: frequencies = frequencies*len(powers)\r\n if len(powers) == 1: powers = powers *len(frequencies)\r\n \r\n # Poop if the lengths don't match\r\n if not len(frequencies) == len(powers): \r\n print(\"ERROR: Lengths must match!\")\r\n return\r\n \r\n # Copy the input\r\n self.fs = frequencies\r\n self.ps = powers\r\n self.dwell = dwell\r\n self.delay = delay", "def set_frequency(self, frequency):\n\n if frequency == 1:\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 0, 0)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 1, 0)\n if frequency == 2:\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 0, 1)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 1, 0)\n if frequency == 3:\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 0, 0)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 1, 1)\n if frequency == 4:\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 0, 1)\n self.__rtcconfig = self.__helper.updatebyte(self.__rtcconfig, 1, 1)\n self.__bus.write_byte_data(\n self.__rtcaddress, self.CONTROL, self.__rtcconfig)\n return", "def single_freq_time(params):\n f = params[0]\n t = params[1]\n Ref_param = params[2] \n Ref = pp.Reflectometer_Output(Ref_param.file_path, [f], [t],\n Ref_param.n_cross_section,\n Ref_param.FWR_dimension, True, \n Ref_param.receiver_file_name)\n return Ref.E_out", "def move_wam_from_current_location(wam_end, duration, frequency=250):\n wam_start = get_wam_joint_coordinates()\n joint_traj, joint_vels = create_joint_trajectory(wam_start, wam_end,\n duration, frequency)\n send_joint_trajectory(joint_traj, joint_vels, frequency)", "def do_wave(l, wave_type, r, g, b, duration, repeat):\n command = create_wave_command(\n wave_type, r, g, b, duration, repeat\n )\n l.write(command)", "def send_to_port(self):\r\n time.sleep(2)\r\n # ser.write(\"R\".encode())\r\n ser.flush()\r\n ser.write(\"{},{},{},{},{}\".format(self.x_Pos, self.y_Pos, self.t_Tap, self.U_on, self.u_off).encode())\r\n # ser.flush()\r\n # while (1 == 1):\r\n # mydata = ser.readline().lstrip()\r\n # print(mydata.decode('utf-8'))\r\n # value = str(mydata)\r", "def set_frequency(self):\r\n def move_synth(delta_f_synth):\r\n sign_delta_f_synth = int(delta_f_synth/abs(delta_f_synth))\r\n stepsize_Hz = int(10)\r\n num_steps = int(abs(delta_f_synth)/stepsize_Hz)\r\n remainder_Hz = round(abs(delta_f_synth)%stepsize_Hz,1)\r\n self.synth.set_incr(stepsize_Hz, 'Hz')\r\n for nn in range(num_steps): # slowly move the synth by delta_f_synth in stepsize steps\r\n self.synth.walk(sign_delta_f_synth)\r\n time.sleep(0.1)\r\n self.synth.set_incr(remainder_Hz, 'Hz')\r\n self.synth.walk(sign_delta_f_synth)\r\n time.sleep(0.1)\r\n \r\n def get_delta_f_synth():\r\n #get latest f_rep,f_0\r\n self.get_frequency() \r\n #calculate required f_rep to get desired PA_freq. 
switches n and frep in above eq.\r\n f_rep_goal = (self.setfrequency - self.sign_lock * self.f_lock - self.sign_0 * self.f_0) / self.n\r\n # print 'f_rep_goal = %.0f Hz'%f_rep_goal\r\n # lock uses 3rd harmonic so synth must be set to *3\r\n delta_f_synth = (f_rep_goal - self.f_rep)*3 \r\n delta_f_synth = round(delta_f_synth,1)\r\n # print 'delta_f_synth = %.1f Hz'%delta_f_synth\r\n return delta_f_synth\r\n \r\n iteration = 0\r\n delta_f_synth = get_delta_f_synth()\r\n while abs(delta_f_synth) > self.synth_tol:\r\n move_synth(delta_f_synth)\r\n delta_f_synth = get_delta_f_synth()\r\n iteration += 1\r\n if iteration > self.max_iteration:\r\n # print 'REACHED MAX ITERATION: delta_f_synth = %.1f'%delta_f_synth\r\n break", "def note(freq):\n data = np.sin(2.0 * np.pi * freq * t) * amp\n return data", "def start_transmit(self):\n\n # Set publishing rate\n self.r = rospy.Rate(50) # 50Hz\n \n quitting = False\n while not rospy.is_shutdown() and not quitting:\n try:\n # JointState message to publish joint positions\n js_msg = self.build_joint_state_msg()\n \n # PoseStamped messages to publish position and \n # orientation of each joint\n ps_msg = self.build_pose_stamped_msg()\n \n # TODO: TwistStamped messages to publish linear and\n # angular velocities of each joint\n ts_msg = TwistStamped()\n\n # Publish the messages\n self.js_pub.publish(js_msg)\n self.ps_pub.publish(ps_msg)\n\n # TODO: Publish TwistStamped\n # self.ts_pub.publish(ts_msg)\n self.r.sleep()\n self.t += 0.01 # automated tests time var\n \n except KeyboardInterrupt:\n LOG.e(\"KeyboardInterrupt detected\", \"start_transmit\")\n quitting = True\n\n LOG.d(\"Quit command sent to client\", \"main\")\n raise QuitMessageException(\"Quit message received from client\")", "def send_signal():\n print(\"... run {0} transmission\".format(SendSignal.__SIGNAL_SETTINGS['repeats']))\n SendSignal.__SIGNAL_OBJ.RFxmit(SendSignal.__SIGNAL_SETTINGS['text_message'] *\n SendSignal.__SIGNAL_SETTINGS['repeats'])\n print('... 
set USB Dongle idle')\n SendSignal.__SIGNAL_OBJ.setModeIDLE()", "def set_write_cycle_time(self, osc_freq=32000000):\n self.SPItrans([0xac, 0x5d, 0x00, int((0.000025 * osc_freq) / 64)])\n self._wrt_defined = True", "def send_list(self, frequencies=[1e9,2e9,3e9,4e9], powers=[-10,-5,-2,0], dwell=0, delay=0):\r\n return self._api.send_list(frequencies, powers, dwell, delay)", "def send_list(self, frequencies=[1e9,2e9,3e9,4e9], powers=[-10,-5,-2,0], dwell=0, delay=0):\r\n _debug('api.send_list()')\r\n \r\n # Handle integers or lists for either frequencies or powers\r\n if not _s.fun.is_iterable(frequencies): frequencies = [frequencies]\r\n if not _s.fun.is_iterable(powers): powers = [powers]\r\n \r\n # Handle numpy arrays\r\n if not type(frequencies) == 'list': frequencies = list(frequencies)\r\n if not type(powers) == 'list': powers = list(powers)\r\n \r\n # Handle length-1 arrays:\r\n if len(frequencies) == 1: frequencies = frequencies*len(powers)\r\n if len(powers) == 1: powers = powers *len(frequencies)\r\n \r\n # Poop if the lengths don't match\r\n if not len(frequencies) == len(powers): \r\n print(\"ERROR: Lengths must match!\")\r\n return\r\n \r\n # The anapico, annoyingly, will only send a list if it's not in list mode\r\n original_mode = self.get_mode()\r\n if original_mode=='List': self.set_mode('Fixed')\r\n \r\n # Assemble the long-ass command\r\n command = \"LIST:POW \" \r\n for p in powers: command += str(p)+\",\"\r\n self.write(command)\r\n \r\n # Do the same for the frequencies\r\n command = \"LIST:FREQ \"\r\n for f in frequencies: command += str(f)+\",\" \r\n self.write(command)\r\n \r\n # Set dwell and delay\r\n self.write(\"LIST:DWEL \" + str(dwell))\r\n self.write(\"LIST:DEL \" + str(delay))\r\n \r\n # Set it back if we're supposed to\r\n if original_mode == 'List': self.set_mode('List')", "def transmit_mqtt(form_obj):\n # Print to console for debug\n print(form_obj)\n # Create a message to send\n topic = 'Testdevice/team2_module/RECEIVE'\n send_me = [topic,\n form_obj['sender'],\n form_obj['angle'],\n form_obj['brightness'],\n form_obj['resistance']\n ]\n # debug output to console\n print(\"før!!\")\n print(send_me)\n send_me = str(send_me)\n print(\"Efter!!\")\n\n # Send it\n\n # The donothing callback function\n def donothing(client, userdata, message):\n pass\n\n # Callback on publishing - After handshakes\n def on_publish_callback(client, userdata, mid):\n global sending\n sending = False\n\n # Create client\n publisher = MqttClient(\"Team2ModuleMessageSender\", donothing, on_publish_callback)\n\n # Send and disconnect\n rc = publisher.publish(topic, send_me)\n\n publisher.loop_start()\n global sending\n sending = True\n # Wait for the handshaking to end\n while sending:\n pass\n publisher.loop_stop()\n\n publisher.disconnect()\n\n return rc", "def _send(self, frame):\n \n self.device.write(frame)", "def update_motor_speed(data):\n print('sending new motor power')\n slider_power = json.dumps({\"id\" : \"Motor1\", \"speed\": data})\n SERIAL_PARENT.send(slider_power)\n OUTGOING.append(slider_power)", "def send_list(self, frequencies=[1e9,2e9,3e9,4e9], powers=[-10,-5,-2,0], dwell=1000, delay=0):\r\n _debug('api.send_list()')\r\n \r\n # Handle integers or lists for either frequencies or powers\r\n if not _s.fun.is_iterable(frequencies): frequencies = [frequencies]\r\n if not _s.fun.is_iterable(powers): powers = [powers]\r\n \r\n # Handle numpy arrays\r\n if not type(frequencies) == 'list': frequencies = list(frequencies)\r\n if not type(powers) == 'list': powers = 
list(powers)\r\n \r\n # Handle length-1 arrays:\r\n if len(frequencies) == 1: frequencies = frequencies*len(powers)\r\n if len(powers) == 1: powers = powers *len(frequencies)\r\n \r\n # Poop if the lengths don't match\r\n if not len(frequencies) == len(powers): \r\n print(\"ERROR: Lengths must match!\")\r\n return\r\n \r\n #The mode switch to Fixed when we write a power and dwell list. \r\n #So I track the initial mode to put it back at the end. \r\n initial_mode = self.get_mode()\r\n \r\n #First choose a list, otherwise SMA100B is mad\r\n #To know the available list, the query is 'SOUR1:LIST:CAT?'\r\n self.write('SOUR1:LIST:SEL \"/var/user/list1.lsw\"') \r\n \r\n #Prepare the strings for the list command\r\n str_freq = 'SOUR1:LIST:FREQ ' + str(frequencies[0]) #String for the frequency list command\r\n str_pow = 'SOUR1:LIST:POW ' + str(powers[0]) #String for the power list command\r\n str_dwell = 'SOUR1:LIST:DWEL:LIST '+str(dwell) #String for the dwell list command\r\n for i in range(1,len(frequencies)):\r\n str_freq += ', ' + str(frequencies[i])\r\n str_pow += ', ' + str(powers[i])\r\n str_dwell += ', '+str(dwell)\r\n \r\n self.write(str_freq)\r\n self.write(str_pow)\r\n self.write(str_dwell)\r\n \r\n #Apparently the SMA change to Fixed mode after the power and the Dwell list is send... \r\n #So I just switch back to the initial mode to make sure we end up in the same state. \r\n self.set_mode(initial_mode)", "def send_list(self, frequencies=[1e9,2e9,3e9,4e9], powers=[-10,-5,-2,0], dwell=1000, delay=0):\r\n _debug('api.send_list()')\r\n \r\n # Handle integers or lists for either frequencies or powers\r\n if not _s.fun.is_iterable(frequencies): frequencies = [frequencies]\r\n if not _s.fun.is_iterable(powers): powers = [powers]\r\n \r\n # Handle numpy arrays\r\n if not type(frequencies) == 'list': frequencies = list(frequencies)\r\n if not type(powers) == 'list': powers = list(powers)\r\n \r\n # Handle length-1 arrays:\r\n if len(frequencies) == 1: frequencies = frequencies*len(powers)\r\n if len(powers) == 1: powers = powers *len(frequencies)\r\n \r\n # Poop if the lengths don't match\r\n if not len(frequencies) == len(powers): \r\n print(\"ERROR: Lengths must match!\")\r\n return\r\n \r\n #The mode switch to Fixed when we write a power and dwell list. \r\n #So I track the initial mode to put it back at the end. \r\n initial_mode = self.get_mode()\r\n \r\n #First choose a list, otherwise SMA100B is mad\r\n #To know the available list, the query is 'SOUR1:LIST:CAT?'\r\n self.write('SOUR1:LIST:SEL \"/var/user/list1.lsw\"') \r\n \r\n #Prepare the strings for the list command\r\n str_freq = 'SOUR1:LIST:FREQ ' + str(frequencies[0]) #String for the frequency list command\r\n str_pow = 'SOUR1:LIST:POW ' + str(powers[0]) #String for the power list command\r\n str_dwell = 'SOUR1:LIST:DWEL:LIST '+str(dwell) #String for the dwell list command\r\n for i in range(1,len(frequencies)):\r\n str_freq += ', ' + str(frequencies[i])\r\n str_pow += ', ' + str(powers[i])\r\n str_dwell += ', '+str(dwell)\r\n \r\n self.write(str_freq)\r\n self.write(str_pow)\r\n self.write(str_dwell)\r\n \r\n #Apparently the SMA change to Fixed mode after the power and the Dwell list is send... \r\n #So I just switch back to the initial mode to make sure we end up in the same state. \r\n self.set_mode(initial_mode)", "def setCarrierFrequency(self, frequency):\n if self._params['modulationMode'] != \"IQMixer\":\n print \"WARNING ! 
Carrier Frequency change also Tone Frequency in %s mode\" % self._params['modulationMode']\n self._MWSource.setFrequency(frequency)", "def set_frequency(self, f=1e9):\r\n _debug('simq03b_api.set_frequency')\r\n \r\n self.write('SOUR:FREQ:CW '+str(f))", "def set_frequency(self, f=1e9):\r\n self.write('SOUR:FREQ:CW '+str(f))", "def set_frequency(self, f=1e9):\r\n self.write('SOUR:FREQ:CW '+str(f))", "def sendSensors(self,sensors):\n self.broadcaster.sendSensors(sensors)" ]
[ "0.6385109", "0.62068605", "0.6045478", "0.59710425", "0.58719957", "0.5833281", "0.5810495", "0.5604722", "0.55937123", "0.5562863", "0.55398273", "0.55012274", "0.5495552", "0.54938346", "0.5476565", "0.54658544", "0.54509515", "0.54400605", "0.54393345", "0.5429929", "0.5428554", "0.5424191", "0.54230475", "0.5399115", "0.5399115", "0.53847134", "0.53770524", "0.53420067", "0.53420067", "0.53395855" ]
0.643952
0
Create and send a trajectory that's a linear interpolation between wam_start and wam_end that lasts duration seconds, sent at frequency.
def create_and_send_wam_trajectory(wam_start, wam_end, duration, frequency=250):

    joint_traj, joint_vels = create_joint_trajectory(wam_start, wam_end,
                                                     duration, frequency)
    send_joint_trajectory(joint_traj, joint_vels, frequency)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def move_wam_from_current_location(wam_end, duration, frequency=250):\n wam_start = get_wam_joint_coordinates()\n joint_traj, joint_vels = create_joint_trajectory(wam_start, wam_end,\n duration, frequency)\n send_joint_trajectory(joint_traj, joint_vels, frequency)", "def calculate_delay(self, wav_start, wav_finish, thr_start, thr_finish):\n\n w_s=self.find_nearest_wav(wav_start)\n w_f=self.find_nearest_wav(wav_finish)\n temp=self.pre_proc_data.loc[:,w_s]\n t_start = self.times[(temp.values>thr_start).argmax()]\n print(t_start)\n\n temp2=self.pre_proc_data.loc[:,w_f]\n dx=temp2.diff()\n dx_clean=dx.ewm(span = 50).mean()\n t_finish=self.times[np.min(np.where(dx_clean<thr_finish))]\n print(t_finish)\n\n plt.subplot(211)\n plt.plot(temp,label='{}nm'.format(wav_start))\n plt.axvline(t_finish,color='grey')\n plt.axvline(t_start,color='grey')\n plt.xlim(t_start-30,t_finish+30)\n plt.legend()\n\n plt.subplot(212)\n plt.plot(temp2,label='{}nm'.format(wav_finish))\n plt.axvline(t_finish,color='grey')\n plt.axvline(t_start,color='grey')\n plt.xlim(t_start-30,t_finish+30)\n plt.legend()\n\n plt.show()\n\n self.t_delay=np.round(t_finish-t_start,2)\n return np.round(t_finish-t_start,2)", "def smooth_linear_ramp(t, kinematic_parameters):\n ramp_stage_acceleration = kinematic_parameters[0]\n ramp_start_time = kinematic_parameters[1]\n i_ramp_end_time = kinematic_parameters[2]\n steady_end_time = kinematic_parameters[3]\n end_ramp_end_time = kinematic_parameters[4]\n smooth_factor = kinematic_parameters[5]\n ramp_mode = kinematic_parameters[6]\n ramp_constant_time = kinematic_parameters[7]\n pitch_mode = kinematic_parameters[8]\n pitch_time = kinematic_parameters[9]\n pitch_delay_time_fraction = kinematic_parameters[10]\n pitch_acceleration = kinematic_parameters[11]\n pitch_acc_time_fraction = kinematic_parameters[12]\n section_location = kinematic_parameters[13]\n bstroke = kinematic_parameters[14]\n\n def logcosh(x):\n # s always has real part >= 0\n s = np.sign(x) * x\n p = np.exp(-2 * s)\n return s + np.log1p(p) - np.log(2)\n\n def omega(x):\n \"\"\"linear ramp rotation speed function\"\"\"\n # if ramp_start_time - ramp_constant_time <= x <= end_ramp_end_time + ramp_constant_time:\n # f_t0 = smooth_factor * (x - ramp_start_time)\n # f_t1 = smooth_factor * (x - i_ramp_end_time)\n # if ramp_mode == 'with_end_acc':\n # f_t2 = smooth_factor * (x - steady_end_time)\n # f_t3 = smooth_factor * (x - end_ramp_end_time)\n # elif ramp_mode == 'no_end_acc':\n # f_t2 = smooth_factor * ramp_start_time\n # f_t3 = smooth_factor * i_ramp_end_time\n\n # omegax = (ramp_stage_acceleration / 2) / smooth_factor * (\n # logcosh(f_t0) - logcosh(f_t1) + logcosh(f_t3) - logcosh(f_t2))\n # else:\n # if bstroke == 'yes' and x <= 2 * (end_ramp_end_time +\n # ramp_constant_time):\n # x -= end_ramp_end_time + ramp_constant_time\n # f_t0 = smooth_factor * (x - ramp_start_time)\n # f_t1 = smooth_factor * (x - i_ramp_end_time)\n # if ramp_mode == 'with_end_acc':\n # f_t2 = smooth_factor * (x - steady_end_time)\n # f_t3 = smooth_factor * (x - end_ramp_end_time)\n # elif ramp_mode == 'no_end_acc':\n # f_t2 = smooth_factor * ramp_start_time\n # f_t3 = smooth_factor * i_ramp_end_time\n\n # omegax = -(ramp_stage_acceleration / 2) / smooth_factor * (\n # logcosh(f_t0) - logcosh(f_t1) + logcosh(f_t3) -\n # logcosh(f_t2))\n # else:\n # omegax = 0\n\n if bstroke == 'no':\n f_t0 = smooth_factor * (x - ramp_start_time)\n f_t1 = smooth_factor * (x - i_ramp_end_time)\n if ramp_mode == 'with_end_acc':\n f_t2 = smooth_factor * (x - 
steady_end_time)\n f_t3 = smooth_factor * (x - end_ramp_end_time)\n elif ramp_mode == 'no_end_acc':\n f_t2 = smooth_factor * ramp_start_time\n f_t3 = smooth_factor * i_ramp_end_time\n\n omegax = (ramp_stage_acceleration / 2) / smooth_factor * (\n logcosh(f_t0) - logcosh(f_t1) + logcosh(f_t3) - logcosh(f_t2))\n\n else:\n if x <= end_ramp_end_time + ramp_constant_time:\n f_t0 = smooth_factor * (x - ramp_start_time)\n f_t1 = smooth_factor * (x - i_ramp_end_time)\n if ramp_mode == 'with_end_acc':\n f_t2 = smooth_factor * (x - steady_end_time)\n f_t3 = smooth_factor * (x - end_ramp_end_time)\n elif ramp_mode == 'no_end_acc':\n f_t2 = smooth_factor * ramp_start_time\n f_t3 = smooth_factor * i_ramp_end_time\n\n omegax = (ramp_stage_acceleration /\n 2) / smooth_factor * (logcosh(f_t0) - logcosh(f_t1) +\n logcosh(f_t3) - logcosh(f_t2))\n\n else:\n x -= end_ramp_end_time + ramp_constant_time\n f_t0 = smooth_factor * (x - ramp_start_time)\n f_t1 = smooth_factor * (x - i_ramp_end_time)\n if ramp_mode == 'with_end_acc':\n f_t2 = smooth_factor * (x - steady_end_time)\n f_t3 = smooth_factor * (x - end_ramp_end_time)\n elif ramp_mode == 'no_end_acc':\n f_t2 = smooth_factor * ramp_start_time\n f_t3 = smooth_factor * i_ramp_end_time\n\n omegax = -(ramp_stage_acceleration / 2) / smooth_factor * (\n logcosh(f_t0) - logcosh(f_t1) + logcosh(f_t3) -\n logcosh(f_t2))\n\n return omegax\n\n steady_rotation_omega = omega((i_ramp_end_time + steady_end_time) / 2)\n omega_print = steady_rotation_omega * np.pi / 180\n print('steady revolving omega = %s' % omega_print)\n\n dphi_data = []\n for ti in t:\n dphi_data.append(omega(ti))\n dphi_spl = UnivariateSpline(t, dphi_data, s=0)\n\n def ddphi(x):\n \"\"\"flapping angular acceleration function\"\"\"\n return dphi_spl.derivatives(x)[1]\n\n ramp_angle = dphi_spl.integral(0, i_ramp_end_time)\n print('initial linear ramp angle = %s' % ramp_angle)\n\n if ramp_mode == 'with_end_acc':\n end_ramp_angle = dphi_spl.integral(\n steady_end_time, end_ramp_end_time + ramp_constant_time)\n print('end linear ramp angle = %s' % end_ramp_angle)\n\n stroke_angle = dphi_spl.integral(0, end_ramp_end_time + ramp_constant_time)\n st_dist = np.abs(stroke_angle) * np.pi / 180 * section_location\n print('2d wing travel distance = %s' % st_dist)\n\n def phi(x):\n \"\"\"rotation angle function\"\"\"\n return dphi_spl.integral(0, x)\n\n #--pitching motion functions--\n if pitch_mode == 'with_end_pitch':\n pitch_delay_time = (pitch_time +\n 2 * ramp_constant_time) * pitch_delay_time_fraction\n pitch_acc_time = pitch_time * pitch_acc_time_fraction / 2\n\n pitch_start_time = end_ramp_end_time - pitch_time + pitch_delay_time\n p_acc_end_time = pitch_start_time + pitch_acc_time\n pitch_end_time = pitch_start_time + pitch_time\n p_decc_start_time = pitch_end_time - pitch_acc_time\n\n def dalf(x):\n \"\"\"linear ramp pitch speed function\"\"\"\n # if pitch_start_time - ramp_constant_time <= x <= pitch_end_time + ramp_constant_time:\n # f_t0 = smooth_factor * (x - pitch_start_time)\n # f_t1 = smooth_factor * (x - p_acc_end_time)\n # f_t2 = smooth_factor * (x - p_decc_start_time)\n # f_t3 = smooth_factor * (x - pitch_end_time)\n\n # dalfx = (pitch_acceleration /\n # 2) / smooth_factor * (logcosh(f_t0) - logcosh(f_t1) +\n # logcosh(f_t3) - logcosh(f_t2))\n # else:\n # dalfx = 0\n f_t0 = smooth_factor * (x - pitch_start_time)\n f_t1 = smooth_factor * (x - p_acc_end_time)\n f_t2 = smooth_factor * (x - p_decc_start_time)\n f_t3 = smooth_factor * (x - pitch_end_time)\n\n dalfx = (pitch_acceleration / 2) / 
smooth_factor * (\n logcosh(f_t0) - logcosh(f_t1) + logcosh(f_t3) - logcosh(f_t2))\n return dalfx\n\n dalf_data = []\n for ti in t:\n dalf_data.append(dalf(ti))\n dalf_spl = UnivariateSpline(t, dalf_data, s=0)\n\n pitch_angle = dalf_spl.integral(pitch_start_time - ramp_constant_time,\n pitch_end_time + ramp_constant_time)\n\n print('wing pitch angle = %s' % np.abs(pitch_angle))\n\n steady_pitching_omega = dalf((pitch_start_time + pitch_end_time) / 2)\n omega_print = steady_pitching_omega * np.pi / 180\n print('steady wing pitch omega = %s\\n' % omega_print)\n\n def ddalf(x):\n \"\"\"flapping angular acceleration function\"\"\"\n return dalf_spl.derivatives(x)[1]\n\n def alf(x):\n \"\"\"rotation angle function\"\"\"\n return dalf_spl.integral(0, x)\n\n kinematic_angles = []\n for ti in t:\n if pitch_mode == 'no_end_pitch':\n kinematic_anglesi = [-phi(ti), 0, -omega(ti), 0, -ddphi(ti), 0]\n elif pitch_mode == 'with_end_pitch':\n kinematic_anglesi = [\n -phi(ti), -alf(ti), -omega(ti), -dalf(ti), -ddphi(ti),\n -ddalf(ti)\n ]\n kinematic_angles.append(kinematic_anglesi)\n\n return kinematic_angles", "def send_speed(self, linear_speed, angular_speed):\n ### Makes a new Twist message\n msg_cmd_vel = Twist()\n \t# Linear velocity\n \tmsg_cmd_vel.linear.x = linear_speed\n \tmsg_cmd_vel.linear.y = 0.0\n \tmsg_cmd_vel.linear.z = 0.0\n \t# Angular velocity\n \tmsg_cmd_vel.angular.x = 0.0\n \tmsg_cmd_vel.angular.y = 0.0\n \tmsg_cmd_vel.angular.z = angular_speed\n ### Publishes the message\n self.cmd_vel.publish(msg_cmd_vel)", "def generate_linear_trace(self, min_queries, min_duration, qps):\n timestamp = 0\n arrival = []\n timestep = 1 / qps\n while timestamp < min_duration and len(arrival) < min_queries:\n timestamp += timestep\n arrival.append(timestep)\n self.arrival = arrival", "def send_joint_trajectory(trajectory, velocities, frequency=250):\n pub = rospy.Publisher(\"/wam/jnt_pos_cmd\", RTJointPos, queue_size=10)\n #If wam_node is running, it will be connected to this publisher.\n #Mostly this loop is here because you want to make sure the publisher\n #gets set up before it starts sending information.\n while pub.get_num_connections() < 1:\n print \"Waiting on the publisher to go up.\"\n rospy.sleep(0.5)\n\n trajectory_length = trajectory.__len__()\n finished = False\n traj_row = 0\n message_for_service = RTJointPos()\n\n r = rospy.Rate(frequency)\n\n while not rospy.is_shutdown() and not finished:\n message_for_service.joints = trajectory[traj_row]\n message_for_service.rate_limits = velocities[traj_row]\n traj_row += 1\n pub.publish(message_for_service)\n if traj_row == trajectory_length - 1:\n finished = True\n r.sleep()", "def morletft(s, w, w0, dt):\n \n p = 0.75112554446494251 # pi**(-1.0/4.0)\n wavelet = np.zeros((s.shape[0], w.shape[0]))\n pos = w > 0\n\n for i in range(s.shape[0]):\n n = normalization(s[i], dt)\n wavelet[i][pos] = n * p * np.exp(-(s[i] * w[pos] - w0)**2 / 2.0)\n \n return wavelet", "def surf_tts(distance, start_time):\n deltas = np.arange(0., 140., 5.)\n tts = 60. 
* np.array(\n [0., 2., 4., 6.2, 8.4, 11., 13., 15.2, 17.8, 19.4, 22., 24.1, 26.6,\n 28.6, 30.8, 33., 35.6, 37.4, 39.8, 42., 44.2, 46.4, 48.8, 50.9, 53.6,\n 55.2, 57.8, 60.])\n (mval, nval) = np.polyfit(deltas, tts, 1)\n # calculate surface wave travel times for degrees 1 to 180 ?\n surftts = mval * np.arange(0., 180.1, 0.01)\n difer = []\n for i4 in xrange(0, len(surftts)):\n dife_r = abs(0.001 * distance / 111.11 - np.arange(0., 180.1, 0.01)\n [i4])\n difer.append(dife_r)\n # love wave arrival: event time + surftts for closest degree??\n # (smallest difference between distance for surftts and actual distance of\n # event)\n arriv_lov = np.floor(start_time + surftts[np.asarray(difer).argmin()])\n diferans = []\n for i1 in xrange(len(deltas)):\n dif2 = abs(np.arange(0., 180.1, 0.01)[np.asarray(difer).argmin()] -\n deltas[i1])\n diferans.append(dif2)\n # arrival = love wave arrival - p arrival?\n peq = surftts[np.asarray(difer).argmin()] - \\\n tts[np.asarray(diferans).argmin()]\n arrival = arriv_lov + peq\n\n return arrival", "def generate_fire_recurrence(self):\r\n \r\n self.time_to_next_fire = round(weibullvariate(self.scale_parameter, self.shape_parameter),2)\r\n return self.time_to_next_fire", "def syns(alpha=0.1, rate=10, delay=0, dur=50, amp=1.0, dt=0.020, N=1, mindur = 120, makewave=True):\n deadtime = 0.7\n if dur + delay < mindur:\n tvec = np.arange(0.0, mindur , dt)\n else:\n tvec = np.arange(0.0, dur+delay , dt)\n npts = len(tvec)\n ta = np.arange(0.0, 20.0, dt)\n aw = ta * alpha* np.exp(-ta/alpha)/alpha # alpha waveform time course\n spt = [[]]*N # list of spike times\n wave = np.array([]) # waveform\n sptime=[]\n for j in range(0,N):\n done = False\n t=0.0\n nsp = 0\n while not done:\n a = np.random.sample(1)\n if t < delay:\n t = delay\n continue\n if t >= delay and t <= (delay+dur):\n ti = -np.log(a)/(rate/1000.0) # convert to exponential distribution with rate\n if ti < deadtime:\n continue\n t = t + ti # running time\n if t > delay+dur:\n done = True\n continue\n if nsp is 0:\n sptime = t\n nsp = nsp+1\n else:\n sptime = np.append(sptime, t)\n nsp = nsp+1\n if j is 0:\n wavej = np.zeros(len(tvec))\n for i in range(0,len(sptime)):\n st = int(sptime[i]/dt)\n wavej[st] = wavej[st] + 1\n spt[j] = sptime\n\n if makewave:\n w = np.convolve(wavej, aw/max(aw))*amp\n if len(w) < npts:\n w = np.append(w, np.zeros(npts-len(w)))\n if len(w) > npts:\n w = w[0:npts]\n if j is 0:\n wave = w\n else:\n wave = wave + w\n return (spt, wave, tvec, N)", "def step(amplitude, t_stop):\n times = np.array([0, t_stop/10, t_stop])\n amps = np.array([0, amplitude, amplitude])\n return times, amps", "def publish(self):\n msg = PolynomialTrajectory()\n\n if self.trajectory_constructed == True: \n t = time.time()-self.traj_t0\n\n if t <= self.seg_times[-1]: \n segment = map(lambda x:x > t, self.seg_times).index(True)\n else: \n segment = self.no_of_segments; t = self.seg_times[-1]\n \n rospy.loginfo('the value of t and segment is:%f, %d', t, segment)\n #rospy.loginfo('segment times are:%f, %f, %f, %f, %f, %f', self.segment_times[0], \\\n #self.segment_times[1], self.segment_times[2], self.segment_times[3], self.segment_times[4], self.segment_times[5])\n p1 = self.pc_x; p2 = self.pc_y; p3 = self.pc_z\n N = self.N+1\n p1 = [p1[i:i + N] for i in range(0, len(p1), N)]\n [i.reverse() for i in p1]\n \n p2 = [p2[i:i + N] for i in range(0, len(p2), N)]\n [i.reverse() for i in p2]\n \n p3 = [p3[i:i + N] for i in range(0, len(p3), N)]\n [i.reverse() for i in p3]\n xx = np.poly1d(p1[segment-1]); vx = np.polyder(xx, 
1); ax = np.polyder(xx, 2)\n yy = np.poly1d(p2[segment-1]); vy = np.polyder(yy, 1); ay = np.polyder(yy, 2)\n zz = np.poly1d(p3[segment-1]); vz = np.polyder(zz, 1); az = np.polyder(zz, 2)\n \n msg.header.stamp = rospy.Time.now()\n msg.pdes.x = xx(t); msg.pdes.y = yy(t); msg.pdes.z = zz(t)\n msg.vdes.x = vx(t); msg.vdes.y = vy(t); msg.ades.z = vz(t)\n msg.ades.x = ax(t); msg.ades.y = ay(t); msg.ades.z = az(t)\n msg.ddes.x = 1; msg.ddes.y = 0; msg.ddes.z = 0\n msg.controller = 0\n \n self.pub.publish(msg)\n\n else: \n rospy.loginfo('no goal is published yet')", "def make_trajectory(self, NextwpPosition, NextwpOrientation):\n d = np.linalg.norm(self.CurrentPosition - NextwpPosition)\n inter_segment_distance = 1\n self.no_of_segments = 1+int(d//inter_segment_distance)\n \n\n # enter sequence of waypoints: no of points should be self.no_of_segments+1\n x_wp = np.linspace(self.CurrentPosition[0], NextwpPosition[0], self.no_of_segments+1)\n y_wp = np.linspace(self.CurrentPosition[1], NextwpPosition[1], self.no_of_segments+1)\n z_wp = np.linspace(self.CurrentPosition[2], NextwpPosition[2], self.no_of_segments+1)\n \n # add intial and final condiions vel, acc, jerk\n x_ic = np.array([0, 0, 0])\n x_fc = np.array([0, 0, 0])\n x0 = np.array([x_wp[0], x_ic[0], x_ic[1], x_ic[2]])\n xT = np.array([x_wp[-1], x_fc[0], x_fc[1], x_fc[2]])\n\n y_ic = np.array([0, 0, 0])\n y_fc = np.array([0, 0, 0])\n y0 = np.array([y_wp[0], y_ic[0], y_ic[1], y_ic[2]])\n yT = np.array([y_wp[-1], y_fc[0], y_fc[1], y_fc[2]])\n \n z_ic = np.array([0, 0, 0])\n z_fc = np.array([0, 0, 0])\n z0 = np.array([z_wp[0], z_ic[0], z_ic[1], z_ic[2]])\n zT = np.array([z_wp[-1], z_fc[0], z_fc[1], z_fc[2]])\n\n path = [np.sqrt((x_wp[i]-x_wp[i-1])**2 + (y_wp[i]-y_wp[i-1])**2 + (z_wp[i]-z_wp[i-1])**2) for i in range(1, self.no_of_segments+1, 1)]\n\n \n T = []; T.insert(0, 0)\n T.insert(1, T[-1] + path[0]/self.reduced_speed)\n for i in range(1, len(path)-1, 1):\n T.append(T[-1] + path[i]/self.average_speed)\n T.insert(len(T)+1, T[-1]+path[-1]/self.reduced_speed) \n\n\n\n\n #T = []; T.insert(0, 0) # insert 0 at 0 position\n #for i in range(self.no_of_segments): \n # T.append(T[-1]+path[i]/self.average_speed)\n\n r = self.r\n N = 1 + self.N # because number of terms in a polynomial = degree+1\n\n QQ = []; AA_inv = []\n\n for i in range(self.no_of_segments): \n q = self.construct_Q(N, r, T[i], T[i+1])\n a = self.construct_A(N, r, T[i], T[i+1])\n a_inv = scipy.linalg.pinv(a)\n QQ = block_diag(QQ, q)\n AA_inv = block_diag(AA_inv, a_inv)\n \n order = 2*r*self.no_of_segments\n R = np.dot(AA_inv.T, np.dot(QQ, AA_inv))\n \n bx = self.construct_b(x0, xT)\n by = self.construct_b(y0, yT)\n bz = self.construct_b(z0, zT)\n\n m = Model(\"qp\")\n order = 2*r*self.no_of_segments\n dx = m.addVars(order, lb = -GRB.INFINITY, ub = GRB.INFINITY, vtype=GRB.CONTINUOUS, name=\"dx\")\n dy = m.addVars(order, lb = -GRB.INFINITY, ub = GRB.INFINITY, vtype=GRB.CONTINUOUS, name=\"dy\") \n dz = m.addVars(order, lb = -GRB.INFINITY, ub = GRB.INFINITY, vtype=GRB.CONTINUOUS, name=\"dz\") \n\n # making objective using quicksum, takes a lot of time \n #obj1 = quicksum(dx[i] * quicksum(R[i][j] * dx[j] for j in range(order)) for i in range(order))\n #obj2 = quicksum(dy[i] * quicksum(R[i][j] * dy[j] for j in range(order)) for i in range(order))\n #obj3 = quicksum(dz[i] * quicksum(R[i][j] * dz[j] for j in range(order)) for i in range(order))\n \n # using LinExpr for the second expression is significantly faster \n obj1 = quicksum(dx[i] * LinExpr([(R[i][j], dx[j]) for j in range(order)]) for 
i in range(order))\n obj2 = quicksum(dy[i] * LinExpr([(R[i][j], dy[j]) for j in range(order)]) for i in range(order))\n obj3 = quicksum(dz[i] * LinExpr([(R[i][j], dz[j]) for j in range(order)]) for i in range(order))\n obj = obj1 + obj2 + obj3\n j = 0\n for i in range(order): \n if i < r: \n m.addConstr(dx[i] == bx[i])\n m.addConstr(dy[i] == by[i])\n m.addConstr(dz[i] == bz[i])\n elif i >= order-r: \n m.addConstr(dx[i] == bx[r+j])\n m.addConstr(dy[i] == by[r+j])\n m.addConstr(dz[i] == bz[r+j])\n j += 1\n \n c = 1 # counter\n for i in range(r, order-2*r, 2*r): \n #m.addConstr(dx[i] == self.x_wp[c])\n #m.addConstr(dy[i] == self.y_wp[c])\n #m.addConstr(dz[i] == self.z_wp[c])\n m.addConstr(dx[i] <= x_wp[c] + 0.2)\n m.addConstr(dx[i] >= x_wp[c] - 0.2)\n m.addConstr(dy[i] <= y_wp[c] + 0.2)\n m.addConstr(dy[i] >= y_wp[c] - 0.2)\n m.addConstr(dz[i] <= z_wp[c] + 0.2)\n m.addConstr(dz[i] >= z_wp[c] - 0.2)\n c = c+1\n for j in range(r): \n m.addConstr(dx[i+j] == dx[i+j+r])\n m.addConstr(dy[i+j] == dy[i+j+r])\n m.addConstr(dz[i+j] == dz[i+j+r])\n #if j ==2: \n # m.addConstr(dx[i+j] == 2.0)\n\n m.setObjective(obj, GRB.MINIMIZE)\n #m.write('model.lp')\n m.setParam('OutputFlag', 0)\n m.setParam('PSDtol', 1e-1)\n m.optimize()\n\n\n runtime = m.Runtime\n\n\n x_coeff = [dx[i].X for i in range(order)]\n y_coeff = [dy[i].X for i in range(order)]\n z_coeff = [dz[i].X for i in range(order)]\n\n Dx = np.asarray(x_coeff)[np.newaxis].T\n Dy = np.asarray(y_coeff)[np.newaxis].T \n Dz = np.asarray(z_coeff)[np.newaxis].T \n pcx = np.dot(AA_inv, Dx); pcy = np.dot(AA_inv, Dy); pcz = np.dot(AA_inv, Dz)\n\n\n poly_coeff_x = pcx.T.ravel().tolist()\n poly_coeff_y = pcy.T.ravel().tolist()\n poly_coeff_z = pcz.T.ravel().tolist()\n\n return poly_coeff_x, poly_coeff_y, poly_coeff_z, T, time.time()\n #self.publish(poly_coeff_x, poly_coeff_y, poly_coeff_z)", "def target(self, time, points, dt, num_way):\n start_index = min(int(time / dt), num_way - 1)\n end_index = min(start_index + 1, num_way - 1)\n start_point = points[start_index]\n end_point = points[end_index]\n fraction = float(time % dt) / dt\n return linear_interpolation_two_points(start_point, end_point, fraction).reshape(3)", "def communication_delay(self, begin, end):\n\n duration, path = self.movement_model.shortest_distance(begin, end)\n path_clusters = self.count_clusters(path)\n\n segment_speed_pairs = list()\n path_index = 0\n last_segment = None\n for path_cluster in path_clusters:\n segments = list()\n if last_segment:\n segments.append(last_segment)\n\n while path[path_index] in path_cluster.tour.objects:\n segments.append(path[path_index])\n last_segment = path[path_index]\n\n path_index += 1\n if path_index >= len(path):\n break\n\n segment_speed_pairs.append((path_cluster.mdc_speed, segments))\n\n travel_delay = 0. 
# * pq.second\n for speed, segments in segment_speed_pairs:\n cluster_distance = 0 # * pq.meter\n start_segment = segments[0]\n for end_segment in segments[1:]:\n distance = np.linalg.norm(\n start_segment.location.nd - end_segment.location.nd)\n cluster_distance += distance\n\n travel_delay += cluster_distance / speed\n\n transmission_delay = len(path_clusters)\n transmission_delay *= data.segment_volume(begin, end, self.env)\n transmission_delay /= self.env.comms_rate\n\n relay_delay = self.holding_time(path_clusters[1:])\n\n total_delay = travel_delay + transmission_delay + relay_delay\n return total_delay", "def make_signal(self, waveform):\n\n #print >> sys.stdout, \"generating signal...\"\n\n # --- Set up timing\n\n # index of the absolute maximum peak\n #idx = np.concatenate(np.argwhere(abs(waveform.hplus.data.data)>0))[0]\n idx = np.argmax(abs(waveform.hplus.data))\n\n # Epoch = GPS start of time series. Want the peak time of the waveform\n # to be aligned to the geocenter, so set the epoch to the geocentric\n # peak time minus the time to the waveform peak. In other words:\n # (waveform epoch) = (geocentric peak time) - (# of seconds to peak)\n\n hplus_epoch = self.ext_params.geocent_peak_time - idx*waveform.hplus.delta_t\n hcross_epoch = self.ext_params.geocent_peak_time - idx*waveform.hcross.delta_t\n\n # XXX: create regular lal timeseries objects for this bit (may replace\n # with pycbc injection routines later)\n\n hplus = lal.CreateREAL8TimeSeries('hplus', hplus_epoch, 0,\n waveform.hplus.delta_t, lal.StrainUnit,\n int(waveform.hplus.duration / waveform.hplus.delta_t))\n hplus.data.data = np.array(waveform.hplus.data)\n\n hcross = lal.CreateREAL8TimeSeries('hcross', hcross_epoch, 0,\n waveform.hcross.delta_t, lal.StrainUnit,\n int(waveform.hcross.duration / waveform.hcross.delta_t))\n hcross.data.data = np.array(waveform.hcross.data)\n\n\n if self.taper is True:\n\n print >> sys.stderr, \"Warning: tapering out inspiral (not a realistic strategy)\"\n delay = 0.0e-3\n idx = np.argmax(hplus.data.data) + \\\n np.ceil(delay/self.delta_t)\n hplus.data.data[0:idx]=0.0\n hcross.data.data[0:idx]=0.0\n lalsim.SimInspiralREAL8WaveTaper(hplus.data,\n lalsim.SIM_INSPIRAL_TAPER_START)\n lalsim.SimInspiralREAL8WaveTaper(hcross.data,\n lalsim.SIM_INSPIRAL_TAPER_START)\n\n\n # Scale for distance (waveforms extracted at 20 Mpc)\n hplus.data.data *= 20.0 / self.ext_params.distance\n hcross.data.data *= 20.0 / self.ext_params.distance\n\n tmp = lalsim.SimDetectorStrainREAL8TimeSeries(hplus, hcross,\n self.ext_params.ra, self.ext_params.dec,\n self.ext_params.polarization, self.det_site) \n\n # Pad the end so we have the same length signal and noise (useful for\n # snr and psds)\n sigdata = np.zeros(len(self.td_noise))\n sigdata[:len(tmp.data.data)] = np.copy(tmp.data.data)\n\n # Project waveform onto these extrinsic parameters\n self.td_signal = \\\n pycbc.types.timeseries.TimeSeries(initial_array=sigdata,\n delta_t=tmp.deltaT, epoch=tmp.epoch)\n\n del tmp\n\n # Remove extraneous data\n #self.td_signal = self.td_signal.trim_zeros()", "def linear_spline_interpolation(q_, t_, m = 100):\n n = q_.shape[0]\n dof = q_.shape[1]\n\n t_ = t_ / m\n timesteps = np.linspace(0, 1, num = m)\n\n a = 100\n time_segments = np.zeros((n, dof))\n blend_times = np.zeros((n, dof))\n velocities = np.zeros((n, dof))\n accelerations = np.zeros((n, dof))\n\n # Initial time segment\n accelerations[0] = np.sign(q_[1] - q_[0]) * a\n blend_times[0] = t_[0] - np.sqrt(\n t_[0] * t_[0] - 2 * (q_[1] - q_[0]) / 
accelerations[0])\n velocities[0] = (q_[1] - q_[0]) / (t_[0] - 0.5 * blend_times[0])\n\n # Final time segment\n accelerations[n - 1] = np.sign(q_[n - 2] - q_[n - 1]) * a\n blend_times[n - 1] = t_[n - 2] - np.sqrt(\n t_[n - 2] * t_[n - 2] + 2 * (q_[n - 1] - q_[n - 2]) / accelerations[n - 1])\n velocities[n - 2] = (q_[n - 1] - q_[n - 2]) / (t_[n - 2] - 0.5 * blend_times[n - 1])\n velocities[n - 1] = 0\n\n # Loop for velocities\n for i in range(1, n - 2):\n velocities[i] = (q_[i + 1] - q_[i]) / t_[i]\n\n # Loop for accelerations and blend times\n for i in range(1, n - 1):\n accelerations[i] = np.sign(velocities[i] - velocities[i - 1]) * a\n blend_times[i] = (velocities[i] - velocities[i - 1]) / accelerations[i]\n\n # Calculate time segments\n time_segments[0] = t_[0] - blend_times[0] - 0.5 * blend_times[1]\n time_segments[n - 2] = t_[n - 2] - blend_times[n - 1] - 0.5 * blend_times[n - 2]\n time_segments[n - 1] = 0\n for i in range(1, n - 2):\n time_segments[i] = t_[i] - 0.5 * blend_times[i + 1] - 0.5 * blend_times[i]\n\n \n # Calculate Trajectories\n q = np.zeros((dof, m))\n qd = np.zeros((dof, m))\n qdd = np.zeros((dof, m))\n\n # Loop for each degree of freedom\n for d in range(dof):\n # j for using above parameters\n # previous_i for saving i of start of a parabola segment\n # previous_ii for saving i of start of a linear segment\n j = 0\n previous_i = 0\n previous_ii = 0\n\n # Loop over the timesteps\n for i in range(len(timesteps)):\n t = timesteps[i] - timesteps[previous_i]\n\n # If t is in the parabola range\n if t <= blend_times[j][d]:\n a = accelerations[j][d]\n\n qdd[d, i] = a\n qd[d, i] = qd[d, previous_i] + a * t\n\n if i == 0:\n q[d, i] = q_[0][d] + 0.5 * a * t * t\n else:\n q[d, i] = q[d, previous_i] + qd[d, previous_i] * t + 0.5 * a * t * t\n\n previous_ii = i\n\n # If t is in the linear range\n elif t <= blend_times[j][d] + time_segments[j][d]:\n t = timesteps[i] - timesteps[previous_ii]\n v = velocities[j][d]\n\n qdd[d, i] = 0\n qd[d, i] = v\n q[d, i] = q[d, previous_ii] + v * t\n\n # If t has crossed the parabola plus the linear range\n else:\n previous_i = i - 1\n j += 1\n\n t = timesteps[i] - timesteps[previous_i]\n\n # Break loop if parameter exceeded\n if j == len(accelerations):\n break\n\n a = accelerations[j][d]\n v = qd[d, previous_i]\n\n qdd[d, i] = a\n qd[d, i] = v + a * t\n q[d, i] = q[d, previous_i] + v * t + 0.5 * a * t * t\n\n previous_ii = i\n\n # Loop over remaining timesteps\n while i < len(timesteps):\n a = accelerations[j - 1][d]\n v = velocities[j - 1][d]\n\n qdd[d, i] = a\n qd[d, i] = v + a * t\n q[d, i] = q[d, previous_i] + v * t + 0.5 * a * t * t\n\n i += 1\n\n return q, qd, qdd", "def create_spectral_bandpass_interpol(interpol_wavelen, interpol_rad, center_wvl,\n save_dir):\n\n save_dir = os.path.join(save_dir, r'look_up_table')\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n\n center_wvl1 = np.arange(min(center_wvl), max(center_wvl), 2)\n\n\n\n\n for j in np.arange(0, interpol_wavelen.shape[1]):\n #print(j)\n dframe = pd.DataFrame()\n wavelen = interpol_wavelen[:, j]\n\n radiance = interpol_rad[:, j]\n sampled_wvl = np.arange(min(wavelen), max(wavelen), 0.01)\n fit_params = interp1d(wavelen, radiance, kind='slinear')\n fitted_val = fit_params(sampled_wvl)\n #peak_val = np.where(fitted_val==max(fitted_val))[0]\n #print(peak_val)\n #peak_shift = sampled_wvl[peak_val] - CW1[j]\n\n\n# if peak_shift >0:\n# sampled_wvl = sampled_wvl - peak_shift\n# elif peak_shift <0:\n# sampled_wvl = sampled_wvl + peak_shift\n# else:\n# sampled_wvl = 
sampled_wvl\n#\n# print(sampled_wvl[peak_val] - CW1[j])\n\n dframe['Wavelength'] = sampled_wvl\n dframe['Radiance'] = fitted_val\n dframe.round(4).to_csv(save_dir + '/' + 'bandpass_' + \\\n str(round(center_wvl1[j], 2))+'_nm.csv')\n plt.plot(sampled_wvl, fitted_val/np.max(fitted_val), 'g.--')\n plt.grid(True, linestyle=':')\n plt.xlabel('Wavelength (nm)')\n plt.ylabel('Normalized Spectral Response')\n plt.title('TEMPO Spectral Bandpass (WL = ' + str(round(center_wvl1[j], 2)) + ' nm)')\n plt.ylim(0, 1.1)\n plt.xlim(min(wavelen), max(wavelen))\n #plt.show()\n\n # Now let us save the spectral bandpass data and spectral bandpass plot\n plt.savefig(save_dir + '/' + 'bandpass_' + str(round(center_wvl1[j], 2))+'_nm.png',\n dpi=100)\n plt.close('all')", "def __init__(self, start_w: float = 280.0, stop_w: float = 4000.0, spectra: str = \"AM1.5G\"):\n super(PowerSpectrum).__init__(start_w, stop_w, spectra)\n self.spectrum[:, 1] = self.spectrum[:, 1] * (self.spectrum[:, 0] * 1e-9 / (constants.c * constants.h))\n self.interp = interpolate.interp1d(self.spectrum[:, 0], self.spectrum[:, 1])", "def generate_fire_recurrence(self):\n\n self.time_to_next_fire = round(weibullvariate(self.scale_parameter, self.shape_parameter),2)\n return self.time_to_next_fire", "def __init__(self, start_w: float = 280.0, stop_w: float = 4000.0, spectra: str = \"AM1.5G\"):\n super(PhotocurrentSpectrum).__init__(start_w, stop_w, spectra)\n self.spectrum[:, 1] *= constants.e\n self.interp = interpolate.interp1d(self.spectrum[:, 0], self.spectrum[:, 1])", "def create_joint_trajectory(start_position, end_position,\n duration_of_trajectory, frequency_of_trajectory):\n\n frequency_of_ros_messages = frequency_of_trajectory # in Hz.\n number_of_way_points = duration_of_trajectory * frequency_of_ros_messages\n number_of_joints = start_position.__len__()\n trajectory = np.zeros((number_of_joints, number_of_way_points))\n\n for i in xrange(number_of_joints):\n trajectory[i] = np.linspace(start_position[i], end_position[i],\n number_of_way_points)\n trajectory = trajectory.T.copy()\n vel_lims = np.diff(trajectory, axis=0)\n #Because this is discrete differentiation,\n # the last value is missing: len(vel_lims) = len(trajectory) - 1\n # so we just repeat the last calculated velocity.\n vel_lims = np.append(vel_lims, [[x for x in vel_lims[-1,:]]], axis = 0)\n vel_lims = vel_lims * frequency_of_trajectory\n vel_lims = np.absolute(vel_lims)\n\n if vel_lims.all() > 1.0:\n raise ValueError(\"One or more of the values in the specified velocities\"\n \"Exceed 1 rad / second. 
The robot won't like this.\"\n \"Adjust the trajectory so that each point can be \"\n \"reached without exceeding this limit.\")\n return trajectory, vel_lims", "def train(self):\n \n for demo_traj in self._demo_trajs:\n\n interpolate = interp1d(self._phase._z, demo_traj, kind='cubic')\n\n #strech the trajectory to fit 0 to 1\n stretched_demo = interpolate(self._phase._z)[None,:]\n\n #compute the weights of the trajectory using the basis function\n w_demo_traj = np.dot(np.linalg.inv(np.dot(self._Phi, self._Phi.T) + 1e-12*np.eye(self._n_bfs) ), np.dot(self._Phi, stretched_demo.T)).T # weights for each trajectory\n \n #append the weights to the list\n self._W.append(w_demo_traj.copy())\n\n self._W = np.asarray(self._W).squeeze()\n \n # mean of weights\n self._mean_W = np.mean(self._W, axis=0)\n \n # covariance of weights\n # w1 = np.array(map(lambda x: x - self._mean_W.T, self._W))\n # self._sigma_W = np.dot(w1.T, w1)/self._W.shape[0]\n\n self._sigma_W = np.cov(self._W.T)", "def temporal_smooth(s, sample_rate, tau, hwinlen=20):\n\n t = np.arange(-hwinlen, hwinlen+1) / sample_rate\n w = np.exp(-t**2 / tau)\n w /= w.sum()\n return convolve1d(s, w)", "def delay_times_linear(min_t, max_t, step_size):\n return np.flip(np.arange(max_t, min_t - step_size, -step_size))", "def calculate_wind_delay(self):\n\n # Pull function variables off of the dictionaries to make the\n # following lines shorter. Also, the keys on the input\n # dictionary have units but the lines below do not have units in\n # the variable names.\n start_delay = self.input_dict['start_delay_hours']\n mission_time = self.input_dict['mission_time_hours']\n critical_wind_speed = self.input_dict['critical_wind_speed_m_per_s']\n wind_height_of_interest_m = self.input_dict['wind_height_of_interest_m']\n wind_shear_exponent = self.input_dict['wind_shear_exponent']\n weather_window = self.input_dict['weather_window']\n\n # Extract only the 'Speed m per s' as a dataframe, and only retain\n # elements where index is > start_delay and < mission_time\n wind_speeds_m_s = weather_window['Speed m per s'].values\n # check if mission time exceeds size of weather window\n if mission_time > len(wind_speeds_m_s):\n raise ValueError('{}: Error: Mission time longer than weather window'.format(type(self).__name__))\n wind_speeds_m_s_filtered = wind_speeds_m_s[(start_delay + 1):(int(mission_time) + 1)]\n\n # Calculate the wind speed at the particular, given the wind shear exponent\n wind_speed_at_height_m_s = wind_speeds_m_s_filtered * (wind_height_of_interest_m / 100) ** wind_shear_exponent\n\n # wind_delays is an array of booleans. It is True if the critical\n # wind speed is exceeded. False if the critical wind speed is not\n # exceeded. Each element represents an hour of wind\n wind_delays = wind_speed_at_height_m_s > critical_wind_speed\n\n # If there are any wind delays found:\n #\n # Iterate over each of the wind_delays. Count for contiguous blocks of\n # hours of wind delays. Add the length of each of these blocks to the\n # delay_durations.\n #\n # This code snippet takes O(n) linear time. The trick in vectorizing it\n # is that contiguous blocks of unknown length of True wind delays\n # are needed to find durations. But linear time may not be a problem\n # because the data we are iterating over has already been filtered down\n # to a small set.\n\n if np.any(wind_delays):\n\n # Holds the list of delay durations\n delay_durations = []\n\n # As we go through each delay, this accumulates the duration of\n # the current delay. 
Each True element of wind_delays increments\n # this by one\n current_delay_duration = 0\n\n # The following variable is True if we are iterating through\n # that are True, which means a weather delay.\n iterating_through_wind_delay = False\n\n # Iterate over each element of wind_delays. This is O(n) as\n # noted above.\n for wind_delay in np.nditer(wind_delays):\n # If we are in a weather delay...\n if wind_delay:\n # If we are not currently iterating over a weather delay\n # a new continuous sequence of delayed hours has started.\n if not iterating_through_wind_delay:\n current_delay_duration = 1\n iterating_through_wind_delay = True\n\n # While iterating over delay, increment counter.\n else:\n current_delay_duration += 1\n\n # If we are NOT iterating through a wind delay\n # And we were iterating through a wind delay, end that\n # delay and record the duration.\n elif iterating_through_wind_delay:\n delay_durations.append(current_delay_duration)\n iterating_through_wind_delay = False\n\n # Otherwise we are not iterating through a wind delay and\n # and did not finish a wind delay, so we do nothing with\n # no need for an else.\n\n # Finally return the durations we found.\n return delay_durations\n\n # If there are not wind delays, return a list with just 0 in it\n else:\n return [0]", "def __init__(self, start_w: float = 280.0, stop_w: float = 4000.0, spectra: str = \"AM1.5G\"):\n # the first column should be the wavelength in nanometers, the second is the tilt power density/nm in\n # W/(m**2 nm) = J s^-1 m^-2 nm^-1 = C V m^-2 nm^-1\n spectras = {\"AM0Etr\": 1, \"AM1.5G\": 2, \"AM1.5D\": 3}\n self.spectrum = np.genfromtxt(path.join(path.dirname(__file__), './ASTMG173.csv'), delimiter=\",\",\n skip_header=2)[:, [0, spectras[spectra]]]\n self.start_w = start_w\n self.stop_w = stop_w\n # build custom spectrum if necessary\n if start_w != 280.0 or stop_w != 4000.0:\n self.spectrum = self.sub_spectrum(start_w, stop_w)\n\n # create the PowerSpectrum interpolator\n self.interp = interpolate.interp1d(self.spectrum[:, 0], self.spectrum[:, 1])", "def generate_fire_recurrence(self):\n self._time_to_next_fire = round(\n weibullvariate(self._scale_parameter, self._shape_parameter), 2\n )\n return self._time_to_next_fire", "def twr(begin_ts, ts, current_ts):\n\n begin_diff = ts - begin_ts\n diff = current_ts - begin_ts\n if diff == 0:\n normalized = 1\n else:\n normalized = Decimal(begin_diff) / Decimal(diff)\n twr = 1 / (1 + Decimal.exp(Decimal(-12) * normalized + Decimal(2) + ((1 - Metrics.TIME_RANGE) * 10)))\n return twr", "def _get_interpolated_wtsp(self, *args, **kwargs):\n raise NotImplementedError" ]
[ "0.60942864", "0.5600358", "0.559887", "0.54943013", "0.5456494", "0.52778065", "0.52490836", "0.5219559", "0.5138963", "0.51206017", "0.5116879", "0.51017463", "0.5097054", "0.5094589", "0.5087105", "0.50795066", "0.5061036", "0.50578904", "0.5024878", "0.5002162", "0.49778125", "0.49632812", "0.49472818", "0.49347", "0.49259266", "0.49106997", "0.48679152", "0.48670012", "0.48599103", "0.4853298" ]
0.7602451
0
Create and send a trajectory that's a linear interpolation between where the wam currently is and wam_end that lasts duration seconds. Publishes the trajectory at frequency Hz.
def move_wam_from_current_location(wam_end, duration, frequency=250):
    wam_start = get_wam_joint_coordinates()
    joint_traj, joint_vels = create_joint_trajectory(wam_start, wam_end, duration, frequency)
    send_joint_trajectory(joint_traj, joint_vels, frequency)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_and_send_wam_trajectory(wam_start, wam_end, duration, frequency=250):\n\n joint_traj, joint_vels = create_joint_trajectory(wam_start, wam_end,\n duration, frequency)\n send_joint_trajectory(joint_traj, joint_vels, frequency)", "def send_joint_trajectory(trajectory, velocities, frequency=250):\n pub = rospy.Publisher(\"/wam/jnt_pos_cmd\", RTJointPos, queue_size=10)\n #If wam_node is running, it will be connected to this publisher.\n #Mostly this loop is here because you want to make sure the publisher\n #gets set up before it starts sending information.\n while pub.get_num_connections() < 1:\n print \"Waiting on the publisher to go up.\"\n rospy.sleep(0.5)\n\n trajectory_length = trajectory.__len__()\n finished = False\n traj_row = 0\n message_for_service = RTJointPos()\n\n r = rospy.Rate(frequency)\n\n while not rospy.is_shutdown() and not finished:\n message_for_service.joints = trajectory[traj_row]\n message_for_service.rate_limits = velocities[traj_row]\n traj_row += 1\n pub.publish(message_for_service)\n if traj_row == trajectory_length - 1:\n finished = True\n r.sleep()", "def send_speed(self, linear_speed, angular_speed):\n ### Makes a new Twist message\n msg_cmd_vel = Twist()\n \t# Linear velocity\n \tmsg_cmd_vel.linear.x = linear_speed\n \tmsg_cmd_vel.linear.y = 0.0\n \tmsg_cmd_vel.linear.z = 0.0\n \t# Angular velocity\n \tmsg_cmd_vel.angular.x = 0.0\n \tmsg_cmd_vel.angular.y = 0.0\n \tmsg_cmd_vel.angular.z = angular_speed\n ### Publishes the message\n self.cmd_vel.publish(msg_cmd_vel)", "def publish(self):\n msg = PolynomialTrajectory()\n\n if self.trajectory_constructed == True: \n t = time.time()-self.traj_t0\n\n if t <= self.seg_times[-1]: \n segment = map(lambda x:x > t, self.seg_times).index(True)\n else: \n segment = self.no_of_segments; t = self.seg_times[-1]\n \n rospy.loginfo('the value of t and segment is:%f, %d', t, segment)\n #rospy.loginfo('segment times are:%f, %f, %f, %f, %f, %f', self.segment_times[0], \\\n #self.segment_times[1], self.segment_times[2], self.segment_times[3], self.segment_times[4], self.segment_times[5])\n p1 = self.pc_x; p2 = self.pc_y; p3 = self.pc_z\n N = self.N+1\n p1 = [p1[i:i + N] for i in range(0, len(p1), N)]\n [i.reverse() for i in p1]\n \n p2 = [p2[i:i + N] for i in range(0, len(p2), N)]\n [i.reverse() for i in p2]\n \n p3 = [p3[i:i + N] for i in range(0, len(p3), N)]\n [i.reverse() for i in p3]\n xx = np.poly1d(p1[segment-1]); vx = np.polyder(xx, 1); ax = np.polyder(xx, 2)\n yy = np.poly1d(p2[segment-1]); vy = np.polyder(yy, 1); ay = np.polyder(yy, 2)\n zz = np.poly1d(p3[segment-1]); vz = np.polyder(zz, 1); az = np.polyder(zz, 2)\n \n msg.header.stamp = rospy.Time.now()\n msg.pdes.x = xx(t); msg.pdes.y = yy(t); msg.pdes.z = zz(t)\n msg.vdes.x = vx(t); msg.vdes.y = vy(t); msg.ades.z = vz(t)\n msg.ades.x = ax(t); msg.ades.y = ay(t); msg.ades.z = az(t)\n msg.ddes.x = 1; msg.ddes.y = 0; msg.ddes.z = 0\n msg.controller = 0\n \n self.pub.publish(msg)\n\n else: \n rospy.loginfo('no goal is published yet')", "def smooth_linear_ramp(t, kinematic_parameters):\n ramp_stage_acceleration = kinematic_parameters[0]\n ramp_start_time = kinematic_parameters[1]\n i_ramp_end_time = kinematic_parameters[2]\n steady_end_time = kinematic_parameters[3]\n end_ramp_end_time = kinematic_parameters[4]\n smooth_factor = kinematic_parameters[5]\n ramp_mode = kinematic_parameters[6]\n ramp_constant_time = kinematic_parameters[7]\n pitch_mode = kinematic_parameters[8]\n pitch_time = kinematic_parameters[9]\n pitch_delay_time_fraction = 
kinematic_parameters[10]\n pitch_acceleration = kinematic_parameters[11]\n pitch_acc_time_fraction = kinematic_parameters[12]\n section_location = kinematic_parameters[13]\n bstroke = kinematic_parameters[14]\n\n def logcosh(x):\n # s always has real part >= 0\n s = np.sign(x) * x\n p = np.exp(-2 * s)\n return s + np.log1p(p) - np.log(2)\n\n def omega(x):\n \"\"\"linear ramp rotation speed function\"\"\"\n # if ramp_start_time - ramp_constant_time <= x <= end_ramp_end_time + ramp_constant_time:\n # f_t0 = smooth_factor * (x - ramp_start_time)\n # f_t1 = smooth_factor * (x - i_ramp_end_time)\n # if ramp_mode == 'with_end_acc':\n # f_t2 = smooth_factor * (x - steady_end_time)\n # f_t3 = smooth_factor * (x - end_ramp_end_time)\n # elif ramp_mode == 'no_end_acc':\n # f_t2 = smooth_factor * ramp_start_time\n # f_t3 = smooth_factor * i_ramp_end_time\n\n # omegax = (ramp_stage_acceleration / 2) / smooth_factor * (\n # logcosh(f_t0) - logcosh(f_t1) + logcosh(f_t3) - logcosh(f_t2))\n # else:\n # if bstroke == 'yes' and x <= 2 * (end_ramp_end_time +\n # ramp_constant_time):\n # x -= end_ramp_end_time + ramp_constant_time\n # f_t0 = smooth_factor * (x - ramp_start_time)\n # f_t1 = smooth_factor * (x - i_ramp_end_time)\n # if ramp_mode == 'with_end_acc':\n # f_t2 = smooth_factor * (x - steady_end_time)\n # f_t3 = smooth_factor * (x - end_ramp_end_time)\n # elif ramp_mode == 'no_end_acc':\n # f_t2 = smooth_factor * ramp_start_time\n # f_t3 = smooth_factor * i_ramp_end_time\n\n # omegax = -(ramp_stage_acceleration / 2) / smooth_factor * (\n # logcosh(f_t0) - logcosh(f_t1) + logcosh(f_t3) -\n # logcosh(f_t2))\n # else:\n # omegax = 0\n\n if bstroke == 'no':\n f_t0 = smooth_factor * (x - ramp_start_time)\n f_t1 = smooth_factor * (x - i_ramp_end_time)\n if ramp_mode == 'with_end_acc':\n f_t2 = smooth_factor * (x - steady_end_time)\n f_t3 = smooth_factor * (x - end_ramp_end_time)\n elif ramp_mode == 'no_end_acc':\n f_t2 = smooth_factor * ramp_start_time\n f_t3 = smooth_factor * i_ramp_end_time\n\n omegax = (ramp_stage_acceleration / 2) / smooth_factor * (\n logcosh(f_t0) - logcosh(f_t1) + logcosh(f_t3) - logcosh(f_t2))\n\n else:\n if x <= end_ramp_end_time + ramp_constant_time:\n f_t0 = smooth_factor * (x - ramp_start_time)\n f_t1 = smooth_factor * (x - i_ramp_end_time)\n if ramp_mode == 'with_end_acc':\n f_t2 = smooth_factor * (x - steady_end_time)\n f_t3 = smooth_factor * (x - end_ramp_end_time)\n elif ramp_mode == 'no_end_acc':\n f_t2 = smooth_factor * ramp_start_time\n f_t3 = smooth_factor * i_ramp_end_time\n\n omegax = (ramp_stage_acceleration /\n 2) / smooth_factor * (logcosh(f_t0) - logcosh(f_t1) +\n logcosh(f_t3) - logcosh(f_t2))\n\n else:\n x -= end_ramp_end_time + ramp_constant_time\n f_t0 = smooth_factor * (x - ramp_start_time)\n f_t1 = smooth_factor * (x - i_ramp_end_time)\n if ramp_mode == 'with_end_acc':\n f_t2 = smooth_factor * (x - steady_end_time)\n f_t3 = smooth_factor * (x - end_ramp_end_time)\n elif ramp_mode == 'no_end_acc':\n f_t2 = smooth_factor * ramp_start_time\n f_t3 = smooth_factor * i_ramp_end_time\n\n omegax = -(ramp_stage_acceleration / 2) / smooth_factor * (\n logcosh(f_t0) - logcosh(f_t1) + logcosh(f_t3) -\n logcosh(f_t2))\n\n return omegax\n\n steady_rotation_omega = omega((i_ramp_end_time + steady_end_time) / 2)\n omega_print = steady_rotation_omega * np.pi / 180\n print('steady revolving omega = %s' % omega_print)\n\n dphi_data = []\n for ti in t:\n dphi_data.append(omega(ti))\n dphi_spl = UnivariateSpline(t, dphi_data, s=0)\n\n def ddphi(x):\n \"\"\"flapping angular 
acceleration function\"\"\"\n return dphi_spl.derivatives(x)[1]\n\n ramp_angle = dphi_spl.integral(0, i_ramp_end_time)\n print('initial linear ramp angle = %s' % ramp_angle)\n\n if ramp_mode == 'with_end_acc':\n end_ramp_angle = dphi_spl.integral(\n steady_end_time, end_ramp_end_time + ramp_constant_time)\n print('end linear ramp angle = %s' % end_ramp_angle)\n\n stroke_angle = dphi_spl.integral(0, end_ramp_end_time + ramp_constant_time)\n st_dist = np.abs(stroke_angle) * np.pi / 180 * section_location\n print('2d wing travel distance = %s' % st_dist)\n\n def phi(x):\n \"\"\"rotation angle function\"\"\"\n return dphi_spl.integral(0, x)\n\n #--pitching motion functions--\n if pitch_mode == 'with_end_pitch':\n pitch_delay_time = (pitch_time +\n 2 * ramp_constant_time) * pitch_delay_time_fraction\n pitch_acc_time = pitch_time * pitch_acc_time_fraction / 2\n\n pitch_start_time = end_ramp_end_time - pitch_time + pitch_delay_time\n p_acc_end_time = pitch_start_time + pitch_acc_time\n pitch_end_time = pitch_start_time + pitch_time\n p_decc_start_time = pitch_end_time - pitch_acc_time\n\n def dalf(x):\n \"\"\"linear ramp pitch speed function\"\"\"\n # if pitch_start_time - ramp_constant_time <= x <= pitch_end_time + ramp_constant_time:\n # f_t0 = smooth_factor * (x - pitch_start_time)\n # f_t1 = smooth_factor * (x - p_acc_end_time)\n # f_t2 = smooth_factor * (x - p_decc_start_time)\n # f_t3 = smooth_factor * (x - pitch_end_time)\n\n # dalfx = (pitch_acceleration /\n # 2) / smooth_factor * (logcosh(f_t0) - logcosh(f_t1) +\n # logcosh(f_t3) - logcosh(f_t2))\n # else:\n # dalfx = 0\n f_t0 = smooth_factor * (x - pitch_start_time)\n f_t1 = smooth_factor * (x - p_acc_end_time)\n f_t2 = smooth_factor * (x - p_decc_start_time)\n f_t3 = smooth_factor * (x - pitch_end_time)\n\n dalfx = (pitch_acceleration / 2) / smooth_factor * (\n logcosh(f_t0) - logcosh(f_t1) + logcosh(f_t3) - logcosh(f_t2))\n return dalfx\n\n dalf_data = []\n for ti in t:\n dalf_data.append(dalf(ti))\n dalf_spl = UnivariateSpline(t, dalf_data, s=0)\n\n pitch_angle = dalf_spl.integral(pitch_start_time - ramp_constant_time,\n pitch_end_time + ramp_constant_time)\n\n print('wing pitch angle = %s' % np.abs(pitch_angle))\n\n steady_pitching_omega = dalf((pitch_start_time + pitch_end_time) / 2)\n omega_print = steady_pitching_omega * np.pi / 180\n print('steady wing pitch omega = %s\\n' % omega_print)\n\n def ddalf(x):\n \"\"\"flapping angular acceleration function\"\"\"\n return dalf_spl.derivatives(x)[1]\n\n def alf(x):\n \"\"\"rotation angle function\"\"\"\n return dalf_spl.integral(0, x)\n\n kinematic_angles = []\n for ti in t:\n if pitch_mode == 'no_end_pitch':\n kinematic_anglesi = [-phi(ti), 0, -omega(ti), 0, -ddphi(ti), 0]\n elif pitch_mode == 'with_end_pitch':\n kinematic_anglesi = [\n -phi(ti), -alf(ti), -omega(ti), -dalf(ti), -ddphi(ti),\n -ddalf(ti)\n ]\n kinematic_angles.append(kinematic_anglesi)\n\n return kinematic_angles", "def __stream_triggered(self):\n # Call this every time period\n thread = Timer(self.stream_time, self.__stream_triggered)\n thread.start()\n self.__threads.append(thread)\n\n if len(self.__spike_buffer) > 2:\n speed = self.__get_speed()\n print(speed)\n self.__stream_send(speed)", "def wrench_stamped_cb(self, ws):\n force_vec = np.array([ws.wrench.force.x, ws.wrench.force.y, ws.wrench.force.z])\n scaled_vec = np.multiply(force_vec, self.scaling)\n mag = np.linalg.norm(force_vec)\n normalized_vec = np.divide(force_vec,mag)\n \n ta = TaxelArray()\n ta.header.frame_id = '/l_netft_frame' #self.ft_link_name\n 
ta.header.stamp = rospy.Time.now()\n ta.sensor_type = 'force'\n ta.link_names = ['wrist_roll']\n ta.centers_x = [0.]\n ta.centers_y = [0.]\n ta.centers_z = [0.]\n ta.normals_x = [-normalized_vec[0]]\n ta.normals_y = [-normalized_vec[1]]\n ta.normals_z = [-normalized_vec[2]]\n ta.values_x = [-scaled_vec[0]]\n ta.values_y = [-scaled_vec[1]]\n ta.values_z = [-scaled_vec[2]]\n \n self.taxel_array_pub.publish(ta)\n\n m3ta = TaxelArray()\n m3ta.header.frame_id = '/l_netft_frame'\n m3ta.header.stamp = rospy.Time.now()\n m3ta.sensor_type = 'force'\n m3ta.link_names = ['wrist_roll']\n m3ta.centers_x = [0.]\n m3ta.centers_y = [0.]\n m3ta.centers_z = [0.]\n m3ta.normals_x = [normalized_vec[0]]\n m3ta.normals_y = [normalized_vec[1]]\n m3ta.normals_z = [normalized_vec[2]]\n m3ta.values_x = [scaled_vec[0]]\n m3ta.values_y = [scaled_vec[1]]\n m3ta.values_z = [scaled_vec[2]]\n \n self.m3_taxel_array_pub.publish(m3ta)", "def publish():\n while True:\n mqttClient.reconnect()\n\n energy_data = getEnergyUsage()\n wats = float(energy_data['power_mw']) / 1000\n wat_hours = float(energy_data['total_wh'])\n\n sentPayload(name=\"power\", site=\"bathroom\", value=wats)\n sentPayload(name=\"energy_total\", site=\"bathroom\", value=wat_hours)\n\n time.sleep(updateInterval)", "def calculate_delay(self, wav_start, wav_finish, thr_start, thr_finish):\n\n w_s=self.find_nearest_wav(wav_start)\n w_f=self.find_nearest_wav(wav_finish)\n temp=self.pre_proc_data.loc[:,w_s]\n t_start = self.times[(temp.values>thr_start).argmax()]\n print(t_start)\n\n temp2=self.pre_proc_data.loc[:,w_f]\n dx=temp2.diff()\n dx_clean=dx.ewm(span = 50).mean()\n t_finish=self.times[np.min(np.where(dx_clean<thr_finish))]\n print(t_finish)\n\n plt.subplot(211)\n plt.plot(temp,label='{}nm'.format(wav_start))\n plt.axvline(t_finish,color='grey')\n plt.axvline(t_start,color='grey')\n plt.xlim(t_start-30,t_finish+30)\n plt.legend()\n\n plt.subplot(212)\n plt.plot(temp2,label='{}nm'.format(wav_finish))\n plt.axvline(t_finish,color='grey')\n plt.axvline(t_start,color='grey')\n plt.xlim(t_start-30,t_finish+30)\n plt.legend()\n\n plt.show()\n\n self.t_delay=np.round(t_finish-t_start,2)\n return np.round(t_finish-t_start,2)", "def start_transmit(self):\n\n # Set publishing rate\n self.r = rospy.Rate(50) # 50Hz\n \n quitting = False\n while not rospy.is_shutdown() and not quitting:\n try:\n # JointState message to publish joint positions\n js_msg = self.build_joint_state_msg()\n \n # PoseStamped messages to publish position and \n # orientation of each joint\n ps_msg = self.build_pose_stamped_msg()\n \n # TODO: TwistStamped messages to publish linear and\n # angular velocities of each joint\n ts_msg = TwistStamped()\n\n # Publish the messages\n self.js_pub.publish(js_msg)\n self.ps_pub.publish(ps_msg)\n\n # TODO: Publish TwistStamped\n # self.ts_pub.publish(ts_msg)\n self.r.sleep()\n self.t += 0.01 # automated tests time var\n \n except KeyboardInterrupt:\n LOG.e(\"KeyboardInterrupt detected\", \"start_transmit\")\n quitting = True\n\n LOG.d(\"Quit command sent to client\", \"main\")\n raise QuitMessageException(\"Quit message received from client\")", "def send_fft_osc(self):\n self.client.send_message(\"/fft_train\", list(self.fft_bins_y))", "def syns(alpha=0.1, rate=10, delay=0, dur=50, amp=1.0, dt=0.020, N=1, mindur = 120, makewave=True):\n deadtime = 0.7\n if dur + delay < mindur:\n tvec = np.arange(0.0, mindur , dt)\n else:\n tvec = np.arange(0.0, dur+delay , dt)\n npts = len(tvec)\n ta = np.arange(0.0, 20.0, dt)\n aw = ta * alpha* np.exp(-ta/alpha)/alpha 
# alpha waveform time course\n spt = [[]]*N # list of spike times\n wave = np.array([]) # waveform\n sptime=[]\n for j in range(0,N):\n done = False\n t=0.0\n nsp = 0\n while not done:\n a = np.random.sample(1)\n if t < delay:\n t = delay\n continue\n if t >= delay and t <= (delay+dur):\n ti = -np.log(a)/(rate/1000.0) # convert to exponential distribution with rate\n if ti < deadtime:\n continue\n t = t + ti # running time\n if t > delay+dur:\n done = True\n continue\n if nsp is 0:\n sptime = t\n nsp = nsp+1\n else:\n sptime = np.append(sptime, t)\n nsp = nsp+1\n if j is 0:\n wavej = np.zeros(len(tvec))\n for i in range(0,len(sptime)):\n st = int(sptime[i]/dt)\n wavej[st] = wavej[st] + 1\n spt[j] = sptime\n\n if makewave:\n w = np.convolve(wavej, aw/max(aw))*amp\n if len(w) < npts:\n w = np.append(w, np.zeros(npts-len(w)))\n if len(w) > npts:\n w = w[0:npts]\n if j is 0:\n wave = w\n else:\n wave = wave + w\n return (spt, wave, tvec, N)", "def make_signal(self, waveform):\n\n #print >> sys.stdout, \"generating signal...\"\n\n # --- Set up timing\n\n # index of the absolute maximum peak\n #idx = np.concatenate(np.argwhere(abs(waveform.hplus.data.data)>0))[0]\n idx = np.argmax(abs(waveform.hplus.data))\n\n # Epoch = GPS start of time series. Want the peak time of the waveform\n # to be aligned to the geocenter, so set the epoch to the geocentric\n # peak time minus the time to the waveform peak. In other words:\n # (waveform epoch) = (geocentric peak time) - (# of seconds to peak)\n\n hplus_epoch = self.ext_params.geocent_peak_time - idx*waveform.hplus.delta_t\n hcross_epoch = self.ext_params.geocent_peak_time - idx*waveform.hcross.delta_t\n\n # XXX: create regular lal timeseries objects for this bit (may replace\n # with pycbc injection routines later)\n\n hplus = lal.CreateREAL8TimeSeries('hplus', hplus_epoch, 0,\n waveform.hplus.delta_t, lal.StrainUnit,\n int(waveform.hplus.duration / waveform.hplus.delta_t))\n hplus.data.data = np.array(waveform.hplus.data)\n\n hcross = lal.CreateREAL8TimeSeries('hcross', hcross_epoch, 0,\n waveform.hcross.delta_t, lal.StrainUnit,\n int(waveform.hcross.duration / waveform.hcross.delta_t))\n hcross.data.data = np.array(waveform.hcross.data)\n\n\n if self.taper is True:\n\n print >> sys.stderr, \"Warning: tapering out inspiral (not a realistic strategy)\"\n delay = 0.0e-3\n idx = np.argmax(hplus.data.data) + \\\n np.ceil(delay/self.delta_t)\n hplus.data.data[0:idx]=0.0\n hcross.data.data[0:idx]=0.0\n lalsim.SimInspiralREAL8WaveTaper(hplus.data,\n lalsim.SIM_INSPIRAL_TAPER_START)\n lalsim.SimInspiralREAL8WaveTaper(hcross.data,\n lalsim.SIM_INSPIRAL_TAPER_START)\n\n\n # Scale for distance (waveforms extracted at 20 Mpc)\n hplus.data.data *= 20.0 / self.ext_params.distance\n hcross.data.data *= 20.0 / self.ext_params.distance\n\n tmp = lalsim.SimDetectorStrainREAL8TimeSeries(hplus, hcross,\n self.ext_params.ra, self.ext_params.dec,\n self.ext_params.polarization, self.det_site) \n\n # Pad the end so we have the same length signal and noise (useful for\n # snr and psds)\n sigdata = np.zeros(len(self.td_noise))\n sigdata[:len(tmp.data.data)] = np.copy(tmp.data.data)\n\n # Project waveform onto these extrinsic parameters\n self.td_signal = \\\n pycbc.types.timeseries.TimeSeries(initial_array=sigdata,\n delta_t=tmp.deltaT, epoch=tmp.epoch)\n\n del tmp\n\n # Remove extraneous data\n #self.td_signal = self.td_signal.trim_zeros()", "def surf_tts(distance, start_time):\n deltas = np.arange(0., 140., 5.)\n tts = 60. 
* np.array(\n [0., 2., 4., 6.2, 8.4, 11., 13., 15.2, 17.8, 19.4, 22., 24.1, 26.6,\n 28.6, 30.8, 33., 35.6, 37.4, 39.8, 42., 44.2, 46.4, 48.8, 50.9, 53.6,\n 55.2, 57.8, 60.])\n (mval, nval) = np.polyfit(deltas, tts, 1)\n # calculate surface wave travel times for degrees 1 to 180 ?\n surftts = mval * np.arange(0., 180.1, 0.01)\n difer = []\n for i4 in xrange(0, len(surftts)):\n dife_r = abs(0.001 * distance / 111.11 - np.arange(0., 180.1, 0.01)\n [i4])\n difer.append(dife_r)\n # love wave arrival: event time + surftts for closest degree??\n # (smallest difference between distance for surftts and actual distance of\n # event)\n arriv_lov = np.floor(start_time + surftts[np.asarray(difer).argmin()])\n diferans = []\n for i1 in xrange(len(deltas)):\n dif2 = abs(np.arange(0., 180.1, 0.01)[np.asarray(difer).argmin()] -\n deltas[i1])\n diferans.append(dif2)\n # arrival = love wave arrival - p arrival?\n peq = surftts[np.asarray(difer).argmin()] - \\\n tts[np.asarray(diferans).argmin()]\n arrival = arriv_lov + peq\n\n return arrival", "def morletft(s, w, w0, dt):\n \n p = 0.75112554446494251 # pi**(-1.0/4.0)\n wavelet = np.zeros((s.shape[0], w.shape[0]))\n pos = w > 0\n\n for i in range(s.shape[0]):\n n = normalization(s[i], dt)\n wavelet[i][pos] = n * p * np.exp(-(s[i] * w[pos] - w0)**2 / 2.0)\n \n return wavelet", "def __init__(self, name, freq, waypoint_specified, waypoint_bc):\n self.dt = 1.0/freq\n self.uav = name\n self.wp_specfied = waypoint_specified\n self.wp_bc = waypoint_bc\n self.start_time = time.time()\n self.average_speed = 3.0\n self.reduced_speed = 0.5\n # specify start/intermediate/end points and its deviratives \n self.no_of_segments = 7\n self.wp_callback_counter = 0\n self.trajectory_constructed = False\n\n self.r = 4 # corresponding to snap which is 4th derivative\n self.N = 7# degree of polynomial \n \n self.pub = rospy.Publisher('/'+self.uav+'/PolynomialTrajectory', PolynomialTrajectory, queue_size = 1, tcp_nodelay = True)\n \n rospy.Subscriber('/'+self.uav + '/odometry_sensor1/odometry', Odometry, self.currentstate_callback, queue_size = 1, tcp_nodelay = True) \n rospy.Subscriber('/'+self.uav+'/waypoint_publisher', Pose, self.waypoint_callback, queue_size = 1, tcp_nodelay=True)\n\n #try: \n # rospy.Subscriber('/'+self.uav+'/waypoint_publisher', Pose, self.waypoint_callback, queue_size = 1, tcp_nodelay=True)\n # rospy.Subscriber('/'+self.uav + '/odometry_sensor/odometry', Odometry, self.currentstate_callback, queue_size = 1, tcp_nodelay = True)\n #except: \n # print 'Either waypoints or odometry is not available.'", "def Delay( X, delay_time, feedback_amt, wetdry, apply_fb_input=True, rate=SR ):\n\n # convert inputs if scalars into np arrays\n delay_time = delay_time * np.ones(len(X)) if np.isscalar(delay_time) else delay_time\n feedback_amt = feedback_amt * np.ones(len(X)) if np.isscalar(feedback_amt) else feedback_amt\n wetdry = wetdry * np.ones(len(X)) if np.isscalar(wetdry) else wetdry\n\n # convert delay time to delay in samples\n # not implemented yet, but eventually would be good to interpolate\n maxdelay = np.max(delay_time)\n # delay_samps = np.array(delay_time*SR).astype(int)\n delay_samps = delay_time * SR\n\n # create circular buffer with appropriate size\n buffer_size = int( math.ceil( math.log(maxdelay*rate,2) ) )\n # print(buffer_size)\n delay_size = int(round(maxdelay*SR)) # approximate for now\n delaybuff = Circ_buffer( buffer_size=buffer_size, delay_size=delay_size )\n\n # make output vec\n output_sig = np.zeros(len(X)).astype(int)\n\n # process signal\n for 
ii in range(len(X)):\n\n # read delayed value\n delay_prev_samp = math.ceil( delay_samps[ii] )\n prev_samp = delaybuff.read_value( delay_prev_samp )\n delay_next_samp = delay_prev_samp - 1\n next_samp = delaybuff.read_value(delay_next_samp)\n output_sig[ii] = prev_samp + ( (next_samp - prev_samp) * (delay_prev_samp - delay_samps[ii]) )\n\n # calculate value to write\n if apply_fb_input:\n cur_value = (X[ii] + output_sig[ii]) * feedback_amt[ii]\n else:\n cur_value = X[ii] + output_sig[ii]*feedback_amt[ii]\n\n # write to buffer\n delaybuff.write_value(int(cur_value))\n\n # end for loop\n\n # return output\n return np.array(output_sig * wetdry + X * (1-wetdry)).astype(np.int16)", "def spew_fake_data(self, ideal_datapoint=None):\n if not ideal_datapoint:\n ideal_datapoint = 0\n for chan, wsock in self._chan_to_wsocket.items():\n if chan.stream_type == \"Integrated\":\n length = 1\n data = 0.5 + 0.1*(np.random.random(length).astype(chan.dtype) + 1j*np.random.random(length).astype(chan.dtype)) + ideal_datapoint\n elif chan.stream_type == \"Demodulated\":\n length = int(self._lib.record_length/32)\n data = np.zeros(length, dtype=chan.dtype)\n data[int(length/4):int(3*length/4)] = 1.0\n data += 0.1*(np.random.random(length) + 1j*np.random.random(length))\n else: #Raw\n length = int(self._lib.record_length/4)\n signal = np.sin(np.linspace(0,10.0*np.pi,int(length/2)))\n data = np.zeros(length, dtype=chan.dtype)\n data[int(length/4):int(length/4)+len(signal)] = signal\n data += 0.1*np.random.random(length)\n wsock.send(struct.pack('n', length*data.dtype.itemsize) + data.tostring())", "def send_temp(context):\n job = context.job\n t1 = __sauna.control.getPortValue(\"Temperature Sensor 2\")\n t2 = float(\"{:.1f}\".format(t1))\n context.bot.send_message(job.context, text=\"Current Temp \" + str(t2) + \" Grad\")", "def TempoSyncFeedForwardEcho(Input, bpm, SampleRate, delayGain=0.2):\r\n # Delay length\r\n bps = bpm / 60.0\r\n spb = 1 / float(bps)\r\n\r\n # Note duration\r\n noteDuration = 0.5\r\n\r\n # samples\r\n samplesOfDelay = int((noteDuration * spb) * SampleRate)\r\n\r\n\r\n #\r\n output = np.zeros(len(Input))\r\n for i in range(len(Input)):\r\n if(i<samplesOfDelay):\r\n output[i] = Input[i]\r\n else:\r\n output[i] = Input[i] + delayGain * Input[i-samplesOfDelay] \r\n return output", "def generate_linear_trace(self, min_queries, min_duration, qps):\n timestamp = 0\n arrival = []\n timestep = 1 / qps\n while timestamp < min_duration and len(arrival) < min_queries:\n timestamp += timestep\n arrival.append(timestep)\n self.arrival = arrival", "def WaterVaporTransmission(T,P,n_wv,wavelength,dr,freq_lim=np.array([lp.c/828.5e-9,lp.c/828e-9]),sim_nu=np.array([])):\n \n if sim_nu.size==0:\n sim_nu = np.arange(-3e9,3e9,20e6)\n \n ext_wv = rb.ExtinctionFromHITRAN(lp.c/wavelength+sim_nu,T,P,(lp.mH2O*1e-3)/lp.N_A,nuLim=freq_lim,freqnorm=True).T\n T_wv = np.exp(-np.cumsum(n_wv[np.newaxis,:]*ext_wv,axis=1)*dr)\n \n return T_wv,sim_nu", "def sendEnergy(self):\n if len(self.controller.myContainer.vrms) != 0:\n vrms = sum(self.controller.myContainer.vrms) / len(self.controller.myContainer.vrms)\n irms = sum(self.controller.myContainer.irms) / len(self.controller.myContainer.irms)\n watts = sum(self.controller.myContainer.watts) / len(self.controller.myContainer.watts)\n else:\n vrms = irms = watts = 0\n payload = ('{\"ts\": '+ str(int(time())) + ', \"ace\": ' + str(self.controller.myContainer.ace_accum)\n + ', \"dce\": ' + str(self.controller.myContainer.dce_accum)+\n ', \"data\": { \"watt\": ' + str(watts) + ', 
\"vrms\": '+ str(vrms) + ', \"irms\": '+ str(irms) + ' }}' )\n\n res, self.midEnergy = self.client.publish(self.pubEnergy, payload, qos=1, retain=False)\n if debug: print(\"Sent: \", payload , \"on\", self.pubEnergy, \"mid: \", self.midEnergy)\n self.controller.myContainer.resetEnergyAccumulators()\n filename = self.pubEnergy.replace(\"/\", \"-\") + \".txt\"\n if self.storeEnergyLocal:\n f = open(filename, 'a+')\n f.write(self.lastEnergyPayload+\"\\n\")\n f.close()\n self.storeLocalEnergy = True\n self.lastEnergyPayload = payload", "def create_spectral_bandpass_interpol(interpol_wavelen, interpol_rad, center_wvl,\n save_dir):\n\n save_dir = os.path.join(save_dir, r'look_up_table')\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n\n center_wvl1 = np.arange(min(center_wvl), max(center_wvl), 2)\n\n\n\n\n for j in np.arange(0, interpol_wavelen.shape[1]):\n #print(j)\n dframe = pd.DataFrame()\n wavelen = interpol_wavelen[:, j]\n\n radiance = interpol_rad[:, j]\n sampled_wvl = np.arange(min(wavelen), max(wavelen), 0.01)\n fit_params = interp1d(wavelen, radiance, kind='slinear')\n fitted_val = fit_params(sampled_wvl)\n #peak_val = np.where(fitted_val==max(fitted_val))[0]\n #print(peak_val)\n #peak_shift = sampled_wvl[peak_val] - CW1[j]\n\n\n# if peak_shift >0:\n# sampled_wvl = sampled_wvl - peak_shift\n# elif peak_shift <0:\n# sampled_wvl = sampled_wvl + peak_shift\n# else:\n# sampled_wvl = sampled_wvl\n#\n# print(sampled_wvl[peak_val] - CW1[j])\n\n dframe['Wavelength'] = sampled_wvl\n dframe['Radiance'] = fitted_val\n dframe.round(4).to_csv(save_dir + '/' + 'bandpass_' + \\\n str(round(center_wvl1[j], 2))+'_nm.csv')\n plt.plot(sampled_wvl, fitted_val/np.max(fitted_val), 'g.--')\n plt.grid(True, linestyle=':')\n plt.xlabel('Wavelength (nm)')\n plt.ylabel('Normalized Spectral Response')\n plt.title('TEMPO Spectral Bandpass (WL = ' + str(round(center_wvl1[j], 2)) + ' nm)')\n plt.ylim(0, 1.1)\n plt.xlim(min(wavelen), max(wavelen))\n #plt.show()\n\n # Now let us save the spectral bandpass data and spectral bandpass plot\n plt.savefig(save_dir + '/' + 'bandpass_' + str(round(center_wvl1[j], 2))+'_nm.png',\n dpi=100)\n plt.close('all')", "def spilloverEff(freq,fD, FFBW, dB_at_bw, feed_type):\n theta0 = fD2angle(fD,units='degrees')\n tt = 0.0\n dtt = 0.1\n theta = np.arange(0.0,180.0+dtt,dtt)\n g = feedPattern(freq, theta, FFBW, dB_at_bw, feed_type)\n theta = theta*math.pi/180.0\n\n # integrate over main beam\n gmb = np.where(theta < (theta0/2.0)*math.pi/180.0)\n kern = g[gmb]*np.sin(theta[gmb]) \n num = integ.trapz(kern,dx=dtt*math.pi/180.0)\n # integrate over full beam\n kern = g*np.sin(theta)\n den = integ.trapz(kern,dx=dtt*math.pi/180.0)\n \n n_spill = num/den\n return n_spill", "def create_msgs():\n getcontext().prec = 3 # will round to 3 decimal places\n orig_times = sorted(dat)\n for n in range(len(dat) - 1):\n linfun = interp1d([orig_times[n], orig_times[n+1]], \\\n [dat[orig_times[n]], dat[orig_times[n+1]]])\n dt = orig_times[n+1] - orig_times[n] # current\n freq = 1/dt # current\n if dt < (1/desHz):\n print('found instance where Freq already at/above desired Freq')\n else:\n new_dt = dt*freq/desHz\n new_times = linspace(orig_times[n],orig_times[n+1],floor(dt/new_dt))\n # print(new_times)\n new_values = linfun(new_times)\n # rounded_values = [float(Decimal(\"%.3f\" % e)) for e in new_values]\n rounded_times = [float(Decimal(\"%.3f\" % e)) for e in new_times]\n for m in range(len(rounded_times)):\n # this_time = int(new_times[m]*100000)/100000 # 5 decimal places in timstamp\n 
self.outData[sens][meas][rounded_times[m]] = new_values[m]", "def test_synth_simple():\n \n twd = tempfile.mkdtemp(dir=os.getcwd()+\"/tmp\")\n print(twd)\n \n wmin, wmax, dwl = 6700, 6720, 0.01\n ll = turbopy.TSLineList(os.path.join(data_path, \"vald-6700-6720.list\"))\n atmo = turbopy.MARCSModel.load(os.path.join(data_path, \"sun.mod\"))\n atmo.Teff = 5777\n atmo.logg = 4.44\n atmo.MH = 0.0\n atmo.AM = 0.0\n wave, norm, flux = turbopy.run_synth(wmin, wmax, dwl,\n atmosphere=atmo, vt=1.0,\n linelist=ll, twd=twd)\n \n wave2, norm2, flux2 = turbopy.run_synth(wmin, wmax, dwl,\n [12.0, 0.4], [6.0, 1.0], [8.0, 1.0],\n atmosphere=atmo, vt=1.0,\n linelist=ll, twd=twd)\n \"\"\"\n import matplotlib.pyplot as plt\n fig = plt.figure()\n plt.plot(wave, norm, 'k-', lw=3)\n plt.plot(wave, norm2, 'r-', lw=1)\n fig.savefig(\"test3.pdf\")\n plt.close(fig)\n \"\"\"", "def run_get_waveform(self):\r\n \r\n #c = self.client\r\n event = self.ev\r\n ref_time_place = self.ref_time_place\r\n\r\n evtime = event.origins[0].time\r\n reftime = ref_time_place.origins[0].time\r\n\r\n if self.ifmass_downloader is True:\r\n domain = CircularDomain(latitude=self.elat, longitude=self.elon,\r\n minradius=kilometer2degrees(self.min_dist), maxradius=kilometer2degrees(self.max_dist))\r\n \r\n restrictions = Restrictions(\r\n starttime = reftime - self.tbefore_sec,\r\n endtime = reftime + self.tafter_sec,\r\n station_starttime = None,\r\n station_endtime = None,\r\n chunklength_in_sec = None,\r\n network = self.network,\r\n station = self.station,\r\n location = self.location,\r\n channel = self.channel,\r\n #exclude_networks = (),\r\n #exclude_stations = (),\r\n #limit_stations_to_inventory=None,\r\n reject_channels_with_gaps=False,\r\n minimum_length = 0.0,\r\n sanitize = True,\r\n minimum_interstation_distance_in_m = 0,\r\n #channel_priorities=(),\r\n #location_priorities=())\r\n\t\t)\r\n\r\n mdl = MassDownloader()\r\n \r\n outdir = './' + self.evname\r\n mdl.download(domain, restrictions, \r\n mseed_storage=outdir+\"/mass_downloader/waveforms\", \r\n stationxml_storage=outdir+\"/mass_downloader/stations\", \r\n download_chunk_size_in_mb=20, threads_per_client=3, print_report=True)\r\n\r\n inventory = get_inventory_from_xml(outdir+\"/mass_downloader/stations\")\r\n stream_raw = get_streams_from_dir(outdir+\"/mass_downloader/waveforms\")\r\n\r\n print(inventory)\r\n phases = self.phases\r\n \r\n t1s, t2s= get_phase_arrival_times(inventory,event,self.phases,\r\n self.phase_window,self.taupmodel,\r\n reftime,self.tbefore_sec,self.tafter_sec)\r\n\r\n # Add deprecation warning\r\n if self.idb is not None:\r\n print('WARNING: Instead of idb use which client you want to use \\n'\\\r\n ' By default ev_info.client_name is set to IRIS')\r\n if self.idb == 3:\r\n self.client_name = \"LLNL\"\r\n \r\n if self.client_name != \"LLNL\" and self.ifmass_downloader is False:\r\n # Send request to client\r\n # There might be other way to do this using 'RoutingClient'\r\n print(\"DATABASE >>> Sending request to\",self.client_name,\"client for data\")\r\n c = self.client\r\n print(c)\r\n \r\n # Check if stations chosen are correct\r\n # Example: NCEDC does not understand '-XXX' station code\r\n if self.client_name == \"NCEDC\":\r\n if '-' in self.station:\r\n raise ValueError(\"NCEDC client does not take '-' in station code\")\r\n\r\n if self.client_name == \"IRIS\":\r\n if '*' in self.network:\r\n print(\"WARNING: You have chosen to search ALL networks at IRIS.\" \\\r\n \"This could take long!\")\r\n #-----------------------------\r\n if 
self.ifph5:\r\n STATION = 'http://service.iris.edu/ph5ws/station/1'\r\n c = fdsn.client.Client('http://service.iris.edu',\r\n service_mappings={\r\n 'station': STATION\r\n },\r\n debug=True\r\n )\r\n #-------------------\r\n # Download stations\r\n print(\"Download stations...\")\r\n stations = c.get_stations(network=self.network, location=self.location,\r\n station=self.station, channel=self.channel,\r\n starttime=reftime - self.tbefore_sec, \r\n endtime=reftime + self.tafter_sec,\r\n minlatitude=self.min_lat,\r\n maxlatitude=self.max_lat,\r\n minlongitude=self.min_lon,\r\n maxlongitude=self.max_lon,\r\n level=\"response\")\r\n inventory = stations # so that llnl and iris scripts can be combined\r\n\r\n if self.ifverbose:\r\n print(\"Printing stations\")\r\n print(stations)\r\n print(\"Done Printing stations...\")\r\n\r\n sta_limit_distance(ref_time_place, \r\n stations, \r\n min_dist=self.min_dist, \r\n max_dist=self.max_dist, \r\n min_az=self.min_az, \r\n max_az=self.max_az,\r\n ifverbose=self.ifverbose)\r\n #print(\"Printing stations NEW\")\r\n #print(stations)\r\n #print(\"Done Printing stations...\")\r\n \r\n #stations.plotprojection=\"local\")\r\n # Find P and S arrival times\r\n phases = self.phases\r\n \r\n t1s, t2s= get_phase_arrival_times(stations,event,self.phases,\r\n self.phase_window,self.taupmodel,\r\n reftime,self.tbefore_sec,self.tafter_sec)\r\n \r\n print(\"Downloading waveforms...\")\r\n # this needs to change\r\n bulk_list = make_bulk_list_from_stalist(stations,t1s,t2s, \r\n channel=self.channel)\r\n\r\n if self.ifph5:\r\n DATASELECT = 'http://service.iris.edu/ph5ws/dataselect/1'\r\n c = fdsn.client.Client('http://service.iris.edu',\r\n service_mappings={\r\n 'dataselect': DATASELECT\r\n },\r\n user = self.user,password = self.password,\r\n debug=True\r\n )\r\n stream_raw = c.get_waveforms(network=self.network, location=self.location,\r\n station=self.station, channel=self.channel,\r\n starttime=reftime - self.tbefore_sec, \r\n endtime=reftime + self.tafter_sec)\r\n else:\r\n stream_raw = c.get_waveforms_bulk(bulk_list)\r\n \r\n # save ev_info object\r\n pickle.dump(self,open(self.evname + '/' + \r\n self.evname + '_ev_info.obj', 'wb')) \r\n \r\n \r\n elif self.client_name==\"LLNL\" and self.ifmass_downloader is False:\r\n #client_name = \"LLNL\"\r\n print(\"Preparing request for LLNL ...\")\r\n \r\n # Get event an inventory from the LLNL DB.\r\n event_number = int(event.event_descriptions[0].text)\r\n # event = llnl_db_client.get_obspy_event(event)\r\n inventory = c.get_inventory()\r\n \r\n nsta_llnl = len(inventory.get_contents()[\"stations\"])\r\n print(\"--> Total stations in LLNL DB: %i\" % nsta_llnl)\r\n sta_limit_distance(event, inventory, \r\n min_dist=self.min_dist, \r\n max_dist=self.max_dist, \r\n min_az=self.min_az, \r\n max_az=self.max_az)\r\n print(\"--> Stations after filtering for distance: %i\" % (\r\n len(inventory.get_contents()[\"stations\"])))\r\n \r\n stations = set([sta.code for net in inventory for sta in net])\r\n \r\n _st = c.get_waveforms_for_event(event_number)\r\n stream_raw = obspy.Stream()\r\n for tr in _st:\r\n if tr.stats.station in stations:\r\n stream_raw.append(tr)\r\n \r\n # set reftime\r\n stream = obspy.Stream()\r\n stream = set_reftime(stream_raw, evtime)\r\n \r\n print(\"--> Adding SAC metadata...\")\r\n if self.ifverbose: print(stream.__str__(extended=True))\r\n st2 = add_sac_metadata(stream, client_name=self.client_name, ev=event, \r\n stalist=inventory, taup_model= self.taupmodel, \r\n phases=phases, phase_write = 
self.write_sac_phase)\r\n \r\n # Do some waveform QA\r\n do_waveform_QA(st2, self.client_name, event, evtime, \r\n self.tbefore_sec, self.tafter_sec)\r\n \r\n if self.demean:\r\n st2.detrend('demean')\r\n \r\n if self.detrend:\r\n st2.detrend('linear')\r\n \r\n if self.ifFilter:\r\n prefilter(st2, self.f1, self.f2, \r\n self.zerophase, self.corners, self.filter_type)\r\n \r\n if self.remove_response:\r\n resp_plot_remove(st2, self.ipre_filt, self.pre_filt, \r\n self.iplot_response, self.water_level,\r\n self.scale_factor, \r\n inventory, self.outformat, self.ifverbose)\r\n else:\r\n # output RAW waveforms\r\n decon=False\r\n print(\"WARNING -- NOT correcting for instrument response\")\r\n\r\n if self.scale_factor > 0:\r\n amp_rescale(st2, self.scale_factor)\r\n if self.client_name == \"LLNL\":\r\n amp_rescale_llnl(st2, self.scale_factor)\r\n\r\n\r\n # Set the sac header KEVNM with event name\r\n # This applies to the events from the LLNL database\r\n # NOTE this command is needed at the time of writing files, so it has to\r\n # be set early\r\n st2, evname_key = rename_if_LLNL_event(st2, evtime)\r\n self.evname = evname_key\r\n\r\n # save station plot\r\n # Note: Plotted are stations in the inventory and NOT the ones with the traces\r\n # It could be possible that there might not be waveforms for some of these stations.\r\n try:\r\n fig = inventory.plot(projection=\"local\", resolution=\"i\", label = False, show=False)\r\n Catalog([self.ev]).plot(fig=fig, outfile=self.evname + '/station_map.pdf')\r\n except:\r\n print(\"There is a problem with creating the station map!\")\r\n\r\n # Get list of unique stations + locaiton (example: 'KDAK.00')\r\n stalist = []\r\n for tr in stream.traces:\r\n if self.ifverbose: print(tr)\r\n station_key = \"%s.%s.%s.%s\" % (tr.stats.network, tr.stats.station,\r\n tr.stats.location, tr.stats.channel[:-1])\r\n stalist.append(station_key)\r\n\r\n # Crazy way of getting a unique list of stations\r\n stalist = list(set(stalist))\r\n\r\n # Resample\r\n if self.resample_TF == True:\r\n # NOTE !!! tell the user if BOTH commands are disabled NOTE !!!\r\n if (self.client_name == \"IRIS\"):\r\n resample(st2, freq=self.resample_freq)\r\n elif (self.client_name == \"LLNL\"):\r\n resample_cut(st2, self.resample_freq, evtime, self.tbefore_sec, self.tafter_sec)\r\n else:\r\n print(\"WARNING. Will not resample. 
Using original rate from the data\")\r\n\r\n # match start and end points for all traces\r\n st2 = trim_maxstart_minend(stalist, st2, self.client_name, event, evtime, \r\n self.resample_TF, self.resample_freq, \r\n self.tbefore_sec, self.tafter_sec, self.ifverbose)\r\n if len(st2) == 0:\r\n raise ValueError(\"no waveforms left to process!\")\r\n\r\n # save raw waveforms in SAC format\r\n if self.isave_raw:\r\n path_to_waveforms = evname_key + \"/RAW\"\r\n write_stream_sac_raw(stream_raw, path_to_waveforms, \r\n evname_key, self.client_name, event, stations=inventory)\r\n\r\n # Taper waveforms (optional; Generally used when data is noisy- example: HutchisonGhosh2016)\r\n # https://docs.obspy.org/master/packages/autogen/obspy.core.trace.Trace.taper.html\r\n # To get the same results as the default taper in SAC, use max_percentage=0.05 and leave type as hann.\r\n # Note: Tapering also happens while resampling (see util_write_cap.py)\r\n if self.taper:\r\n st2.taper(max_percentage=self.taper, type='hann',max_length=None, side='both')\r\n\r\n # save processed waveforms in SAC format\r\n # evname_key/RAW_processed = traces after waveform_QA + demean + detrend +\r\n # resample + remove response + filtering +\r\n # resampling + scaling + tapering\r\n # NOTE: The orientation is same as that of extracted waveforms\r\n # Waveforms are rotated to ENZ, in case they are not already orientated,\r\n # in the next step (self.rotateRTZ)\r\n if self.isave_raw_processed:\r\n path_to_waveforms = os.path.join(evname_key, 'RAW_processed')\r\n write_stream_sac(st2, path_to_waveforms, evname_key)\r\n\r\n # Rotate to ENZ (save: optional)\r\n #if self.rotateENZ:\r\n #st2 = rotate2ENZ(st2, evname_key, self.isave_ENZ, self.icreateNull, self.ifverbose)\r\n\r\n if self.rotateENZ:\r\n st2 = rotate2ENZ(st2, evname_key, self.isave_ENZ, self.icreateNull, self.ifverbose)\r\n\r\n # rotate to UVW and save\r\n if self.rotateUVW:\r\n rotate2UVW(st2, evname_key) \r\n\r\n # Rotate to RTZ and save\r\n if self.rotateRTZ:\r\n rotate2RTZ(st2, evname_key, self.ifverbose) \r\n \r\n\r\n # save CAP weight files\r\n if self.output_cap_weight_file:\r\n write_cap_weights(st2, evname_key, self.client_name, event, self.ifverbose)\r\n\r\n # save event info\r\n if self.output_event_info:\r\n write_ev_info(event, evname_key)\r\n\r\n # Plot spectrograms\r\n if self.ifplot_spectrogram:\r\n plot_spectrogram(st2, evname_key)\r\n\r\n # save pole zero file (Needed for MouseTrap)\r\n if self.ifsave_sacpaz:\r\n write_resp(inventory,evname_key)\r\n\r\n # save station inventory as XML file\r\n if self.ifsave_stationxml:\r\n xmlfilename = evname_key + \"/stations.xml\"\r\n try:\r\n inventory.write(xmlfilename, format=\"stationxml\", validate=True)\r\n except:\r\n print('Could not create stationxml file')\r\n \r\n # Path to the asdf_converter script \r\n if self.ifsave_asdf:\r\n # save RTZ\r\n asdf_filename = evname_key + \"/\" + evname_key + \".h5\"\r\n os.system(\"../asdf_converters/asdf_converters/sac2asdf.py \"\r\n + evname_key + \" \" + asdf_filename + \" observed\")\r\n # save NEZ\r\n nez_dir = evname_key + \"/ENZ/\"\r\n nez_asdf_filename = nez_dir + evname_key + \".h5\"\r\n os.system(\"../asdf_converters/asdf_converters/sac2asdf.py \"\r\n + nez_dir + \" \" + nez_asdf_filename + \" observed\")\r\n \r\n if self.remove_clipped:\r\n remove_clipped(evname_key)", "def sendMessage(self):\n #print('sendMessage\\r')\n self.pub.publish(Twist(linear=self.linearVector, angular=self.angularVector))", "def increaseFreq(self, desHz):\n from scipy.interpolate import 
interp1d\n import time\n from numpy import linspace, floor\n from decimal import getcontext, Decimal\n\n if desHz > 1000: # set max freq here \n raise ValueError('Max Frequency is 1000 (3 decimal places)')\n now = time.asctime(time.localtime(time.time())) \n stamp = ''.join(['%% The following created by alog_manip.MOOSalog.MOOSalog.increaseFreq\\n%% ', now])\n increase_msg = ''.join(['%% Resultant Frequency: ',str(desHz),' Hz'])\n # hiHz = {}\n self.outData = {} # erase pre-existing dict\n self.outData['header'] = [stamp,increase_msg,'%%%%'] + self.srcData['header']\n\n def create_msgs():\n \"\"\" Puts interpolated data into dict outData\n Primary interpolation function for increaseFreq\n Consider using uniaxial spline --> would have one function for all of dictionary dat\n \"\"\"\n getcontext().prec = 3 # will round to 3 decimal places\n orig_times = sorted(dat)\n for n in range(len(dat) - 1):\n linfun = interp1d([orig_times[n], orig_times[n+1]], \\\n [dat[orig_times[n]], dat[orig_times[n+1]]])\n dt = orig_times[n+1] - orig_times[n] # current\n freq = 1/dt # current\n if dt < (1/desHz):\n print('found instance where Freq already at/above desired Freq')\n else:\n new_dt = dt*freq/desHz\n new_times = linspace(orig_times[n],orig_times[n+1],floor(dt/new_dt))\n # print(new_times)\n new_values = linfun(new_times)\n # rounded_values = [float(Decimal(\"%.3f\" % e)) for e in new_values]\n rounded_times = [float(Decimal(\"%.3f\" % e)) for e in new_times]\n for m in range(len(rounded_times)):\n # this_time = int(new_times[m]*100000)/100000 # 5 decimal places in timstamp\n self.outData[sens][meas][rounded_times[m]] = new_values[m]\n\n ## go thru and pull out dictionaries {time: value} then send to interpolation func\n for sens in self.srcData:\n if sens is not 'header':\n self.outData[sens] = {}\n for meas in self.srcData[sens]:\n self.outData[sens][meas] = {}\n dat = self.srcData[sens][meas]\n if len(dat) == 1:\n self.outData[sens][meas] = dat # only 1 data point, no interp\n else:\n create_msgs()" ]
[ "0.7318101", "0.58122194", "0.5594247", "0.5316941", "0.52576196", "0.52559245", "0.524749", "0.5236174", "0.5196419", "0.5157173", "0.51346946", "0.5111613", "0.5108861", "0.51046497", "0.50656474", "0.4978461", "0.4945043", "0.49422932", "0.49275485", "0.49133897", "0.49077547", "0.4897097", "0.48936674", "0.48920897", "0.48859867", "0.4885758", "0.48785353", "0.48680377", "0.4836179", "0.48308283" ]
0.5945982
1
Uses a service call to have the WAM move to the end point. Goes at its own pace.
def request_wam_move(end_point, velocity_limits):
    move_wam_srv = rospy.ServiceProxy('/wam/joint_move', JointMove)
    try:
        resp1 = move_wam_srv(end_point)
    except rospy.ServiceException as exc:
        print("Service did not process request: " + str(exc))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def service(ants=0, tmo=200, waiton=-2) :\n return stow( ants, tmo, waiton, SERVICE );", "def GET_forward(self):\n self.roomba.DriveStraight(pyrobot.VELOCITY_FAST)\n time.sleep(1)\n self.roomba.SlowStop(pyrobot.VELOCITY_FAST)", "def flash_move(self,params):\n direction = params['direction']\n avoid = 0\n if params.has_key('avoid'): avoid = 1\n (x,y) = self.service.grid.requestLocation(self.participant,direction,1,avoid)\n\n group = self.service.groupOfParticipant(self.participant)\n\n if group:\n if len(group.members) == 1:\n self.service.removeParticipantFromDisussion(self.name)\n else:\n if self.participant.status == AVOID:\n self.service.removeParticipantFromDisussion(self.name)\n else:\n self.receiveDirectCommand(\"group\",{\"members\":string.join(map(lambda x:x.name,group.members),\";\")})\n return 0\n\n self.participant.setLocation((x,y)) \n self.receiveDirectCommand(\"location\",{\"x\":x,\"y\":y,\"sender\":self.name})\n self.service.sendParticipants(self.name,\"location\",{\"x\":x,\"y\":y,\"sender\":self.name})", "def __call__(self):\n if grinder.runNumber == 0: self.initialSleep()\n (param1, param2) = self.getParam()\n self.request1(param1, param2)", "def ServiceRequest(self):\n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n pass", "def Advance():\n warp.step()", "def do_drive(self, task):\n\n try:\n #Set run status\n self.status_stream.push(0, self.subsystem, autolock=True)\n\n #Get configs\n config = self.default_config\n config.update(self.config_stream.get(self.default_config))\n goals = self.goals_stream.get(self.default_goals)\n\n logging.info(\"Navigating to point {}\".format(goals[0]))\n\n self.drivetrain_control_stream.lock(self.subsystem)\n\n wait_time = 1/config[\"cycles_per_second\"]\n\n last_speed = 0\n\n while task.active:\n\n drivetrain_state = self.drivetrain_state_stream.get({\"distance\": 0, \"speed\": 0, \"angle\": 0})\n\n #How much farther do we have to go?\n distance_error = goals[0][1] - drivetrain_state[\"distance\"]\n\n #Are we done? 
Then break!\n if abs(distance_error) < config[\"precision\"]:\n break\n\n #Calculate the approximate distance it will take to slow down\n slow_point = (drivetrain_state[\"speed\"] ** 2 / (config[\"max_values\"][1] * 2))\n\n #Calculate what speed we currently want to go at:\n wanted_speed = math.copysign(config[\"max_values\"][0], distance_error)\n if distance_error < slow_point:\n #We must slow down then!\n wanted_speed = 0\n\n #Limit the acceleration\n wanted_speed_delta = wanted_speed - last_speed\n wanted_acceleration = math.copysign(config[\"max_values\"][1], wanted_speed_delta)\n result_speed = last_speed + wanted_acceleration * wait_time\n\n #Add a bit of angle correction\n angle_component = - drivetrain_state[\"angle\"] * .01\n\n #Send values to drivetrain\n self.drivetrain_control_stream.push((angle_component, result_speed), self.subsystem)\n\n last_speed = result_speed\n\n #Sleep until next loop\n time.sleep(wait_time)\n\n #Clean up\n self.drivetrain_control_stream.push((0, 0), self.subsystem)\n self.status_stream.push(1, self.subsystem, autolock=True)\n except datastreams.LockError:\n self.status_stream.push(-1, self.subsystem, autolock=True)", "def move_forward(power):\n message = \"FORWARD:\" + str(power) + '\\n'\n sock.sendall(message)\n return", "def swipe(self, client, direction, offset, sec=500):\r\n try:\r\n client.swipe2(direction, offset, sec)\r\n logging.info(str(time.asctime(time.localtime())) +\"swiping\" + \" swipe-->Passed \")\r\n except InternalException, msg:\r\n logging.error(str(time.asctime(time.localtime())) + \" :: \" + str(msg) + \"swipe-Failed\")\r\n except RuntimeException, msg:\r\n logging.error(str(time.asctime(time.localtime())) + \" :: \" + str(msg) + \"swipe-Failed\")", "def plane(env, name, cw):\n print('%s Chega ao aeroporto em %.2f.' % (name, env.now))\n stat.new_arrival()\n arrive = env.now\n with cw.machine.request() as request:\n yield request\n\n print('%s entra na pista de pouso em %.2f. Tempo de espera: %.2f' % (name, env.now, env.now - arrive))\n start = env.now\n WAIT_TIME.append(env.now - arrive)\n yield env.process(cw.delay())\n\n yield env.process(cw.partida(name))\n\n print('%s parte do aeroporto em %.2f. 
Tempo do servico: %.2f' % (name, env.now, env.now - start))\n SERVICE_TIME.append(env.now - start)\n SYSTEM_TIME.append(env.now - arrive)\n\n stat.new_completion()", "def GET_reverse(self):\n self.roomba.DriveStraight(-pyrobot.VELOCITY_FAST)\n time.sleep(1)\n self.roomba.SlowStop(-pyrobot.VELOCITY_FAST)", "def execute_cb(self, goal):\n print(\"Action server\")\n loop_rate = rospy.Rate(10)\n \n actual_path = Path()\n actual_path.header.frame_id = \"map\"\n self.set_plan(goal.path)\n while not rospy.is_shutdown():\n rospy.logwarn_throttle(2.0,\"ExecCb\")\n map_pose, odom_pose = self.robot.get_pose()\n if map_pose == None:\n rospy.logerr(\"Robot pose could not retreived\")\n continue\n \n if self.goal_reached(map_pose.pose):\n # Robot completed the task\n self.result.success = True\n self.action_server.set_succeeded(self.result)\n rospy.logerr(\"Goal reached\")\n return\n \n if self.action_server.is_preempt_requested():\n print(\"Preempt requested\")\n self.result.success = False\n self.action_server.publish_feedback(self.feedback)\n self.action_server.set_preempted(result=self.result)\n return\n\n\n\n res, cmd_vel = self.compute_velociy_commands()\n \n if res:\n self.robot.command(cmd_vel)\n\n actual_path.header.stamp = rospy.Time.now()\n actual_path.poses.append(map_pose)\n self.desired_pub.publish(goal.path)\n self.actual_pub.publish(actual_path)\n loop_rate.sleep()", "def performOverflow(self, call):\n overFlowDest = self.getOverflowDest()\n if not overFlowDest:\n self.huntGroup.member_to_distribute = 0\n PrintLog(\"+++++++Debug: Under construction+++++\")\n return\n PrintLog(\"Waiting overflow timeout %s sec\" % self.overflowTimeout)\n time.sleep(self.overflowTimeout)\n if overFlowDest.tserver <> self.tserver:\n overFlowDest = self.trunk(self, overFlowDest)\n if InTrue(GetOption(\"CofFeature\")):\n call.ViaExtRouter = 1\n call.external = 1\n pt = self.partyToDistribute()\n thirdPartyDNRole = PartyRole.Destination\n if pt.Role == PartyRole.ConferenceMember and len(pt.Call.PartyList) >= 3:\n thirdPartyDNRole = PartyRole.ConferenceMember\n thirdPartyDN = \"Trunk\"\n addPrm = {\"ThirdPartyDN\": thirdPartyDN, \"ThirdPartyDNRole\": thirdPartyDNRole}\n if not self.routeRequestOnQueued:\n ev = self.mayBeEvent(EventName.Diverted, pt, timeout=3, addPrm=addPrm)\n else:\n addPrmRU = {\"ReferenceID\": 0, \"Reasons\": None, \"ThirdPartyDN\": thirdPartyDN,\n \"ThirdPartyDNRole\": thirdPartyDNRole}\n ev = self.mayBeEvent(EventName.RouteUsed, pt, timeout=3, addPrm=addPrmRU)\n ev = self.mayBeEvent(EventName.Diverted, pt, timeout=3, addPrm=addPrm)\n if not ev:\n pt.postponedAbandonedOrDiverted = 1\n self.postponedAbandonedOrDiverted = self.postponedAbandonedOrDiverted + 1\n pt.removeFromCall()\n ringPt = overFlowDest.ring(call)\n return ringPt", "def sweep_relay():", "def move_robot(request):\n\n phase_id = request.phase\n print \"phase_id is {}\".format(phase_id)\n if phase_id == 0:\n success = move_to_marshmallow()\n elif phase_id == 1:\n success = move_to_mouth()\n elif phase_id == 2:\n success = release_marshmallow()\n elif phase_id == 3:\n success = grip_marshmallow()\n elif phase_id == 4:\n success = move_to_start_state()\n elif phase_id == 5:\n success = perform_full_sequence()\n message = \"placeholder\"\n\n return TriggerPhaseResponse(success, message)", "def wasp():\n send_to = input('Enter destination: ')\n active_wif = input('Enter your Active Key: ')\n steem = Steem(keys=[active_wif], nodes='https://api.steemit.com')\n w = Wallet(steem_instance=steem)\n t = Tokens() \n usr = 
w.getAccountFromPrivateKey(active_wif)\n sew = seWallet(account=usr, steem_instance=steem)\n tokens = sew.get_balances()\n for token in tokens:\n symbol = token['symbol']\n info = t.get_token(symbol)\n p = info['precision']\n b = float(token['balance'])\n balance = float(f'{b:.{p}f}')\n if balance > 0:\n print(f'[ Transfering {balance} of {symbol} to {send_to} ]')\n #pprint(sew.transfer(send_to, balance, symbol, memo=\"waspsting.py transfer\")\n sew.transfer(send_to, balance, symbol, memo=\"waspsting.py transfer\")\n time.sleep(1)\n return None", "def test_move_between(self):\n\n global sendPlayCallParams\n\n req = self.get_moves(50)\n\n with patch.object(socket.socket, 'connect', return_value=True) as mock_connect, \\\n patch.object(socket.socket, 'bind', return_value=True) as mock_bind:\n src.drivers.hyundai_robot.udp = UdpConnector(\"localhost\", 8000)\n\n with patch('src.drivers.hyundai_robot.sendPlay', side_effect = mock_send_play) as m, \\\n patch.object(UdpConnector, 'appendToQueue') as u:\n \n src.drivers.hyundai_robot.allPositions = []\n src.drivers.hyundai_robot.move_between(MoveBetweenRequest( start = 2, end = 3 ))\n assert u.called == False\n\n src.drivers.hyundai_robot.store_poses(req)\n assert u.call_count == math.ceil( len(req.moves) / src.drivers.hyundai_robot.batchSize )\n\n src.drivers.hyundai_robot.move_between(MoveBetweenRequest( start = 2, end = 3 ))\n \n assert sendPlayCallParams['start'] == 3\n assert sendPlayCallParams['end'] == 4\n assert sendPlayCallParams['direction'] == 1\n assert sendPlayCallParams['poses'] == None\n assert m.called\n\n src.drivers.hyundai_robot.move_between(MoveBetweenRequest( start = 3, end = 1 ))\n \n assert sendPlayCallParams['start'] == 4\n assert sendPlayCallParams['end'] == 2\n assert sendPlayCallParams['direction'] == -1\n assert sendPlayCallParams['poses'] == None\n assert m.call_count == 2\n\n src.drivers.hyundai_robot.udp.stopConsumeThread()", "def turn_towards(heading):\r\n\tprint (\"In turn towards:\")\r\n\thead = vehicle.heading\r\n\tprint (\"Vehicle Heading: \",head)\r\n\tprint (\"Target Heading: \",heading)\r\n\trc1 = 1900\r\n\twhile True:\r\n\t\tif(head > heading - turnTowardsThreshold and head < heading + turnTowardsThreshold):\r\n\t\t\tbreak\r\n\t\tsendThrottleCommand(minimumThrottle, enableThrottle)\r\n\t\t#time.sleep(0.5)\r\n\t\tprint (\"Vehicle Heading: \",head)\r\n\t print (\"Target Heading: \",heading)\r\n\t\tvehicle.channels.overrides = {'1':rc1}\r\n\t\ttime.sleep(0.2)\r\n\t\thead = vehicle.heading", "def run(self):\n if not _wait_home_origin(self._xbee): return\n if not _preflight_check(self._vehicle, self._xbee): return\n if _wait_lift_cmd(self._xbee):\n shared.status['airborne'] = arm_and_takeoff(self._vehicle, self._dalt, self._delay)", "def onSMPPOperation(self):\n if self.isBound():\n self.activateEnquireLinkTimer()\n\n self.activateInactivityTimer()", "def go_forward(self, distance, speed=0.1):\n while (self._last_odom_msg == None):\n\t rospy.sleep(1.0)\n start = copy.deepcopy(self._last_odom_msg.pose.pose.position)\n rate = rospy.Rate(10)\n while self.distance_fn(self._last_odom_msg.pose.pose.position, start) < math.fabs(distance):\n direction = -1 if distance < 0 else 1\n self.move(direction * speed, 0)\n rate.sleep()", "def process_request(t):\n time.sleep(t)", "def process_request(t):\n time.sleep(t)", "def process_request(t):\n time.sleep(t)", "def u_turn(self, direction, diameter_in):\n \n# pdb.set_trace()\n # Calculate radius of turn for the inside wheel.\n r_in = diameter_in / 2\n\n # Outside radius is 20 
inches from inside radius.\n r_out = r_in + MuleBot.WHEEL_BASE_LENGTH\n \n # Outside travel distance\n travel = r_out * 3.14159\n travel_revolutions = travel / MuleBot.CIRCUM_IN\n \n r_ratio = r_out / r_in\n #r_ratio_half = r_ratio / 2\n\n speed_multiplier = MuleBot.MAX_RPM / r_ratio\n\n outside_rpm = r_ratio * speed_multiplier\n inside_rpm = speed_multiplier\n \n \n # \n # minutes at outside_rpm\n minutes = travel_revolutions / outside_rpm\n seconds = minutes * MuleBot.SECONDS_PER_MINUTE\n \n # Something isn't quite perfect.\n if direction == 'left':\n if diameter_in < 25:\n seconds -= 1\n else:\n seconds -= 2\n else:\n if diameter_in < 25:\n seconds += 1\n else:\n seconds += 2\n\n if direction == 'left':\n v_l = self.rpm_to_rps(inside_rpm)\n v_r = self.rpm_to_rps(outside_rpm)\n else:\n v_r = self.rpm_to_rps(inside_rpm)\n v_l = self.rpm_to_rps(outside_rpm)\n\n #print(\"2inside: rpm: \", inside_rpm)\n #print(\"2outside: rpm: \", outside_rpm)\n \n #print(\"2.1: v_l: \", v_l)\n #print(\"2.1: v_r: \", v_r)\n\n # Set wheel drive rates.\n self.set_wheel_drive_rates(v_l, v_r)\n\n # Sleep during the turn.\n time.sleep(seconds)\n\n # Stop\n self.stop()\n \n # Move forward 24 inches.\n self.forward(24)", "def do_the_thing(self):\n # r = rospy.Rate(2)\n if self.current_state == \"wait\":\n self.wait()\n elif self.current_state == \"follow\":\n self.follow()\n\n self.pub.publish(self.twist)\n\n # r.sleep()", "def Z1Move(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def forward(self, param):\n\t\tif param:\n\t\t\tself.linear_move(param * .3048)\n\t\telse:\n\t\t\tself.linear_move(riu.default_dist * .3048)", "def ftp_APPE(self, line):\n # watch for APPE preceded by REST, which makes no sense.\n if self.restart_position:\n self.respond(\"550 Can't APPE while REST request is pending.\")\n return\n self.ftp_STOR(line, mode='a')", "def move_to(self, waypoint):\n self.set_final_wp(waypoint)\n self.go()\n currPos = np.asarray(self.rexarm.get_positions())\n while(np.linalg.norm(np.asarray(waypoint) - currPos) > 0.15):\n time.sleep(0.01)" ]
[ "0.6199987", "0.61403936", "0.5519624", "0.5468195", "0.53626955", "0.52841926", "0.5257299", "0.52350175", "0.51708645", "0.5167345", "0.51569194", "0.5134525", "0.5132805", "0.5116559", "0.51158285", "0.51156914", "0.5107367", "0.5070184", "0.5053658", "0.5045911", "0.5042649", "0.50410885", "0.50410885", "0.50410885", "0.502194", "0.5014852", "0.50117695", "0.50070685", "0.5005501", "0.4999385" ]
0.68024755
0
Return the most common element in a list of votes. If multiple elements are tied for the most votes, one of them is returned at random.
def majority_vote(votes):
    vote_counts = {}
    for vote in votes:
        if vote in vote_counts:
            vote_counts[vote] += 1
        else:
            vote_counts[vote] = 1

    winners = []
    max_choice = max(vote_counts.values())
    for vote, count in vote_counts.items():
        if count == max_choice:
            winners.append(vote)

    import random
    return random.choice(winners)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def majority_vote(votes):\n\n vote_counts = {} # dictionary\n for vote in votes:\n # known word\n if vote in vote_counts:\n vote_counts[vote] += 1\n # unknown word\n else:\n vote_counts[vote] = 1\n\n print(vote_counts)\n # but who is the winner?\n winners = []\n max_votes = max(vote_counts.values())\n\n # for (,) this is a tuple\n for vote, count in vote_counts.items():\n if count == max_votes:\n winners.append(vote)\n\n # what is we have multiply winners? Well we select one randomly\n\n return random.choice(winners)", "def get_most_common(self, lst):\n data = Counter(lst)\n mc = data.most_common(2) \n #if len(mc) == 1 or (mc[0][1] != (mc[1][1])):\n # return mc[0][0]\n #return \"AMB\"\n return data.most_common(1)[0][0]", "def get_majority_vote(rating_scores):\n return collections.Counter(rating_scores).most_common()[0][0]", "def most_common(lst):\n return max(set(lst), key=lst.count)", "def majority_vote(labels):\n vote_counts = Counter(labels)\n winner, winner_count = vote_counts.most_common(1)[0]\n num_winners = len([count for count in vote_counts.values()\n if count == winner_count])\n if num_winners == 1:\n return winner\n else:\n #try again without the farthest\n return majority_vote(labels[:-1])", "def most_common(self):\n # Example ouput : ['so', 6]\n return list(sorted(self.frequencies().items(), key = lambda x: x[1], reverse=True)[0])\n #sorted = sorted(self.frequencies().items(), key = lambda x: x[1], reverse=True)\n #return sorted[0] #not list", "def most_common(elems):\n # get an iterable of (item, iterable) pairs\n sl = sorted((x, i) for i, x in enumerate(elems))\n # print 'SL:', SL\n groups = itertools.groupby(sl, key=operator.itemgetter(0))\n\n # auxiliary function to get \"quality\" for an item\n def _auxfun(g):\n item, iterable = g\n count = 0\n min_index = len(elems)\n for _, where in iterable:\n count += 1\n min_index = min(min_index, where)\n # print 'item %r, count %r, minind %r' % (item, count, min_index)\n return count, -min_index\n\n # pick the highest-count/earliest item\n return max(groups, key=_auxfun)[0]", "def mostfrequent(self, L):\n L.sort()\n n0, e0 = 0, None\n ep = None\n for e in L:\n if e != ep:\n n = L.count(e)\n if n > n0:\n n0, e0 = n, e\n ep = e\n return e0, n0", "def get_max_loot(input_list):\n even = sum(input_list[::2])\n odd = sum(input_list[1::2])\n return even if even > odd else odd", "def __find_majority_opt(input_list):\n count = 0\n element = input_list[0]\n for i in range(len(input_list)):\n if count == 0 :\n element = input_list[i]\n count =1\n elif element == input_list[i]:\n count +=1\n else :\n count -=1\n return element", "def ensemble(scores):\r\n c = Counter ()\r\n for probs in zip (scores):\r\n idx = int (np.argmax (np.array (probs)))\r\n c.update ([idx])\r\n best = c.most_common (1)[0][0]\r\n return best", "def most_frequent(x):\n return Counter(x).most_common()[0][0]", "def most_common(iterable):\n from collections import Counter\n\n data = Counter(iterable)\n return data.most_common(1)[0][0]", "def get_majority(lst):\n a = {}\n candidate = lst[0]\n for elem in lst:\n\tif elem not in a:\n\t a[elem] = 0\n\telse:\n\t a[elem] += 1\n for elem in lst:\n \tif (a[elem] >= len(lst) / 3):\n candidate = elem\n return candidate", "def mostcommon(iterable, n=None):\n #import operator\n bag = {}\n bag_get = bag.get\n for elem in iterable:\n bag[elem] = bag_get(elem, 0) + 1\n if n is None:\n return sorted(bag.iteritems(), key=itemgetter(1), reverse=True)\n it = enumerate(bag.iteritems())\n nl = nlargest(n, ((cnt, i, elem) for (i, (elem, cnt)) 
in it))\n return [(elem, cnt) for cnt, i, elem in nl]", "def mostCommon(self):\n d = [r[-1] for r in self]\n return max(set(d), key=d.count)", "def naive_majority(voters):\n half = len(voters)//2\n for index, voter in enumerate(voters):\n count = 0\n for other_voter in voters:\n if voter == other_voter:\n count += 1\n if count > half:\n return Outcome.has_majority\n return Outcome.no_majority", "def majorityCount(votes):\n classCount = {}\n for vote in votes:\n if vote not in classCount.keys():\n classCount[vote] = 0\n classCount[vote] += 1\n return sorted(classCount.iteritems(),\n key=operator.itemgetter(1), reverse=True)[0][0]", "def get_majority_element_naive(self, lst, left, right):\r\n # Running time: O(n ** 2)\r\n n = len(lst)\r\n \r\n for i in range(n):\r\n current_elem, count = lst[i], 0\r\n for j in range(n):\r\n if lst[j] == current_elem:\r\n count += 1\r\n \r\n if count > int(math.floor(n / 2.0)): return lst[i]\r\n \r\n return -1", "def majority_vote(labels):\n\n conta = Counter(labels)\n\n winner, winner_count = conta.most_common(1)[0]\n\n num_winner = sum([1 for count in conta.values() if count == winner_count])\n\n if num_winner == 1:\n return winner\n else:\n return majority_vote(labels[:-1])", "def getMostLikelyTag(set_of_sents):\n # initialize tags for the words\n l_of_tags = []\n all_tags = brown.tagged_sents()\n size_of_set = len(set_of_sents)\n for i in range(size_of_set):\n tags = findTags(set_of_sents[i], all_tags[i])\n l_of_tags += tags\n\n # merge tags for each word\n d = {} # dict of words and tags amount\n for i in range(len(l_of_tags)):\n w = l_of_tags[i][WORD]\n t = l_of_tags[i][TAG]\n if w in d:\n if t in d[w]:\n d[w][t] = d[w][t] + 1\n else:\n d[w][t] = 1\n else:\n d[w] = {t: 1}\n\n # get the max tag of each word\n result = {}\n for w, t in d.items():\n v = list(t.values())\n k = list(t.keys())\n fin_tag = k[v.index(max(v))]\n result[w] = fin_tag\n\n return result", "def majority(x):\n c = Counter(x)\n value, _ = c.most_common()[0]\n return value", "def most_similar_available_itag(itags_by_preference, available_itags):\n\n for itag in itags_by_preference:\n if itag in available_itags:\n return itag", "def get_majority_element_sort_count(self, lst, left, right):\r\n # get sorted list\r\n sorted_lst = sorted(lst)\r\n \r\n n = len(sorted_lst)\r\n \r\n for i in range(n):\r\n count = sorted_lst.count(sorted_lst[i])\r\n if count > int(math.floor(n / 2.0)): return sorted_lst[i]\r\n \r\n return -1", "def majority_vote(votes):\n import scipy.stats as ss\n mode, count = ss.mstats.mode(votes)", "def mode(lst):\n cnt = Counter(lst)\n return cnt.most_common(1)[0][0]", "def __choose_best_matching_candidate(candidates, artist):\n\n artist_names = set()\n for match in candidates:\n artist_names.add(match[1])\n\n # If there is more than 1 matched artist:\n if len(artist_names) > 1:\n \n best_distance = 10000\n best_artist = \"\"\n\n # Calculate the levenshtein edit distance between the searched artist name and the artist names in the search results.\n for matched_artist in artist_names:\n distance = editdistance.eval(matched_artist, artist)\n if distance < best_distance:\n best_distance = distance\n best_artist = matched_artist\n\n # Then exclude from candidates all matches that are NOT from the best artist\n candidates = [candidate for candidate in candidates if candidate[1] == best_artist]\n else:\n best_artist = artist_names.pop()\n best_distance = editdistance.eval(best_artist, artist)\n\n # Threshold candidate name to the artist name\n ratio = best_distance/len(artist)\n 
# Allow ~15% difference\n if ratio > 0.15:\n raise MatchNotFoundError(\"Closest artist is too far of the queried artist\")\n\n # Descending list\n sort_on_num_ratings = sorted(candidates, key=lambda cand: cand[2], reverse=True)\n\n # Take the one with the most votes\n selected = sort_on_num_ratings[0]\n\n # Unless it has a rating lower than 4.\n if selected[3] < 4:\n\n sort_on_rating = sorted(candidates, key=lambda cand: cand[3], reverse=True)\n\n # If there is one with a rating higher than 4, select that one. \n if sort_on_rating[0][3] > 4:\n selected = sort_on_rating[0]\n\n return selected", "def findMode(list):\n # Use Python's Counter function on the list\n values = Counter(list)\n # Returns the highest occurring item\n return values.most_common(1)[0][0]", "def get_majority_vote_for_sequence(sequence, nb_classes):\n votes_per_class = np.zeros((nb_classes, 1))\n for i in range(len(sequence)):\n class_vote = np.argmax(sequence[i])\n votes_per_class[class_vote] += 1\n # Return random choice of the max if there's a tie.\n return np.random.choice(np.flatnonzero(votes_per_class == votes_per_class.max()))", "def secondAttempt(self, nums):\n foundMajority = False\n while(not foundMajority):\n randomEl = random.choice(nums)\n occurence = 0\n for num in nums:\n if(num == randomEl):\n occurence += 1\n if(occurence > len(nums)/2):\n foundMajority = True\n return randomEl" ]
[ "0.7092113", "0.642857", "0.64019984", "0.6061767", "0.60235876", "0.59869194", "0.597398", "0.59564465", "0.5941662", "0.5912173", "0.58963436", "0.58896893", "0.58867997", "0.5852934", "0.58495456", "0.5814076", "0.579337", "0.57645696", "0.5739213", "0.5736768", "0.5644522", "0.5638166", "0.55994624", "0.559884", "0.5596121", "0.5590105", "0.5589795", "0.5571307", "0.555858", "0.5548904" ]
0.6985902
1
Find the k nearest neighbors of point p in array points. If multiple points are equally far, the ones with lower values in lower dimensions are preferred. For example, if (2,2) and (2,4) are equally distant, (2,2) would be chosen.
def find_nearest_neighbors(p, points, k):
    import numpy as np
    distances = np.zeros(points.shape[0])
    for i in range(len(distances)):
        distances[i] = distance(p, points[i])
    ind = np.argsort(distances)
    return ind[0:k]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_nearest_neighbors(p, points, k=5):\n dist = np.zeros(points.shape[0])\n for i in range(len(dist)):\n dist[i] = distance(p, points[i])\n ind = np.argsort(dist)\n return ind[0:k]", "def knn(p, pnts, k=1, return_dist=True):\r\n def _remove_self_(p, pnts):\r\n \"\"\"Remove a point which is duplicated or itself from the array\r\n \"\"\"\r\n keep = ~np.all(pnts == p, axis=1)\r\n return pnts[keep]\r\n #\r\n def _e_2d_(p, a):\r\n \"\"\" array points to point distance... mini e_dist\r\n \"\"\"\r\n diff = a - p[np.newaxis, :]\r\n return np.einsum('ij,ij->i', diff, diff)\r\n #\r\n p = np.asarray(p)\r\n k = max(1, min(abs(int(k)), len(pnts)))\r\n pnts = _remove_self_(p, pnts)\r\n d = _e_2d_(p, pnts)\r\n idx = np.argsort(d)\r\n if return_dist:\r\n return pnts[idx][:k], d[idx][:k]\r\n return pnts[idx][:k]", "def kclosestpoints(points, k):\n dist = {p : 0 for p in points}\n for point in points:\n dist[point] = point[0] ** 2 + point[1] ** 2\n dist = sorted(dist.items(), key=lambda x : x[1], reverse=False)\n return dist[:k]", "def k_nearest(self, pt, k):\n if k < 1:\n raise ValueError('k should be at least 1')\n result = []\n visit_ct = k_nearest(self.root, pt, k, result)\n logging.debug('Visited {0} leaf nodes'.format(visit_ct))\n return [(math.sqrt(d), item) for (d, item) in result]", "def k_nearest_neighbors(x_test, df_training, k):\n\n return np.argpartition(distance_to_each_training_point(x_test,\n df_training), k-1)[:,0:k]", "def get_k_closest_points(point, data, k, distance_metric):\n points_and_scores = []\n k_closest_points = []\n for item in data:\n item_score = distance_metric(point, item)\n points_and_scores.append([item, item_score])\n points_and_scores = sorted(points_and_scores, key = lambda item:(item[1], item[0].coords))\n for i in range(k):\n k_closest_points.append(points_and_scores[i][0])\n return k_closest_points", "def knn(p, k, x, t):\r\n\r\n # Number of instances in data set\r\n N = x.shape[0]\r\n\r\n Euclidean_Distance = numpy.square(x - p) #Euclidean distance\r\n dis = numpy.sum(Euclidean_Distance, axis=1) #sum of the euclidean distance\r\n inds = numpy.argsort(dis)[:k] #sort the indices of the distance array\r\n tgt_cat = Counter([t[i] for i in inds]) #count the times of equivalent target labels\r\n top_class = max(tgt_cat, key= tgt_cat.get) #top class among the k nearest points\r\n\r\n\r\n #top_class = 0\r\n\r\n return top_class", "def knn0(pnts, p, k):\r\n p = np.asarray(p)\r\n pnts = np.asarray(pnts)\r\n diff = pnts - p[np.newaxis, :]\r\n d = np.einsum('ij,ij->i', diff, diff)\r\n idx = np.argsort(d)[:k]\r\n# s = [i.tolist() for i in pnts[idx]]\r\n return pnts[idx].tolist()", "def get_nearest(src_points, candidates, k_neighbors=1):\n\n # Create tree from the candidate points\n tree = BallTree(candidates, leaf_size=15, metric='haversine')\n distances, indices = tree.query(src_points, k=k_neighbors)\n\n # Transpose to get distances and indices into arrays\n distances = distances.transpose()\n indices = indices.transpose()\n\n # Get closest indices and distances (i.e. 
array at index 0)\n # note: for the second closest points, you would take index 1, etc.\n closest = indices[0]\n closest_dist = distances[0]\n\n # Return indices and distances\n return closest, closest_dist", "def visit_k_nearest(node, pt, k, result):\n # rather brute force but because cut off and k expected to be rather small\n # not further optimized\n # (result could instead of list be a bin heap with at most k items)\n for active, item in zip(node.active, node.items):\n # check active items\n if active:\n d = distance2(pt, item)\n result.append( (d, item) )\n # sort on distance\n result.sort(key=lambda x: x[0])\n # keep max k items\n while len(result) > k:\n result.pop()", "def get_nearest(src_points, candidates, k_neighbors=1):\r\n\r\n # Create tree from the candidate points. leaf-size só muda o processamento, e a métrica é a forma de cálculo, que no caso é a Great Circle Distances\r\n tree = BallTree(candidates, leaf_size=15, metric='haversine')\r\n\r\n # Find closest points and distances. K é a quantidade de pontos que queremos a dis^tanica e SRC points são os pontos\r\n distances, indices = tree.query(src_points, k=k_neighbors)\r\n\r\n # Transpose to get distances and indices into arrays\r\n distances = distances.transpose()\r\n indices = indices.transpose()\r\n\r\n # Get closest indices and distances (i.e. array at index 0)\r\n # note: for the second closest points, you would take index 1, etc.\r\n closest = indices[0]\r\n closest_dist = distances[0]\r\n\r\n # Return indices and distances\r\n return (closest, closest_dist)", "def get_k_neighbors(self, point):\n nn = []\n nnl = []\n for p,l in zip(self.train_features,self.train_labels):\n d = self.distance_function(p,point)\n dl_pair = (d,l)\n nn.append(dl_pair)\n nn = sorted(nn, key = lambda x: x[0])\n for i in range(0,self.k):\n nnl.append(nn[i][1])\n return nnl\n raise NotImplementedError", "def k_nearest(node, pt, k, result):\n if node.items:\n visit_k_nearest(node, pt, k, result)\n return 1\n else:\n dx = pt[node.cutdim] - node.cutval\n if dx <= 0:\n near = node.left\n far = node.right\n else:\n near = node.right\n far = node.left\n ct_near = k_nearest(near, pt, k, result)\n # check if we found results, \n # if we have sufficient results and the closest of these\n # is closer than the split line, we do not have to search further\n if result and len(result) >= k and pow(dx, 2) >= result[0][0]:\n return ct_near \n ct_far = k_nearest(far, pt, k, result)\n return ct_near + ct_far", "def k_neighbors(self, unknown, dataset, k):\n distances = []\n for title in dataset:\n point = dataset[title]\n distance_to_point = distance.euclidean_distance(point, unknown)\n distances.append([distance_to_point, title])\n distances.sort()\n neighbors = distances[0:k]\n return neighbors", "def closest_points(point, points, nn=1):\n\n eu_dsts = point - points\n eu_dsts = np.sqrt((eu_dsts * eu_dsts).sum(axis=1))\n n_ids = np.argsort(eu_dsts)\n out_points = np.zeros(shape=(nn, 3))\n for i in range(nn):\n out_points[i] = points[n_ids[i], :]\n return out_points", "def test_k_nearest(self):\n L = range(100)\n L = [(i, i, i, i) for i in L]\n tree = KdTree(L)\n # remove distance, only keep points from the result\n items = lambda items: [x for (d, x) in items] \n assert items(tree.k_nearest((-1, -1), 1)) == [(0, 0, 0, 0)]\n assert items(tree.k_nearest((100, 100), 1)) == [(99, 99, 99, 99)]\n assert items(tree.k_nearest((50, 50), 1)) == [(50, 50, 50, 50)]\n assert items(tree.k_nearest((-1, -1), 2)) == [(0, 0, 0, 0),\n (1, 1, 1, 1)]", "def 
brute_k_nearest_neighbors(coords, query_point, k, distance_function):\n bpq = []\n for coord in coords:\n dist = distance_function(coord, query_point)\n if len(bpq) < k or dist < bpq[-1].distance:\n insort(bpq, NNResult(coord, dist), key=attrgetter(\"distance\"))\n if len(bpq) > k:\n bpq.pop()\n return bpq", "def find_nearest_neighbors_idx(X, x, k):\n ## homework:start\n result = \n ## homework:end\n return result", "def find_nearest_neighbour_from_point(point_cloud:np.ndarray, point:int) -> int:\n pass", "def _find_nearest_neighbors(self, k=15):\n # this isn't running as expected\n # if self.pca_matrix.any():\n # sys.exit(\"Please run reduce matrix dimensions for populate the PCA matrix.\")\n\n # key will represent index for artificial doublet\n # value will hold list of the most similar doublets\n nn_obj = nearest_neighbors.NearestNeighbors(self.pca_matrix, k)\n\n # create set of indices for nearest neighbors to ignore; set contains indices for artificial doublets\n idxs_to_ignore = {\n i for i in range(self.num_cells, self.num_cells + self.num_artifial_doublets)\n }\n for i in range(self.num_cells, self.num_cells + self.num_artifial_doublets):\n neighbors = nn_obj.get_nearest_neighbors(i, idxs_to_ignore)\n neighbors = [\n i for i in neighbors if i[1] < self.num_cells\n ] # only include similarity if that similarity is for a cell barcode\n self.nearest_neighbors_dict[i] = neighbors", "def nearest_input_pts(\n in_latlons: ndarray, out_latlons: ndarray, k: int\n) -> Tuple[ndarray, ndarray]:\n # Convert input latitude and longitude to XYZ coordinates, then create KDtree\n in_x, in_y, in_z = ecef_coords(in_latlons[:, 0].flat, in_latlons[:, 1].flat)\n in_coords = np.c_[in_x, in_y, in_z]\n in_kdtree = KDTree(in_coords)\n # Convert output to XYZ and query the KDtree for nearby input points\n out_x, out_y, out_z = ecef_coords(out_latlons[:, 0].flat, out_latlons[:, 1].flat)\n out_coords = np.c_[out_x, out_y, out_z]\n distances, indexes = in_kdtree.query(out_coords, k)\n # Avoid single dimension output for k=1 case\n if distances.ndim == 1:\n distances = np.expand_dims(distances, axis=1)\n if indexes.ndim == 1:\n indexes = np.expand_dims(indexes, axis=1)\n return distances, indexes", "def closestClusterAndDistance(p, centers):\n\tbestIndex = 0\n closest = float(\"+inf\")\n for i in range(len(centers)):\n distance = np.sqrt(np.sum((np.array(p) - centers[i]) ** 2))\n if distance < closest:\n closest = distance\n bestIndex = i\n return (bestIndex, closest)", "def find_all_nearest_neighbours(point_cloud:np.ndarray) -> np.ndarray:\n pass", "def get_neighbours(self, x, k):\n k = min(k, self.n)\n nearest = {}\n for i in range(k):\n nearest[i] = self.euclidean_distance(x, self.train_x[i])\n for i in range(k, self.n):\n dist = self.euclidean_distance(x, self.train_x[i])\n if dist < max(nearest.values()):\n nearest.pop(max(nearest, key=nearest.get))\n nearest[i] = dist\n return nearest", "def nearest_neighbors(self):\n neighbor_distances_and_indices = []\n for idx, data_point in enumerate(self.data):\n distance = self.euclidean_dis(data_point[:-1], self.query) # Calculate the distance between the query\n # example and the current example from the data.\n\n neighbor_distances_and_indices.append((distance, idx)) # Add the distance and the index of the example\n # to an ordered collection\n\n sorted_neighbor_distances_and_indices = sorted(neighbor_distances_and_indices, key=lambda x: x[0]) #\n # Sort the ordered collection of distances and indices from smallest to largest (in ascending order) by\n # the 
distances\n\n k_nearest_distances_and_indices = sorted_neighbor_distances_and_indices[:self.k] # Pick the first K\n # entries from the sorted collection\n\n k_nearest_labels = [self.data[i][1] for distance, i in k_nearest_distances_and_indices] # Get the labels of\n # the selected K entries\n\n return k_nearest_labels, self.mode(k_nearest_labels)", "def knn_predict(p, points, outcomes, k):\n\tind = find_nearest_neighbors(p, points, k)\n\treturn majority_vote(outcomes[ind])", "def closestCluster(p, centers):\n\tbestIndex = 0\n closest = float(\"+inf\")\n for i in range(len(centers)):\n distance = np.sqrt(np.sum((np.array(p) - centers[i]) ** 2))\n if distance < closest:\n closest = distance\n bestIndex = i\n\treturn bestIndex", "def find_k_closest(self, a, k):\r\n if not a:\r\n return []\r\n\r\n # Sort input array by Euclidean distance from origin\r\n a.sort(key = lambda x: x[0] ** 2 + x[1] ** 2)\r\n\r\n # Return the \"k\" smallest elements\r\n return a[:k]", "def k_nn(frame, newPoint, colClass, k): \n counts = []\n \n # find all distances wrt the newPoint\n dist = find_distances(frame, newPoint)\n\n # find the nearest k points, extract their labels and save them in a list\n labels = [label for distance,label in dist[:k]] \n \n # for each class label, count how many occurrencies have been found\n for label in frame[colClass].unique():\n # save the number of occurrencies in a list of tuples (number, label)\n counts.append((labels.count(label), label)) \n \n # sort the list in descending order, and use the first label of the tuples'\n # list to make the prediction \n counts.sort(reverse=True)\n prediction = counts[0][1] \n \n return prediction", "def _kth_nearest_neighbor_dist(\n distance_matrix: Union[np.ndarray, scipy.sparse.csr_matrix], k\n) -> np.ndarray:\n\n if not is_integer(k):\n raise ValueError(f\"parameter 'k={k}' must be a positive integer\")\n else:\n # make sure we deal with Python built-in\n k = int(k)\n\n if not (0 <= k <= distance_matrix.shape[1]):\n raise ValueError(\n \"'k' must be an integer between 1 and \"\n f\"distance_matrix.shape[1]={distance_matrix.shape[1]}\"\n )\n\n if isinstance(distance_matrix, np.ndarray):\n dist_knn = np.partition(distance_matrix, k - 1, axis=1)[:, k - 1]\n elif isinstance(distance_matrix, scipy.sparse.csr_matrix):\n # see mircobenchmark_kth_nn.py for a comparison of implementations for the\n # sparse case\n\n def _get_kth_largest_elements_sparse(\n data: np.ndarray,\n indptr: np.ndarray,\n row_nnz,\n k_neighbor: int,\n ):\n dist_knn = np.zeros(len(row_nnz))\n for i in range(len(row_nnz)):\n start_row = indptr[i]\n dist_knn[i] = np.partition(\n data[start_row : start_row + row_nnz[i]], k_neighbor - 1\n )[k_neighbor - 1]\n\n return dist_knn\n\n row_nnz = distance_matrix.getnnz(axis=1)\n\n if (row_nnz < k).any():\n raise ValueError(\n f\"There are {(row_nnz < k).sum()} points that \"\n f\"do not have at least k_neighbor={k}.\"\n )\n\n dist_knn = _get_kth_largest_elements_sparse(\n distance_matrix.data,\n distance_matrix.indptr,\n row_nnz,\n k,\n )\n else:\n raise TypeError(f\"type {type(distance_matrix)} not supported\")\n\n return dist_knn" ]
[ "0.8639097", "0.7705145", "0.7612184", "0.7396099", "0.72835445", "0.7229254", "0.7169603", "0.7161284", "0.7108857", "0.7079597", "0.7041696", "0.69775057", "0.6954059", "0.69533056", "0.6897031", "0.6890956", "0.68394387", "0.67642987", "0.67409587", "0.66584134", "0.6624188", "0.6620691", "0.6617968", "0.6575903", "0.656326", "0.65252143", "0.64597535", "0.64442426", "0.6426097", "0.6386255" ]
0.8469665
1
Test that the graph of a RegisteredPipeline can be copied. Each step in the copied graph should be a new object, but have the same name, predecessors, and model version as the original.
def test_copy_graph(
    make_mock_pipeline_graph,
    make_mock_registered_model_version,
    make_mock_registered_model,
) -> None:
    mocked_rm = make_mock_registered_model(id=123, name="test_rm")
    with patch.object(
        verta.pipeline.PipelineStep, "_get_registered_model", return_value=mocked_rm
    ):
        graph = make_mock_pipeline_graph()
        pipeline = RegisteredPipeline(
            graph=graph,
            registered_model_version=make_mock_registered_model_version(),
        )
        copied_graph = pipeline.copy_graph()
        # convert from sets to lists and sort for side-by-side comparison
        graph_steps_sorted = sorted(graph.steps, key=lambda x: x.name)
        copied_graph_steps_sorted = sorted(copied_graph.steps, key=lambda x: x.name)

        for orig_step, copied_step in zip(graph_steps_sorted, copied_graph_steps_sorted):
            assert orig_step is not copied_step
            assert orig_step.name == copied_step.name
            assert orig_step.predecessors == copied_step.predecessors
            assert (
                orig_step.registered_model_version.id
                == copied_step.registered_model_version.id
            )
        assert copied_graph is not graph
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_clone_scenario(self):\n pass", "def test_clone(self):\n f0 = 5 * (np.random.rand(10, 5) - 0.5)\n ga = population.Evolver(f0, eval_one_max)\n ga.clone()\n\n # should have created a new generation\n self.assertEqual(len(ga.generations), 2)\n\n # should have copied fitness\n self.assertFalse(ga.generations[-1].new)", "def test_copy(self):\n\n # Copy the 'orig' data pipe to the 'new' data pipe.\n pipes.copy('orig', 'new')\n\n # Test that the new data pipe exists.\n self.assert_('new' in ds)\n\n # Test that the new data pipe has the object 'x' and that its value is 1.\n self.assertEqual(ds['new'].x, 1)\n\n # Change the value of x.\n ds['new'].x = 2\n\n # Test that the two values are different.\n self.assert_(ds['orig'].x != ds['new'].x)\n\n # Test that the new data pipe has the object 'mol[0].res[0].spin[0].num' and that its value is 1.\n self.assertEqual(ds['new'].mol[0].res[0].spin[0].num, 1)\n\n # Change the spin system number.\n ds['new'].mol[0].res[0].spin[0].num = 2\n\n # Test that the original spin system number hasn't changed.\n self.assertEqual(ds['orig'].mol[0].res[0].spin[0].num, 1)", "def test_copied_models_are_equal(dbdiskrepo):\n original = fit_model()\n\n shallow = copy(original)\n assert original.artifact.id == shallow.artifact.id\n assert original.artifact.value_id == shallow.artifact.value_id\n assert hash(original) == hash(shallow)\n\n deep = deepcopy(original)\n assert original.artifact.id == deep.artifact.id\n assert original.artifact.value_id == deep.artifact.value_id\n assert hash(original) == hash(deep)", "def test_clone_identical(self, cosmo):\n assert cosmo.clone() is cosmo", "def test_to_pipeline_configuration_no_resources(\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n pipeline_configuration = pipeline._to_pipeline_configuration()\n assert pipeline_configuration[\"pipeline_version_id\"] == pipeline.id\n for graph_step, config_step in zip(graph.steps, pipeline_configuration[\"steps\"]):\n # All steps are included in the configuration\n assert graph_step.name == config_step[\"name\"]\n # No resources are found in the resulting configuration\n assert \"resources\" not in config_step.keys()", "def test_bad_mutation_of_graph_steps_exception(\n make_mock_registered_model,\n make_mock_registered_model_version,\n make_mock_pipeline_graph,\n):\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n mocked_rmv = make_mock_registered_model_version()\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n\n graph.steps.add(\"not_a_step\")\n with pytest.raises(TypeError) as err:\n RegisteredPipeline(graph=graph, registered_model_version=mocked_rmv)\n assert (\n str(err.value) == f\"individual steps of a PipelineGraph must be type\"\n f\" PipelineStep, not <class 'str'>.\"\n )", "def test_deep_copy(api):\n config = api.config()\n f1, f2 = config.flows.flow(name='f1').flow(name='f2')\n f1.packet.ethernet().ipv4().tcp()\n f2.packet.ethernet().ipv4().udp()\n f3 = f1.clone()\n f3.name = 'f3'\n config.flows.append(f3)\n f4 = copy.deepcopy(f2)\n f4.name = 'f4'\n config.flows.append(f4)\n 
print(config)\n assert(len(config.flows) == 4)\n assert(config.flows[-2].name == f3.name)\n assert(config.flows[-1].name == f4.name)", "def test_copy_current(self):\n\n # Copy the 'orig' data pipe to the 'new' data pipe.\n pipes.copy(pipe_to='new')\n\n # Test that the new data pipe exists.\n self.assert_('new' in ds)\n\n # Test that the new data pipe has the object 'x' and that its value is 1.\n self.assertEqual(ds['new'].x, 1)\n\n # Change the value of x.\n ds['new'].x = 2\n\n # Test that the two values are different.\n self.assert_(ds['orig'].x != ds['new'].x)\n\n # Test that the new data pipe has the object 'mol[0].res[0].spin[0].num' and that its value is 1.\n self.assertEqual(ds['new'].mol[0].res[0].spin[0].num, 1)\n\n # Change the spin system number.\n ds['new'].mol[0].res[0].spin[0].num = 2\n\n # Test that the original spin system number hasn't changed.\n self.assertEqual(ds['orig'].mol[0].res[0].spin[0].num, 1)", "def test_deepcopy_not_shallow(self):\n mol_source = create_ethanol()\n mol_source.generate_conformers()\n\n mol_copy = copy.deepcopy(mol_source)\n\n assert mol_source._conformers is not mol_copy._conformers\n assert all(\n a is not b for a, b in zip(mol_source._conformers, mol_copy._conformers)\n )\n\n assert mol_source._atoms is not mol_copy._atoms\n assert all(a is not b for a, b in zip(mol_source._atoms, mol_copy._atoms))\n\n assert mol_source._bonds is not mol_copy._bonds\n assert all(a is not b for a, b in zip(mol_source._bonds, mol_copy._bonds))\n\n assert mol_source._hierarchy_schemes is not mol_copy._hierarchy_schemes\n assert all(\n a is not b\n for a, b in zip(mol_source._hierarchy_schemes, mol_copy._hierarchy_schemes)\n )\n\n assert mol_source._properties is not mol_copy._properties\n assert mol_source._partial_charges is not mol_copy._partial_charges", "def testCopyCollection(self):\n copy = self.node.copy_collection()\n\n self.assertEqual(\n self.node.type,\n copy.type\n )\n\n self.assertEqual(\n self.node.desc,\n copy.desc\n )\n\n self.assertEqual(\n self.node.input_desc,\n copy.input_desc\n )\n\n self.assertEqual(\n self.node.viewing_desc,\n copy.viewing_desc\n )\n\n self.assertEqual(\n self.node.all_children,\n copy.all_children\n )", "def test_to_pipeline_definition(\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n pipeline_definition = pipeline._to_pipeline_definition()\n assert pipeline_definition == {\n \"pipeline_version_id\": pipeline.id,\n \"graph\": graph._to_graph_definition(),\n \"steps\": graph._to_steps_definition(),\n }", "def test_deepcopy(self):\n t = Identity()\n t.transform([2])\n copy.deepcopy(t)", "def test_deepcopy(self):\n t = Quantize()\n t.transform([2])\n copy.deepcopy(t)", "def test_copy(self):\n data = [[0, 1], [1, 0]]\n b1 = Board(data)\n b2 = b1.copy()\n # test if proper copy\n self.assertListEqual(b1.data, b2.data)\n # teset if not just a shallow copy\n b1.data[0][0] = 1\n self.assertNotEqual(b1.data[0][0], b2.data[0][0])", "def test_clone_deployment(self):\n pass", "def test_copy_2(dset_full):\n dset_new = copy.deepcopy(dset_full)\n\n # Test internal references in the dataset\n assert id(dset_new.site_pos.other) == id(dset_new.sat_pos)\n assert 
id(dset_new.site_delta.ref_pos) == id(dset_new.site_pos)\n assert id(dset_new.site_posvel.other) == id(dset_new.sat_posvel)\n assert id(dset_new.site_posvel_delta.ref_pos) == id(dset_new.site_posvel)\n\n assert id(dset_new.group.site_pos.other) == id(dset_new.group.sat_pos)\n assert id(dset_new.group.site_delta.ref_pos) == id(dset_new.group.site_pos)\n assert id(dset_new.group.site_posvel.other) == id(dset_new.group.sat_posvel)\n assert id(dset_new.group.site_posvel_delta.ref_pos) == id(dset_new.group.site_posvel)\n\n # Verify that new dataset have different references than original object\n for field_name, field in dset_full._fields.items():\n assert id(field.data) != id(dset_new._fields[field_name].data)\n try:\n for group_field_name, group_field in field.data._fields.items():\n assert id(group_field.data) != id(dset_new._fields[field_name].data._fields[group_field_name].data)\n except AttributeError:\n # Field is not a group\n pass", "def test_copy(self):\n s = Sequence(\"TTTTTTTTTTAAAA\", name=\"test_copy\")\n annot1 = s.add_annotation(Feature, \"exon\", \"annot1\", [(0, 10)])\n annot2 = s.add_annotation(Feature, \"exon\", \"annot2\", [(10, 14)])\n got = s.copy()\n got_annot1 = got.get_annotations_matching(\n annotation_type=\"exon\", name=\"annot1\"\n )[0]\n got_annot2 = got.get_annotations_matching(\n annotation_type=\"exon\", name=\"annot2\"\n )[0]\n self.assertIsNot(got, s)\n self.assertIsNot(got_annot1, annot1)\n self.assertIsNot(got_annot2, annot2)\n self.assertEqual(got.name, s.name)\n self.assertEqual(got.info, s.info)\n self.assertEqual(got._seq, s._seq)\n self.assertEqual(got.moltype, s.moltype)\n annot1_slice = str(annot1.get_slice())\n annot2_slice = str(annot2.get_slice())\n got1_slice = str(got.annotations[0].get_slice())\n got2_slice = str(got.annotations[1].get_slice())\n self.assertEqual(annot1_slice, got1_slice)\n self.assertEqual(annot2_slice, got2_slice)", "def copy_graph(g):\n return copy.deepcopy(g)", "def test_copy_pickle(self):\n\n # Test that we can pickle and unpickle\n # We force a pattern that contains all custom types:\n # `Selector`, `NullSelector`, `SelectorTag`, `SelectorAttribute`,\n # `SelectorNth`, `SelectorLang`, `SelectorList`, and `Namespaces`\n p1 = sv.compile(\n 'p.class#id[id]:nth-child(2):lang(en):focus', {'html': 'http://www.w3.org/TR/html4/'}\n )\n sp1 = pickle.dumps(p1)\n pp1 = pickle.loads(sp1)\n self.assertTrue(pp1 == p1)\n\n # Test that we pull the same one from cache\n p2 = sv.compile(\n 'p.class#id[id]:nth-child(2):lang(en):focus', {'html': 'http://www.w3.org/TR/html4/'}\n )\n self.assertTrue(p1 is p2)\n\n # Test that we compile a new one when providing a different flags\n p3 = sv.compile(\n 'p.class#id[id]:nth-child(2):lang(en):focus', {'html': 'http://www.w3.org/TR/html4/'}, flags=0x10\n )\n self.assertTrue(p1 is not p3)\n self.assertTrue(p1 != p3)\n\n # Test that the copy is equivalent, but not same.\n p4 = copy.copy(p1)\n self.assertTrue(p4 is not p1)\n self.assertTrue(p4 == p1)\n\n p5 = copy.copy(p3)\n self.assertTrue(p5 is not p3)\n self.assertTrue(p5 == p3)\n self.assertTrue(p5 is not p4)", "def test_copy_sources(self):\n metric_copy = copy_metric(self.metric, self.DATA_MODEL)\n self.assertEqual(\"Source\", first(metric_copy[\"sources\"].values())[\"name\"])", "def copy_tree_checker(src, dst):\n copy_tree(src, dst)\n return True", "def test_to_pipeline_configuration_valid_incomplete(\n resources,\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = 
make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n partial_steps = list(graph.steps)[:-1]\n excluded_step = list(graph.steps)[-1]\n step_resources = {step.name: resources for step in partial_steps}\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n\n pipeline_configuration = pipeline._to_pipeline_configuration(\n pipeline_resources=step_resources\n )\n assert pipeline_configuration[\"pipeline_version_id\"] == pipeline.id\n # All steps have been included in the configuration\n assert len(graph.steps) == len(pipeline_configuration[\"steps\"])\n # Compare the steps that have resources, allowing zip to drop the excluded step.\n for graph_step, config_step in zip(partial_steps, pipeline_configuration[\"steps\"]):\n # All steps provided are included in the configuration.\n assert graph_step.name == config_step[\"name\"]\n # All steps for which resource were provided have resources in the config.\n assert \"resources\" in config_step.keys()\n # The step for which no resources were provided is in the config without resources.\n assert excluded_step.name == pipeline_configuration[\"steps\"][-1][\"name\"]\n assert \"resources\" not in pipeline_configuration[\"steps\"][-1].keys()", "def test_clone_change_param(self, cosmo):\n pass", "def test_clone_system(self):\n pass", "def component_clone ( same ) : \n if isinstance ( same , str ) \\\n and same.strip().lower() in ( 'clone' , 'cloned' , 'same' ) : return True \n return False", "def test_BuildModel1(self):\n print(\"\\nTest 5: Building a Model with cloning\")\n builder = StaticBuilder(\"Clone\")\n in1 = builder.addInput(10)\n enc1 = builder.addInner(3)\n out1 = builder.addOutput(name=\"Out1\")\n out2 = builder.addOutput(name=\"Out2\")\n \n builder.addDirectedLink(in1, enc1)\n builder.addDirectedLink(enc1, out1)\n builder.addDirectedLink(enc1, out2)\n \n builder.build()", "def copy(self):\n return self.__class__(\n self.kind, self.link_ids.copy(), self.included_nodes.copy(), self.mass,\n self.name, self.crossring_cleavages.copy(), self.composition.copy())", "def test_to_pipeline_configuration_invalid_resources(\n resources,\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rmv\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n step_resources = {step.name: resources for step in graph.steps}\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n # step name not in pipeline\n step_resources[\"invalid_step_name\"] = resources\n with pytest.raises(ValueError) as err:\n pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err.value) == \"pipeline_resources contains resources for a step not in the \"\n \"pipeline: 'invalid_step_name'\"\n )\n step_resources.pop(\"invalid_step_name\")\n # step name not a string\n step_resources.update({123: resources})\n with pytest.raises(TypeError) as err2:\n pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err2.value) == \"pipeline_resources keys must be type str, not <class 'int'>\"\n )\n step_resources.pop(123)\n # step resource not a Resources object\n 
step_resources.update({\"step_1\": \"not_resources\"})\n with pytest.raises(TypeError) as err3:\n pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err3.value)\n == \"pipeline_resources values must be type Resources, not <class 'str'>\"\n )", "def clone( m, orig):\r\n if m.ObjType not in (1, 6): return\r\n if not orig: return\r\n \r\n if m.ObjType == 6: # Target is a Folder\r\n if orig.ObjType == 6: cloned = m.CopyFolderDisp( orig) # Orig is Folder too\r\n else: cloned = m.CopyFCODisp( orig) # Orig is FCO\r\n elif m.ObjType == 1:\r\n cloned = m.CopyFCODisp( orig, metaRole( orig)) # Target is Model, Orig is FCO\r\n \r\n if cloned:\r\n \tcloned.Name = \"Cloned\" + orig.Name\r\n return cloned" ]
[ "0.66606313", "0.66042787", "0.6354918", "0.63137066", "0.62398744", "0.6187213", "0.61756086", "0.610647", "0.60987633", "0.60784256", "0.60639167", "0.59603596", "0.59550446", "0.5896772", "0.58627504", "0.58563215", "0.5846192", "0.5841501", "0.5821267", "0.5804612", "0.5798881", "0.5793275", "0.576209", "0.5747986", "0.57352495", "0.571422", "0.5675261", "0.56747997", "0.5668155", "0.5667453" ]
0.85880643
0
Verify the expected sequence of calls when a pipeline definition is logged as an artifact to the pipeline's model version. Fetching the registered model is patched instead of mocking a response to avoid having to pass the RM's id down through multiple pytest fixtures.
def test_log_pipeline_definition_artifact(
    model_version_name,
    mocked_responses,
    make_mock_pipeline_graph,
    make_mock_registered_model,
    make_mock_registered_model_version,
) -> None:
    rm = make_mock_registered_model(id=123, name="test_rm")
    rmv = make_mock_registered_model_version()
    # Fetch the registered model version
    mocked_responses.get(
        f"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/{rmv.id}",
        json={
            "model_version": {
                "id": rmv.id,
                "registered_model_id": rmv.registered_model_id,
                "version": model_version_name,
            }
        },
        status=200,
    )
    mocked_responses.put(
        f"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/registered_models/{rmv.registered_model_id}/model_versions/{rmv.id}",
        json={},
        status=200,
    )
    # Fetch the artifact upload URL
    mocked_responses.post(
        f"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/{rmv.id}/getUrlForArtifact",
        json={
            "url": f"https://account.s3.amazonaws.com/development/ModelVersionEntity/"
            f"{rmv.id}/pipeline.json"
        },
        status=200,
    )
    # Upload the artifact
    mocked_responses.put(
        f"https://account.s3.amazonaws.com/development/ModelVersionEntity/{rmv.id}/pipeline.json",
        json={},
        status=200,
    )
    with patch.object(
        verta.pipeline.PipelineStep, "_get_registered_model", return_value=rm
    ):
        pipeline = RegisteredPipeline(
            graph=make_mock_pipeline_graph(),
            registered_model_version=rmv,
        )
        pipeline._log_pipeline_definition_artifact()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_pipeline_definition_artifact(\n make_mock_registered_model_version,\n make_mock_simple_pipeline_definition,\n) -> None:\n rmv = make_mock_registered_model_version()\n pipeline_definition = RegisteredPipeline._get_pipeline_definition_artifact(\n registered_model_version=rmv,\n )\n assert pipeline_definition == make_mock_simple_pipeline_definition(id=rmv.id)", "def test_from_pipeline_definition(\n make_mock_registered_model_version,\n mocked_responses,\n) -> None:\n rmv = make_mock_registered_model_version()\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/1\",\n json={},\n status=200,\n )\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/2\",\n json={},\n status=200,\n )\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/registered_models/0\",\n json={},\n status=200,\n )\n pipeline = RegisteredPipeline._from_pipeline_definition(\n registered_model_version=rmv,\n )\n assert isinstance(pipeline, RegisteredPipeline)\n assert pipeline.id == rmv.id", "def test_to_pipeline_definition(\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n pipeline_definition = pipeline._to_pipeline_definition()\n assert pipeline_definition == {\n \"pipeline_version_id\": pipeline.id,\n \"graph\": graph._to_graph_definition(),\n \"steps\": graph._to_steps_definition(),\n }", "async def test_version(doof, repo_info, event_loop, mocker):\n a_hash = 'hash'\n version = '1.2.3'\n fetch_release_hash_mock = mocker.patch('bot.fetch_release_hash', autospec=True, return_value=a_hash)\n get_version_tag_mock = mocker.patch('bot.get_version_tag', autospec=True, return_value=\"v{}\".format(version))\n await doof.run_command(\n manager='mitodl_user',\n channel_id=repo_info.channel_id,\n words=['version'],\n loop=event_loop,\n )\n assert doof.said(\n \"Wait a minute! 
My evil scheme is at version {}!\".format(version)\n )\n\n fetch_release_hash_mock.assert_called_once_with(repo_info.prod_hash_url)\n get_version_tag_mock.assert_called_once_with(GITHUB_ACCESS, repo_info.repo_url, a_hash)", "async def test_release(doof, repo_info, event_loop, mocker, command):\n version = '1.2.3'\n pr = ReleasePR(\n version=version,\n url='http://new.url',\n body='Release PR body',\n )\n get_release_pr_mock = mocker.patch('bot.get_release_pr', autospec=True, side_effect=[None, pr, pr])\n release_mock = mocker.patch('bot.release', autospec=True)\n\n wait_for_deploy_sync_mock = mocker.Mock()\n\n async def wait_for_deploy_fake(*args, **kwargs):\n \"\"\"await cannot be used with mock objects\"\"\"\n wait_for_deploy_sync_mock(*args, **kwargs)\n\n mocker.patch('bot.wait_for_deploy', wait_for_deploy_fake)\n authors = ['author1', 'author2']\n mocker.patch('bot.get_unchecked_authors', return_value=authors)\n\n wait_for_checkboxes_sync_mock = mocker.Mock()\n async def wait_for_checkboxes_fake(*args, **kwargs):\n \"\"\"await cannot be used with mock objects\"\"\"\n wait_for_checkboxes_sync_mock(*args, **kwargs)\n mocker.patch('bot.wait_for_checkboxes', wait_for_checkboxes_fake)\n\n command_words = command.split() + [version]\n me = 'mitodl_user'\n await doof.run_command(\n manager=me,\n channel_id=repo_info.channel_id,\n words=command_words,\n loop=event_loop,\n )\n\n org, repo = get_org_and_repo(repo_info.repo_url)\n get_release_pr_mock.assert_any_call(GITHUB_ACCESS, org, repo)\n release_mock.assert_called_once_with(\n github_access_token=GITHUB_ACCESS,\n repo_url=repo_info.repo_url,\n new_version=pr.version,\n )\n wait_for_deploy_sync_mock.assert_called_once_with(\n github_access_token=GITHUB_ACCESS,\n repo_url=repo_info.repo_url,\n hash_url=repo_info.rc_hash_url,\n watch_branch='release-candidate',\n )\n assert doof.said(\"Now deploying to RC...\")\n assert doof.said(\"These people have commits in this release: {}\".format(', '.join(authors)))\n wait_for_checkboxes_sync_mock.assert_called_once_with(GITHUB_ACCESS, org, repo)\n assert doof.said(\n \"Release {version} is ready for the Merginator {name}\".format(\n version=pr.version,\n name=format_user_id(me),\n )\n )", "def test_release_version_found(self, mock_git_info): # pylint: disable=invalid-name, unused-argument\n set_version_from_git_tag(self.project, self.logger)\n self.assertEqual(self.logger.info.call_count, 2)\n self.assertEqual(self.project.version, '1.2.3')", "def test_check_version_release(self):\n with self.assertLogs(\"dakara_feeder.version\", \"DEBUG\") as logger:\n with patch.multiple(\n \"dakara_feeder.version\", __version__=\"0.0.0\", __date__=\"1970-01-01\"\n ):\n check_version()\n\n # assert effect on logs\n self.assertListEqual(\n logger.output,\n [\"INFO:dakara_feeder.version:\" \"Dakara feeder 0.0.0 (1970-01-01)\"],\n )", "def test_version_details(converted_tests):\n with mock.patch(\"mig3_client.git\") as patched_git:\n patched_git.Repo().head.object.hexsha = \"a1\" * 20\n patched_git.Repo().head.object.author.email = \"[email protected]\"\n\n submission = SubmissionBuilder(\"t\", \"b\", converted_tests).build()\n\n assert submission.get(\"version\", {}).get(\"hash\") == (\"a1\" * 20), submission\n assert submission.get(\"version\", {}).get(\"author\") == (\"[email protected]\"), submission", "def test_pipeline_run_monitor_success(monkeypatch, mocker: MockFixture):\n # arrange\n monkeypatch.setenv(\"WORKSPACE_NAME\", \"mock_workspace_name\")\n monkeypatch.setenv(\"SUBSCRIPTION_ID\", 
\"mock_subscription_id\")\n monkeypatch.setenv(\"RESOURCE_GROUP\", \"mock_resource_group\")\n monkeypatch.setenv(\"APP_INSIGHTS_CONNECTION_STRING\", \"InstrumentationKey=00000000-0000-0000-0000-000000000001\")\n\n # create mock logger by assign name and info function\n mock_logger = logging.getLogger('src.PipelineRunMonitor.pipeline_run_monitor')\n mock_logger_info = mocker.patch.object(mock_logger, 'info')\n\n mock_workspace = mocker.patch(\"src.PipelineRunMonitor.pipeline_run_monitor.Workspace\")\n mock_run = mocker.patch(\"src.PipelineRunMonitor.pipeline_run_monitor.Run\")\n mock_run.return_value.get.return_value = mock_run\n\n event = func.EventGridEvent(\n id=\"xxx\",\n data={\"runId\": \"xxx\"},\n topic=\"httpxxx\",\n subject=\"xxx\",\n event_type=\"Microsoft.MachineLearningServices.RunCompleted\",\n event_time=0,\n data_version=\"xxx\"\n )\n\n # act\n main(event)\n\n # assert\n mock_logger_info.assert_called_once()\n mock_workspace.assert_called_once()", "def test_model_by_version_get(self):\n\n # Firstly, find existing version - latest\n response = self.client().get('/model')\n latest_model = Model.from_json(response.data.decode())\n latest_version = latest_model.version\n\n # Accesses latest model\n response = self.client().get('/models/'+str(latest_version))\n self.assertEqual(response.status_code, 200)\n loaded_model = Model.from_json(response.data.decode())\n self.assertEqual(loaded_model.version, latest_version)\n self.assertEqual(loaded_model, latest_model)\n\n # Accesses random model version\n random_version = random.choice(list(self.data_manipulation.versions.keys()))\n random_model = self.data_manipulation.versions[random_version]\n response = self.client().get('/models/'+str(random_version))\n self.assertEqual(response.status_code, 200)\n loaded_model = Model.from_json(response.data.decode())\n self.assertEqual(loaded_model.version, random_version)\n self.assertEqual(loaded_model, random_model)\n\n # Random version is removed\n del self.data_manipulation.versions[random_version]\n response = self.client().get('/models/'+str(random_version))\n self.assertEqual(response.status_code, 404)", "def test_get_version(self):\n pass", "def test_rpc_stage_dependencies(self, mock_handle_resp, mock_decode_resp_str,\n mock_send_request, mock_gen_request,\n mock_precheck):\n self.client.initialize()\n\n expected_response_str = ('{\"id\": 0, \"result\": 123, \"error\": null, '\n '\"callback\": null}')\n expected_response_dict = {\n 'id': 0,\n 'result': 123,\n 'error': None,\n 'callback': None,\n }\n expected_request = ('{\"id\": 10, \"method\": \"some_rpc\", \"params\": [1, 2],'\n '\"kwargs\": {\"test_key\": 3}')\n expected_result = 123\n\n mock_gen_request.return_value = expected_request\n mock_send_request.return_value = expected_response_str\n mock_decode_resp_str.return_value = expected_response_dict\n mock_handle_resp.return_value = expected_result\n rpc_result = self.client.some_rpc(1, 2, test_key=3)\n\n mock_precheck.assert_called()\n mock_gen_request.assert_called_with(0, 'some_rpc', 1, 2, test_key=3)\n mock_send_request.assert_called_with(expected_request)\n mock_decode_resp_str.assert_called_with(0, expected_response_str)\n mock_handle_resp.assert_called_with('some_rpc', expected_response_dict)\n self.assertEqual(rpc_result, expected_result)", "def test_new_deployment_pipeline(self):\n # set up\n new_config_patcher = patch(\n 'factories.new_config',\n return_value=5,\n )\n mock_new_config = new_config_patcher.start()\n\n new_env_patcher = patch('factories.new_env', 
return_value=9)\n mock_new_env = new_env_patcher.start()\n\n # run SUT passing branch_id: 1, copy_config_id: 6, copy_env_id: None\n pipeline_id = new_deployment_pipeline(1, 6)\n\n # confirm that new config was based on config 6\n mock_new_config.assert_called_once_with(6)\n\n # confirm that new env was not based on anything\n mock_new_env.assert_called_once_with(None)\n\n # confirm reasonable sql was executed to make a pipeline\n self.mock_get_cur.return_value.execute.assert_called_once_with(\n \"INSERT INTO deployment_pipeline \" + \\\n \"(branch_id, config_id, environment_id, automatic) \" + \\\n \"VALUES (%s, %s, %s, %s) \" + \\\n \"RETURNING deployment_pipeline_id\",\n (1, 5, 9, False),\n )\n\n # make sure we closed the cursor\n self.mock_get_cur.return_value.close.assert_called_once_with()", "def test_check_dependencies_with_found(self):\n self.spy_on(check_install, op=kgb.SpyOpMatchAny([\n {\n 'args': (['cm', 'version'],),\n 'op': kgb.SpyOpReturn(True),\n },\n ]))\n\n client = self.build_client(setup=False)\n client.check_dependencies()\n\n self.assertSpyCallCount(check_install, 1)\n self.assertSpyCalledWith(check_install, ['cm', 'version'])", "def test_alert_pipeline(monkeypatch: pytest.MonkeyPatch, github_auth: str):\n test_status_repo = \"conbench/benchalerts\"\n test_status_commit = \"f6e70aeb29ce07c40eed0c0175e9dced488ed6ee\"\n\n monkeypatch.setenv(\"CONBENCH_URL\", \"https://velox-conbench.voltrondata.run/\")\n velox_commit = \"c76715c9db1eea7cf3f32dca6fe78fc35c4f3ecd\"\n\n github_run_id = os.getenv(\"GITHUB_RUN_ID\", \"2974120883\")\n build_url = f\"https://github.com/{test_status_repo}/actions/runs/{github_run_id}\"\n\n # first, test error handlers\n pipeline = AlertPipeline(\n steps=[\n steps.GitHubCheckStep(\n commit_hash=test_status_commit,\n comparison_step_name=\"doesnt_exist\",\n repo=test_status_repo,\n external_id=\"123\",\n build_url=build_url,\n )\n ],\n error_handlers=[\n steps.GitHubCheckErrorHandler(\n commit_hash=test_status_commit,\n repo=test_status_repo,\n build_url=build_url,\n )\n ],\n )\n with pytest.raises(KeyError):\n pipeline.run_pipeline()\n\n # sleep to see the updated statuses on the PR\n time.sleep(1)\n\n # now a real pipeline\n pipeline_steps = [\n steps.GetConbenchZComparisonStep(\n commit_hash=velox_commit,\n baseline_run_type=steps.BaselineRunCandidates.parent,\n z_score_threshold=None,\n step_name=\"z_none\",\n ),\n steps.GitHubCheckStep(\n repo=test_status_repo,\n commit_hash=test_status_commit,\n comparison_step_name=\"z_none\",\n external_id=\"123\",\n build_url=build_url,\n ),\n ]\n if not os.getenv(\"CI\"): # don't post PR comments in CI\n pipeline_steps.append(\n steps.GitHubPRCommentAboutCheckStep(pr_number=5, repo=test_status_repo)\n )\n\n pipeline = AlertPipeline(pipeline_steps)\n outputs = pipeline.run_pipeline()\n\n assert outputs[\"GitHubCheckStep\"][0][\"conclusion\"] == \"failure\"\n if not os.getenv(\"CI\"):\n expected_comment = \"\"\"Conbench analyzed the 1 benchmark run on commit `c76715c9`.\n\nThere was 1 benchmark result indicating a performance regression:\n\n- Commit Run on `GitHub-runner-8-core` at [2023-02-28 18:08:51Z](http://velox-conbench.voltrondata.run/compare/runs/GHA-4286800623-1...GHA-4296026775-1/)\n - [`velox_benchmark_basic_vector_fuzzer` (C++) with source=cpp-micro, suite=velox_benchmark_basic_vector_fuzzer](http://velox-conbench.voltrondata.run/compare/benchmarks/a128eb19cc9442409148c91f7fa18cdf...ff7a1a86df5a4d56b6dbfb006c13c638)\n\nThe [full Conbench 
report](https://github.com/conbench/benchalerts/runs/RUN_ID) has more details.\"\"\"\n\n actual_comment = outputs[\"GitHubPRCommentAboutCheckStep\"][\"body\"].strip()\n actual_comment = re.sub(\n r\"benchalerts/runs/\\d+\", \"benchalerts/runs/RUN_ID\", actual_comment\n )\n\n assert expected_comment == actual_comment\n\n # sleep to see the updated statuses on the PR\n time.sleep(1)", "def test_copy_graph(\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n copied_graph = pipeline.copy_graph()\n # convert from sets to lists and sort for side-by-side comparison\n graph_steps_sorted = sorted(graph.steps, key=lambda x: x.name)\n copied_graph_steps_sorted = sorted(copied_graph.steps, key=lambda x: x.name)\n\n for orig_step, copied_step in zip(graph_steps_sorted, copied_graph_steps_sorted):\n assert orig_step is not copied_step\n assert orig_step.name == copied_step.name\n assert orig_step.predecessors == copied_step.predecessors\n assert (\n orig_step.registered_model_version.id\n == copied_step.registered_model_version.id\n )\n assert copied_graph is not graph", "def test_to_pipeline_configuration_no_resources(\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n pipeline_configuration = pipeline._to_pipeline_configuration()\n assert pipeline_configuration[\"pipeline_version_id\"] == pipeline.id\n for graph_step, config_step in zip(graph.steps, pipeline_configuration[\"steps\"]):\n # All steps are included in the configuration\n assert graph_step.name == config_step[\"name\"]\n # No resources are found in the resulting configuration\n assert \"resources\" not in config_step.keys()", "def testSimpleFailingMethodReturnsVersion(self):\n body = dumps({'id': 100, 'jsonrpc': '2.0', 'method': 'fail',\n 'params': [39, 'steps']})\n headers = Headers({'Content-Length': [str(len(body))],\n 'Content-Type': ['application/json']})\n request = FakeRequest(headers=headers, body=body)\n resource = TestResource(None, None)\n result = yield resource.deferred_render_POST(request)\n response = loads(result)\n self.assertEqual('2.0', response['jsonrpc'])", "def test_analyzer_task_error(db, mocker, versions):\n job = Job(name=\"analyzer\")\n job.save()\n\n mock = mocker.patch(\"creator.tasks.analyze_version\")\n logging = mocker.patch(\"creator.tasks.logger\")\n mock.side_effect = Exception(\"error occurred\")\n\n versions = VersionFactory(analysis=None)\n\n analyzer_task()\n\n assert mock.call_count == Version.objects.count()\n assert logging.warning.call_count == Version.objects.count()\n assert (\n f\"Failed to analyze {Version.objects.count()} versions\"\n in logging.info.call_args_list[-1][0][0]\n )", "def test_force_and_update(self, _get_repo_name, # pylint: disable=unused-argument\n set_version_from_git_tag_mock):\n 
force_semver_git_tag_plugin(self.project, self.logger)\n self.project.set_property('semver_git_tag_increment_part', 'minor')\n update_version_from_git_tag(self.project, self.logger)\n self.assertEqual(set_version_from_git_tag_mock.call_count, 2)\n self.assertEqual(self.logger.info.call_count, 2)\n self.logger.warn.assert_called_once_with(\n \"Property `semver_git_tag_increment_part` was changed. \"\n \"For better compatibility recommended to use \"\n \"command line `pyb ... -P semver_git_tag_increment_part=...`, \"\n \"otherwise some version-related properties could \"\n \"be spoiled.\"\n )", "def test_report_result_changed_version(self):\n mock_status_callback = MagicMock()\n mock_firmware_handler = self.MockFirmwareHandler()\n firmware_version = \"1.0\"\n mock_firmware_handler.get_current_version = MagicMock(\n return_value=firmware_version\n )\n firmware_update = OSFirmwareUpdate(\n mock_firmware_handler, mock_status_callback\n )\n firmware_update.logger.setLevel(logging.CRITICAL)\n with open(\"last_firmware_version.txt\", \"w\") as file:\n file.write(firmware_update.firmware_handler.get_current_version())\n\n firmware_version = \"2.0\"\n mock_firmware_handler.get_current_version = MagicMock(\n return_value=firmware_version\n )\n expected_status = FirmwareUpdateStatus(\n FirmwareUpdateStatusType.SUCCESS\n )\n\n firmware_update.report_result()\n\n firmware_update.status_callback.assert_called_once_with(\n expected_status\n )", "def test_status_ok(client_mock):\n client_mock.get.return_value = {\n 'channel-map': [\n {\n 'channel': 'latest/beta',\n 'expiration-date': None,\n 'platform': {'architecture': 'all', 'os': 'all', 'series': 'all'},\n 'progressive': {'paused': None, 'percentage': None},\n 'revision': 5,\n 'when': '2020-07-16T18:45:24Z',\n }, {\n 'channel': 'latest/edge/mybranch',\n 'expiration-date': '2020-08-16T18:46:02Z',\n 'platform': {'architecture': 'all', 'os': 'all', 'series': 'all'},\n 'progressive': {'paused': None, 'percentage': None},\n 'revision': 10,\n 'when': '2020-07-16T18:46:02Z',\n }\n ],\n 'package': {\n 'channels': [\n {\n 'branch': None,\n 'fallback': None,\n 'name': 'latest/stable',\n 'risk': 'stable',\n 'track': 'latest',\n }, {\n 'branch': 'mybranch',\n 'fallback':\n 'latest/stable',\n 'name': 'latest/edge/mybranch',\n 'risk': 'edge',\n 'track': 'latest',\n },\n ]\n },\n 'revisions': [\n {\n 'revision': 5,\n 'version': '5',\n 'created-at': '2020-06-29T22:11:05',\n 'status': 'approved',\n 'errors': None,\n }, {\n 'revision': 10,\n 'version': '63a852b',\n 'created-at': '2020-06-29T22:11:10',\n 'status': 'approved',\n 'errors': None,\n },\n ],\n }\n\n store = Store()\n channel_map, channels, revisions = store.list_releases('testname')\n\n # check how the client is used\n assert client_mock.mock_calls == [\n call.get('/v1/charm/testname/releases'),\n ]\n\n # check response\n cmap1, cmap2 = channel_map\n assert cmap1.revision == 5\n assert cmap1.channel == 'latest/beta'\n assert cmap1.expires_at is None\n assert cmap2.revision == 10\n assert cmap2.channel == 'latest/edge/mybranch'\n assert cmap2.expires_at == parser.parse('2020-08-16T18:46:02Z')\n\n channel1, channel2 = channels\n assert channel1.name == 'latest/stable'\n assert channel1.track == 'latest'\n assert channel1.risk == 'stable'\n assert channel1.branch is None\n assert channel2.name == 'latest/edge/mybranch'\n assert channel2.track == 'latest'\n assert channel2.risk == 'edge'\n assert channel2.branch == 'mybranch'\n\n rev1, rev2 = revisions\n assert rev1.revision == 5\n assert rev1.version == '5'\n 
assert rev1.created_at == parser.parse('2020-06-29T22:11:05')\n assert rev1.status == 'approved'\n assert rev1.errors == []\n assert rev2.revision == 10\n assert rev2.version == '63a852b'\n assert rev2.created_at == parser.parse('2020-06-29T22:11:10')\n assert rev2.status == 'approved'\n assert rev2.errors == []", "def test_update_creates_a_new_version(self):\n company = CompanyFactory(name='Foo ltd.')\n\n assert Version.objects.get_for_object(company).count() == 0\n\n response = self.api_client.patch(\n reverse('api-v4:company:item', kwargs={'pk': company.pk}),\n data={'name': 'Acme'},\n )\n\n assert response.status_code == status.HTTP_200_OK\n assert response.json()['name'] == 'Acme'\n\n # check version created\n assert Version.objects.get_for_object(company).count() == 1\n version = Version.objects.get_for_object(company).first()\n assert version.revision.user == self.user\n assert version.field_dict['name'] == 'Acme'", "def test_get_version(mocker):\n client = wsgi.application.test_client(mocker)\n\n url = '/api/v0/version'\n\n response = client.get(url)\n\n output = {\n \"message\": f\"AIOPS Publisher Version {wsgi.VERSION}\",\n \"status\": \"OK\",\n \"version\": wsgi.VERSION\n }\n assert response.get_json() == output\n assert response.status_code == 200", "def test_edit_seed_job_type_and_update(self, mock_create, mock_msg_mgr):\n\n url = '/%s/job-types/' % self.api\n manifest = copy.deepcopy(job_test_utils.MINIMUM_MANIFEST)\n manifest['job']['packageVersion'] = '1.0.1'\n\n json_data = {\n 'icon_code': 'BEEF',\n 'is_published': True,\n 'max_scheduled': 1,\n 'is_active': False,\n 'docker_image': 'my-job-1.0.0-seed:1.0.1',\n 'manifest': manifest,\n 'configuration': self.configuration,\n 'auto_update': True\n }\n\n response = self.client.generic('POST', url, json.dumps(json_data), 'application/json')\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertEqual(len(results['warnings']), 1)\n self.assertEqual(results['warnings'][0]['name'], 'DEPRECATED_RECIPES')\n\n job_type = JobType.objects.filter(name='my-minimum-job', version='1.0.0').first()\n\n results = json.loads(response.content)\n self.assertTrue(results['is_valid'])\n self.assertEqual(len(results['warnings']), 1)\n self.assertEqual(results['warnings'][0]['name'], 'DEPRECATED_RECIPES')\n \n self.assertEqual(True, job_type.is_published)\n self.assertFalse(job_type.is_active)\n self.assertEqual(job_type.docker_image, 'my-job-1.0.0-seed:1.0.1')\n self.assertEqual(2, job_type.revision_num)\n\n mock_create.assert_called_with(self.recipe_type1.id, job_type.id)", "def test_get_labware_on_labware(\n decoy: Decoy, mock_engine_client: EngineClient, subject: ProtocolCore\n) -> None:\n mock_labware_core = decoy.mock(cls=LabwareCore)\n mock_other_labware_core = decoy.mock(cls=LabwareCore)\n\n decoy.when(mock_labware_core.labware_id).then_return(\"abc\")\n decoy.when(mock_engine_client.state.labware.get_id_by_labware(\"abc\")).then_return(\n \"123\"\n )\n\n subject._labware_cores_by_id[\"123\"] = mock_other_labware_core\n\n assert subject.get_labware_on_labware(mock_labware_core) == mock_other_labware_core", "def test_get_labware_on_module(\n decoy: Decoy, mock_engine_client: EngineClient, subject: ProtocolCore\n) -> None:\n mock_module_core = decoy.mock(cls=ModuleCore)\n mock_labware_core = decoy.mock(cls=LabwareCore)\n\n decoy.when(mock_module_core.module_id).then_return(\"abc\")\n 
decoy.when(mock_engine_client.state.labware.get_id_by_module(\"abc\")).then_return(\n \"123\"\n )\n\n subject._labware_cores_by_id[\"123\"] = mock_labware_core\n\n assert subject.get_labware_on_module(mock_module_core) == mock_labware_core", "def test_load_labware_on_labware(\n decoy: Decoy,\n mock_engine_client: EngineClient,\n subject: ProtocolCore,\n) -> None:\n mock_labware_core = decoy.mock(cls=LabwareCore)\n decoy.when(mock_labware_core.labware_id).then_return(\"labware-id\")\n\n decoy.when(\n mock_engine_client.state.labware.find_custom_labware_load_params()\n ).then_return([EngineLabwareLoadParams(\"hello\", \"world\", 654)])\n\n decoy.when(\n load_labware_params.resolve_loadname(\n \"some_labware\",\n )\n ).then_return(\"labware_some\")\n\n decoy.when(\n load_labware_params.resolve(\n \"labware_some\",\n \"a_namespace\",\n 456,\n [EngineLabwareLoadParams(\"hello\", \"world\", 654)],\n )\n ).then_return((\"some_namespace\", 9001))\n\n decoy.when(\n mock_engine_client.load_labware(\n location=OnLabwareLocation(labwareId=\"labware-id\"),\n load_name=\"labware_some\",\n display_name=\"some_display_name\",\n namespace=\"some_namespace\",\n version=9001,\n )\n ).then_return(\n commands.LoadLabwareResult(\n labwareId=\"abc123\",\n definition=LabwareDefinition.construct(), # type: ignore[call-arg]\n offsetId=None,\n )\n )\n\n decoy.when(mock_engine_client.state.labware.get_definition(\"abc123\")).then_return(\n LabwareDefinition.construct(ordering=[]) # type: ignore[call-arg]\n )\n\n decoy.when(\n mock_engine_client.state.labware.get_id_by_labware(\"labware-id\")\n ).then_return(\"abc123\")\n\n result = subject.load_labware(\n load_name=\"some_labware\",\n location=mock_labware_core,\n label=\"some_display_name\",\n namespace=\"a_namespace\",\n version=456,\n )\n\n assert isinstance(result, LabwareCore)\n assert result.labware_id == \"abc123\"\n assert subject.get_labware_cores() == [subject.fixed_trash, result]\n\n decoy.verify(\n deck_conflict.check(\n engine_state=mock_engine_client.state,\n existing_labware_ids=[\"fixed-trash-123\"],\n existing_module_ids=[],\n new_labware_id=\"abc123\",\n )\n )\n\n assert subject.get_labware_on_labware(mock_labware_core) is result", "def test_version_successful(self):\n\n url = '/%s/job-types/job-type-for-view-test/' % self.api\n response = self.client.get(url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n self.assertEqual(len(result['results']), 3)\n\n for entry in result['results']:\n expected = None\n if entry['id'] == self.job_type4.id:\n expected = self.job_type4\n elif entry['id'] == self.job_type5.id:\n expected = self.job_type5\n elif entry['id'] == self.job_type6.id:\n expected = self.job_type6\n else:\n self.fail('Found unexpected result: %s' % entry['id'])\n self.assertEqual(entry['name'], expected.name)\n self.assertEqual(entry['version'], expected.version)\n self.assertEqual(entry['title'], expected.get_title())\n self.assertEqual(entry['description'], expected.get_description())\n self.assertEqual(entry['icon_code'], expected.icon_code)\n self.assertEqual(entry['is_published'], expected.is_published)\n self.assertEqual(entry['is_active'], expected.is_active)\n self.assertEqual(entry['is_paused'], expected.is_paused)\n self.assertEqual(entry['is_system'], expected.is_system)\n self.assertEqual(entry['max_scheduled'], expected.max_scheduled)\n self.assertEqual(entry['revision_num'], expected.revision_num)\n self.assertEqual(entry['docker_image'], 
expected.docker_image)", "def test_get_entity_version(self):\n self.metadata.create_or_update(data=self.create)\n\n # Find by name\n res_name = self.metadata.get_by_name(\n entity=Dashboard, fqn=self.entity.fullyQualifiedName\n )\n res = self.metadata.get_entity_version(\n entity=Dashboard, entity_id=res_name.id.__root__, version=0.1\n )\n\n # check we get the correct version requested and the correct entity ID\n assert res.version.__root__ == 0.1\n assert res.id == res_name.id" ]
[ "0.732434", "0.7297416", "0.69765425", "0.63282025", "0.62099767", "0.6095259", "0.6094727", "0.60649484", "0.58922017", "0.587223", "0.584923", "0.5840194", "0.5815585", "0.578171", "0.5772538", "0.57495606", "0.57491666", "0.5727522", "0.5712284", "0.5710002", "0.56978595", "0.56807363", "0.5664492", "0.5659156", "0.5655548", "0.56517816", "0.5640414", "0.56353027", "0.5620032", "0.56126523" ]
0.8264145
0
Test that a pipeline definition artifact can be fetched from the registered model version associated with a RegisteredPipeline object.
def test_get_pipeline_definition_artifact( make_mock_registered_model_version, make_mock_simple_pipeline_definition, ) -> None: rmv = make_mock_registered_model_version() pipeline_definition = RegisteredPipeline._get_pipeline_definition_artifact( registered_model_version=rmv, ) assert pipeline_definition == make_mock_simple_pipeline_definition(id=rmv.id)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_from_pipeline_definition(\n make_mock_registered_model_version,\n mocked_responses,\n) -> None:\n rmv = make_mock_registered_model_version()\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/1\",\n json={},\n status=200,\n )\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/2\",\n json={},\n status=200,\n )\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/registered_models/0\",\n json={},\n status=200,\n )\n pipeline = RegisteredPipeline._from_pipeline_definition(\n registered_model_version=rmv,\n )\n assert isinstance(pipeline, RegisteredPipeline)\n assert pipeline.id == rmv.id", "def test_to_pipeline_definition(\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n pipeline_definition = pipeline._to_pipeline_definition()\n assert pipeline_definition == {\n \"pipeline_version_id\": pipeline.id,\n \"graph\": graph._to_graph_definition(),\n \"steps\": graph._to_steps_definition(),\n }", "def test_log_pipeline_definition_artifact(\n model_version_name,\n mocked_responses,\n make_mock_pipeline_graph,\n make_mock_registered_model,\n make_mock_registered_model_version,\n) -> None:\n rm = make_mock_registered_model(id=123, name=\"test_rm\")\n rmv = make_mock_registered_model_version()\n # Fetch the registered model version\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/{rmv.id}\",\n json={\n \"model_version\": {\n \"id\": rmv.id,\n \"registered_model_id\": rmv.registered_model_id,\n \"version\": model_version_name,\n }\n },\n status=200,\n )\n mocked_responses.put(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/registered_models/{rmv.registered_model_id}/model_versions/{rmv.id}\",\n json={},\n status=200,\n )\n # Fetch the artifact upload URL\n mocked_responses.post(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/{rmv.id}/getUrlForArtifact\",\n json={\n \"url\": f\"https://account.s3.amazonaws.com/development/ModelVersionEntity/\"\n f\"{rmv.id}/pipeline.json\"\n },\n status=200,\n )\n # Upload the artifact\n mocked_responses.put(\n f\"https://account.s3.amazonaws.com/development/ModelVersionEntity/{rmv.id}/pipeline.json\",\n json={},\n status=200,\n )\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=rm\n ):\n pipeline = RegisteredPipeline(\n graph=make_mock_pipeline_graph(),\n registered_model_version=rmv,\n )\n pipeline._log_pipeline_definition_artifact()", "def test_load_pipeline():\n\n # Given\n pipeline_file_name = f\"{core.config.app_config.MODEL_PIPELINE_NAME}{_version}.pkl\"\n\n # When\n subject = utils.load_pipeline(file_name=pipeline_file_name)\n\n # Then\n assert isinstance(subject, sklearn.pipeline.Pipeline)", "def test_is_pipeline(model):\n assert type(model) == Pipeline", "def test_get_pipeline_by_id(self):\n response = self.client.get_pipeline_by_id(2)\n self.assertEqual(response['id'], 2)", "def test_to_pipeline_configuration_no_resources(\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n 
make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n pipeline_configuration = pipeline._to_pipeline_configuration()\n assert pipeline_configuration[\"pipeline_version_id\"] == pipeline.id\n for graph_step, config_step in zip(graph.steps, pipeline_configuration[\"steps\"]):\n # All steps are included in the configuration\n assert graph_step.name == config_step[\"name\"]\n # No resources are found in the resulting configuration\n assert \"resources\" not in config_step.keys()", "def test_to_pipeline_configuration_valid_complete(\n resources,\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n step_resources = {step.name: resources for step in graph.steps}\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n\n pipeline_configuration = pipeline._to_pipeline_configuration(\n pipeline_resources=step_resources\n )\n assert pipeline_configuration[\"pipeline_version_id\"] == pipeline.id\n assert len(graph.steps) == len(pipeline_configuration[\"steps\"])\n for graph_step, config_step in zip(graph.steps, pipeline_configuration[\"steps\"]):\n # All steps provided are included in the configuration.\n assert graph_step.name == config_step[\"name\"]\n # All steps in the config have resources\n assert \"resources\" in config_step.keys()", "def test_list_pipeline_add_one(self):\n response = self.client.list_pipelines()\n exsit = False\n for pipeline in response.pipelines:\n if pipeline.pipeline_name == self.pipeline_name:\n exsit = True\n break\n nose.tools.assert_true(exsit)", "def test_read_artifact(self):\n pass", "def test_get_pipeline_returns_none_if_non_existent(tmp_path: str) -> None:\n Repo.init(tmp_path)\n repo = Repository(str(tmp_path))\n repo.set_active_stack(\"local_stack\")\n our_pipeline = repo.get_pipeline(\"not_a_pipeline\")\n assert our_pipeline is None", "def test_to_pipeline_configuration_invalid_resources(\n resources,\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rmv\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n step_resources = {step.name: resources for step in graph.steps}\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n # step name not in pipeline\n step_resources[\"invalid_step_name\"] = resources\n with pytest.raises(ValueError) as err:\n pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err.value) == \"pipeline_resources contains resources for a step not in the \"\n \"pipeline: 'invalid_step_name'\"\n )\n step_resources.pop(\"invalid_step_name\")\n # step name not a string\n step_resources.update({123: resources})\n with pytest.raises(TypeError) as err2:\n 
pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err2.value) == \"pipeline_resources keys must be type str, not <class 'int'>\"\n )\n step_resources.pop(123)\n # step resource not a Resources object\n step_resources.update({\"step_1\": \"not_resources\"})\n with pytest.raises(TypeError) as err3:\n pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err3.value)\n == \"pipeline_resources values must be type Resources, not <class 'str'>\"\n )", "def test_new_deployment_pipeline(self):\n # set up\n new_config_patcher = patch(\n 'factories.new_config',\n return_value=5,\n )\n mock_new_config = new_config_patcher.start()\n\n new_env_patcher = patch('factories.new_env', return_value=9)\n mock_new_env = new_env_patcher.start()\n\n # run SUT passing branch_id: 1, copy_config_id: 6, copy_env_id: None\n pipeline_id = new_deployment_pipeline(1, 6)\n\n # confirm that new config was based on config 6\n mock_new_config.assert_called_once_with(6)\n\n # confirm that new env was not based on anything\n mock_new_env.assert_called_once_with(None)\n\n # confirm reasonable sql was executed to make a pipeline\n self.mock_get_cur.return_value.execute.assert_called_once_with(\n \"INSERT INTO deployment_pipeline \" + \\\n \"(branch_id, config_id, environment_id, automatic) \" + \\\n \"VALUES (%s, %s, %s, %s) \" + \\\n \"RETURNING deployment_pipeline_id\",\n (1, 5, 9, False),\n )\n\n # make sure we closed the cursor\n self.mock_get_cur.return_value.close.assert_called_once_with()", "def assert_pipeline_running(self, request):\r\n self.assertTrue(pipeline.running(request))", "def test_to_pipeline_configuration_valid_incomplete(\n resources,\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n partial_steps = list(graph.steps)[:-1]\n excluded_step = list(graph.steps)[-1]\n step_resources = {step.name: resources for step in partial_steps}\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n\n pipeline_configuration = pipeline._to_pipeline_configuration(\n pipeline_resources=step_resources\n )\n assert pipeline_configuration[\"pipeline_version_id\"] == pipeline.id\n # All steps have been included in the configuration\n assert len(graph.steps) == len(pipeline_configuration[\"steps\"])\n # Compare the steps that have resources, allowing zip to drop the excluded step.\n for graph_step, config_step in zip(partial_steps, pipeline_configuration[\"steps\"]):\n # All steps provided are included in the configuration.\n assert graph_step.name == config_step[\"name\"]\n # All steps for which resource were provided have resources in the config.\n assert \"resources\" in config_step.keys()\n # The step for which no resources were provided is in the config without resources.\n assert excluded_step.name == pipeline_configuration[\"steps\"][-1][\"name\"]\n assert \"resources\" not in pipeline_configuration[\"steps\"][-1].keys()", "def test_save_pipeline():\n\n # Given\n try:\n pipeline_for_test = joblib.load(\n core.TRAINED_MODEL_DIR\n / f\"{core.config.app_config.MODEL_PIPELINE_NAME}{_version}.pkl\"\n )\n subject_file_name = (\n f\"{core.config.app_config.MODEL_PIPELINE_NAME}{_version}.pkl\"\n )\n except:\n subject_file_name = 
f\"fake_pipe_line_model_v{_version}.pkl\"\n\n # When\n utils.save_pipeline(pipeline_for_test, subject_file_name)\n\n # Then\n # Get the files in the model save's directory\n trained_model_dir_file_list = [\n file.name for file in core.TRAINED_MODEL_DIR.iterdir()\n ]\n\n # Check if the pipeline was saved in TRAINED_MODEL_DIR and with the right filename\n assert subject_file_name in trained_model_dir_file_list\n # Check if the __init__.py file is in the TRAINED_MODEL_DIR\n assert \"__init__.py\" in trained_model_dir_file_list\n # Check if the TRAINED_MODEL_DIR folder contains just the new saved pipeline and the __init__.py file\n assert len(trained_model_dir_file_list) == 2\n # remove the fake pipeline\n if subject_file_name == f\"fake_pipe_line_model_v{_version}.pkl\":\n core.TRAINED_MODEL_DIR / subject_file_name.unlink()", "def test_copy_graph(\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n copied_graph = pipeline.copy_graph()\n # convert from sets to lists and sort for side-by-side comparison\n graph_steps_sorted = sorted(graph.steps, key=lambda x: x.name)\n copied_graph_steps_sorted = sorted(copied_graph.steps, key=lambda x: x.name)\n\n for orig_step, copied_step in zip(graph_steps_sorted, copied_graph_steps_sorted):\n assert orig_step is not copied_step\n assert orig_step.name == copied_step.name\n assert orig_step.predecessors == copied_step.predecessors\n assert (\n orig_step.registered_model_version.id\n == copied_step.registered_model_version.id\n )\n assert copied_graph is not graph", "def test_get_pipeline_returns_same_when_stack_specified(tmp_path: str) -> None:\n Repo.init(tmp_path)\n repo = Repository(str(tmp_path))\n repo.set_active_stack(\"local_stack\")\n our_pipeline_default = repo.get_pipeline(\"pipeline_1\")\n our_pipeline_local = repo.get_pipeline(\n \"pipeline_1\", stack_key=\"local_stack\"\n )\n assert our_pipeline_default == our_pipeline_local", "def test_valid_repository():\n config = load_json_fixture(\"basic-addon-config.json\")\n\n custom_registry = \"registry.gitlab.com/company/add-ons/core/test-example\"\n config[\"image\"] = custom_registry\n valid_config = vd.SCHEMA_ADDON_CONFIG(config)\n assert valid_config[\"image\"] == custom_registry", "def pipeline_artifact(self):\n pass", "def test_get_version(self):\n pass", "def test_list_pipeline_delete_one(self):\n response = self.client.delete_pipeline(self.pipeline_name)\n nose.tools.assert_is_not_none(response)\n\n response = self.client.list_pipelines()\n exsit = False\n for pipeline in response.pipelines:\n if pipeline.pipeline_name == self.pipeline_name:\n exsit = True\n break\n nose.tools.assert_false(exsit)", "def test_ls():\n\n with pipeline.fixture(assets=[\"Asset1\"],\n subsets=[\"animRig\"],\n versions=1) as root:\n asset = next(pipeline.ls())\n\n reference = {\n \"schema\": \"pyblish-mindbender:asset-1.0\",\n \"name\": \"Asset1\",\n \"subsets\": [\n {\n \"schema\": \"pyblish-mindbender:subset-1.0\",\n \"name\": \"animRig\",\n \"versions\": [\n {\n \"schema\": \"pyblish-mindbender:version-1.0\",\n \"version\": 1,\n \"path\": os.path.join(\n root,\n \"Asset1\",\n \"publish\",\n \"animRig\",\n \"v001\"\n ),\n \"source\": 
os.path.join(\n \"{project}\",\n \"maya\",\n \"scenes\",\n \"scene.ma\"\n ),\n \"representations\": [\n {\n \"schema\": (\"pyblish-mindbender:\"\n \"representation-1.0\"),\n \"format\": \".ma\",\n \"path\": os.path.join(\n \"{dirname}\",\n \"Asset1{format}\"\n ),\n }\n ],\n \"time\": \"\",\n \"author\": \"mottosso\",\n },\n ]\n }\n ]\n }\n\n # Printed on error\n print(\"# Comparing result:\")\n print(json.dumps(asset, indent=4, sort_keys=True))\n print(\"# With reference:\")\n print(json.dumps(reference, indent=4, sort_keys=True))\n\n assert_equals(asset, reference)", "def testInstantiate(self):\n artifact_name = 'artifact'\n artifact = base.BaseArtifact(artifact_name)\n\n self.assertEqual(artifact.size, 0)\n self.assertEqual(artifact.name, artifact_name)\n expected_remote_path = 'Base/artifact'\n self.assertEqual(artifact.remote_path, expected_remote_path)", "def test_get_entity_version(self):\n self.metadata.create_or_update(data=self.create)\n\n # Find by name\n res_name = self.metadata.get_by_name(\n entity=Dashboard, fqn=self.entity.fullyQualifiedName\n )\n res = self.metadata.get_entity_version(\n entity=Dashboard, entity_id=res_name.id.__root__, version=0.1\n )\n\n # check we get the correct version requested and the correct entity ID\n assert res.version.__root__ == 0.1\n assert res.id == res_name.id", "def test_load_response_descriptor_projects_release_release_resource(self):\n pass", "def test_model_by_version_get(self):\n\n # Firstly, find existing version - latest\n response = self.client().get('/model')\n latest_model = Model.from_json(response.data.decode())\n latest_version = latest_model.version\n\n # Accesses latest model\n response = self.client().get('/models/'+str(latest_version))\n self.assertEqual(response.status_code, 200)\n loaded_model = Model.from_json(response.data.decode())\n self.assertEqual(loaded_model.version, latest_version)\n self.assertEqual(loaded_model, latest_model)\n\n # Accesses random model version\n random_version = random.choice(list(self.data_manipulation.versions.keys()))\n random_model = self.data_manipulation.versions[random_version]\n response = self.client().get('/models/'+str(random_version))\n self.assertEqual(response.status_code, 200)\n loaded_model = Model.from_json(response.data.decode())\n self.assertEqual(loaded_model.version, random_version)\n self.assertEqual(loaded_model, random_model)\n\n # Random version is removed\n del self.data_manipulation.versions[random_version]\n response = self.client().get('/models/'+str(random_version))\n self.assertEqual(response.status_code, 404)", "def test_build_pipeline_four(self):\n args = \"Test_APP ONE FIVE\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)", "def test_ResolvedPackage(self):\n resolved_pkgs = ResolvedPackage.objects.all()\n versions = [rp.package.version for rp in resolved_pkgs]\n\n self.assertEqual(4, resolved_pkgs.count())\n self.assertEqual('mimetex', resolved_pkgs[0].package.name)\n self.assertIn('1.50-1.1', versions)\n self.assertIn('1.74-1', versions)", "def test_get_iiqtools_version_ok(self, fake_get_distribution):\n fake_get_distribution.return_value = self.FakeDistVersion('1.2.3')\n\n v = versions.get_iiqtools_version()\n\n self.assertTrue(isinstance(v, versions.Version))" ]
[ "0.7688491", "0.72997695", "0.72265136", "0.69778377", "0.6683026", "0.6655028", "0.646409", "0.6193778", "0.61219335", "0.60657907", "0.60468465", "0.5986037", "0.59753895", "0.5972712", "0.59150094", "0.58124006", "0.5805494", "0.57608837", "0.56313217", "0.5624219", "0.56044954", "0.55941886", "0.5579552", "0.5573401", "0.5536048", "0.55115277", "0.5463096", "0.5424089", "0.5404182", "0.54026216" ]
0.8222838
0
Test that a pipeline definition can be constructed from a RegisteredPipeline object. In-depth testing of the `_to_graph_definition` and `_to_steps_definition` functions is handled in unit tests for PipelineGraph.
def test_to_pipeline_definition( make_mock_pipeline_graph, make_mock_registered_model_version, make_mock_registered_model, ) -> None: mocked_rm = make_mock_registered_model(id=123, name="test_rm") with patch.object( verta.pipeline.PipelineStep, "_get_registered_model", return_value=mocked_rm ): graph = make_mock_pipeline_graph() pipeline = RegisteredPipeline( graph=graph, registered_model_version=make_mock_registered_model_version(), ) pipeline_definition = pipeline._to_pipeline_definition() assert pipeline_definition == { "pipeline_version_id": pipeline.id, "graph": graph._to_graph_definition(), "steps": graph._to_steps_definition(), }
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_from_pipeline_definition(\n make_mock_registered_model_version,\n mocked_responses,\n) -> None:\n rmv = make_mock_registered_model_version()\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/1\",\n json={},\n status=200,\n )\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/2\",\n json={},\n status=200,\n )\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/registered_models/0\",\n json={},\n status=200,\n )\n pipeline = RegisteredPipeline._from_pipeline_definition(\n registered_model_version=rmv,\n )\n assert isinstance(pipeline, RegisteredPipeline)\n assert pipeline.id == rmv.id", "def test_to_pipeline_configuration_valid_complete(\n resources,\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n step_resources = {step.name: resources for step in graph.steps}\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n\n pipeline_configuration = pipeline._to_pipeline_configuration(\n pipeline_resources=step_resources\n )\n assert pipeline_configuration[\"pipeline_version_id\"] == pipeline.id\n assert len(graph.steps) == len(pipeline_configuration[\"steps\"])\n for graph_step, config_step in zip(graph.steps, pipeline_configuration[\"steps\"]):\n # All steps provided are included in the configuration.\n assert graph_step.name == config_step[\"name\"]\n # All steps in the config have resources\n assert \"resources\" in config_step.keys()", "def test_to_pipeline_configuration_no_resources(\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n pipeline_configuration = pipeline._to_pipeline_configuration()\n assert pipeline_configuration[\"pipeline_version_id\"] == pipeline.id\n for graph_step, config_step in zip(graph.steps, pipeline_configuration[\"steps\"]):\n # All steps are included in the configuration\n assert graph_step.name == config_step[\"name\"]\n # No resources are found in the resulting configuration\n assert \"resources\" not in config_step.keys()", "def test_get_pipeline_definition_artifact(\n make_mock_registered_model_version,\n make_mock_simple_pipeline_definition,\n) -> None:\n rmv = make_mock_registered_model_version()\n pipeline_definition = RegisteredPipeline._get_pipeline_definition_artifact(\n registered_model_version=rmv,\n )\n assert pipeline_definition == make_mock_simple_pipeline_definition(id=rmv.id)", "def test_load_pipeline():\n\n # Given\n pipeline_file_name = f\"{core.config.app_config.MODEL_PIPELINE_NAME}{_version}.pkl\"\n\n # When\n subject = utils.load_pipeline(file_name=pipeline_file_name)\n\n # Then\n assert isinstance(subject, sklearn.pipeline.Pipeline)", "def test_is_pipeline(model):\n assert type(model) == Pipeline", "def test_to_pipeline_configuration_invalid_resources(\n resources,\n make_mock_pipeline_graph,\n 
make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rmv\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n step_resources = {step.name: resources for step in graph.steps}\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n # step name not in pipeline\n step_resources[\"invalid_step_name\"] = resources\n with pytest.raises(ValueError) as err:\n pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err.value) == \"pipeline_resources contains resources for a step not in the \"\n \"pipeline: 'invalid_step_name'\"\n )\n step_resources.pop(\"invalid_step_name\")\n # step name not a string\n step_resources.update({123: resources})\n with pytest.raises(TypeError) as err2:\n pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err2.value) == \"pipeline_resources keys must be type str, not <class 'int'>\"\n )\n step_resources.pop(123)\n # step resource not a Resources object\n step_resources.update({\"step_1\": \"not_resources\"})\n with pytest.raises(TypeError) as err3:\n pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err3.value)\n == \"pipeline_resources values must be type Resources, not <class 'str'>\"\n )", "def test_to_pipeline_configuration_valid_incomplete(\n resources,\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n partial_steps = list(graph.steps)[:-1]\n excluded_step = list(graph.steps)[-1]\n step_resources = {step.name: resources for step in partial_steps}\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n\n pipeline_configuration = pipeline._to_pipeline_configuration(\n pipeline_resources=step_resources\n )\n assert pipeline_configuration[\"pipeline_version_id\"] == pipeline.id\n # All steps have been included in the configuration\n assert len(graph.steps) == len(pipeline_configuration[\"steps\"])\n # Compare the steps that have resources, allowing zip to drop the excluded step.\n for graph_step, config_step in zip(partial_steps, pipeline_configuration[\"steps\"]):\n # All steps provided are included in the configuration.\n assert graph_step.name == config_step[\"name\"]\n # All steps for which resource were provided have resources in the config.\n assert \"resources\" in config_step.keys()\n # The step for which no resources were provided is in the config without resources.\n assert excluded_step.name == pipeline_configuration[\"steps\"][-1][\"name\"]\n assert \"resources\" not in pipeline_configuration[\"steps\"][-1].keys()", "def test_copy_graph(\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n copied_graph = 
pipeline.copy_graph()\n # convert from sets to lists and sort for side-by-side comparison\n graph_steps_sorted = sorted(graph.steps, key=lambda x: x.name)\n copied_graph_steps_sorted = sorted(copied_graph.steps, key=lambda x: x.name)\n\n for orig_step, copied_step in zip(graph_steps_sorted, copied_graph_steps_sorted):\n assert orig_step is not copied_step\n assert orig_step.name == copied_step.name\n assert orig_step.predecessors == copied_step.predecessors\n assert (\n orig_step.registered_model_version.id\n == copied_step.registered_model_version.id\n )\n assert copied_graph is not graph", "def test_generate_pipeline_code():\n pipeline = ['KNeighborsClassifier',\n ['CombineDFs',\n ['GradientBoostingClassifier',\n 'input_matrix',\n 38.0,\n 0.87],\n ['GaussianNB',\n ['ZeroCount',\n 'input_matrix']]],\n 18,\n 33]\n\n expected_code = \"\"\"make_pipeline(\n make_union(\n make_union(VotingClassifier([('branch',\n GradientBoostingClassifier(learning_rate=1.0, max_features=1.0, n_estimators=500)\n )]), FunctionTransformer(lambda X: X)),\n make_union(VotingClassifier([('branch',\n make_pipeline(\n ZeroCount(),\n GaussianNB()\n )\n )]), FunctionTransformer(lambda X: X))\n ),\n KNeighborsClassifier(n_neighbors=5, weights=\"distance\")\n)\"\"\"\n\n assert expected_code == generate_pipeline_code(pipeline)", "def test_make_pipeline(self):\n\n umap = UMAPVisualizer() # Should not cause an exception.\n assert umap.transformer_ is not None\n\n assert len(umap.transformer_.steps) == 1", "def create_pipeline(\n pipeline_name: Text,\n pipeline_root: Text,\n data_root: Text,\n module_file: Text,\n metadata_path: Text,\n beam_pipeline_args: List[Text],\n) -> tfx.dsl.Pipeline:\n example_gen = tfx.components.CsvExampleGen(input_base=data_root)\n\n # Computes statistics over data for visualization and example validation.\n statistics_gen = tfx.components.StatisticsGen(\n examples=example_gen.outputs['examples'])\n\n # Generates schema based on statistics files.\n schema_gen = tfx.components.SchemaGen(\n statistics=statistics_gen.outputs['statistics'],\n infer_feature_shape=True)\n\n # Performs anomaly detection based on statistics and data schema.\n example_validator = tfx.components.ExampleValidator(\n statistics=statistics_gen.outputs['statistics'],\n schema=schema_gen.outputs['schema'],\n )\n\n trainer_custom_config = {\n 'objective': 'reg:squarederror',\n 'learning_rate': 0.3,\n 'max_depth': 4,\n 'num_boost_round': 200,\n 'early_stopping_rounds': 40,\n }\n\n trainer = tfx.components.Trainer(\n module_file=module_file,\n examples=example_gen.outputs['examples'],\n schema=schema_gen.outputs['schema'],\n train_args=tfx.proto.TrainArgs(),\n eval_args=tfx.proto.EvalArgs(),\n custom_config=trainer_custom_config,\n )\n\n return tfx.dsl.Pipeline(\n pipeline_name=pipeline_name,\n pipeline_root=pipeline_root,\n components=[\n example_gen,\n statistics_gen,\n schema_gen,\n example_validator,\n trainer,\n ],\n enable_cache=True,\n metadata_connection_config=tfx.orchestration.metadata.\n sqlite_metadata_connection_config(metadata_path),\n beam_pipeline_args=beam_pipeline_args,\n )", "def to_pipeline_spec(self) -> pipeline_spec_pb2.PipelineSpec:\n # import here to aviod circular module dependency\n from kfp.compiler import compiler_utils\n from kfp.compiler import pipeline_spec_builder as builder\n from kfp.dsl import pipeline_channel\n from kfp.dsl import pipeline_task\n from kfp.dsl import tasks_group\n\n args_dict = {}\n pipeline_inputs = self.inputs or {}\n\n for arg_name, input_spec in pipeline_inputs.items():\n 
args_dict[arg_name] = pipeline_channel.create_pipeline_channel(\n name=arg_name,\n channel_type=input_spec.type,\n is_artifact_list=input_spec.is_artifact_list)\n\n task = pipeline_task.PipelineTask(self, args_dict)\n\n # instead of constructing a pipeline with pipeline_context.Pipeline,\n # just build the single task group\n group = tasks_group.TasksGroup(\n group_type=tasks_group.TasksGroupType.PIPELINE)\n group.tasks.append(task)\n\n group.name = uuid.uuid4().hex\n\n pipeline_name = self.name\n task_group = group\n\n pipeline_outputs = {}\n pipeline_output_spec = self.outputs or {}\n\n for arg_name, output_spec in pipeline_output_spec.items():\n pipeline_outputs[\n arg_name] = pipeline_channel.create_pipeline_channel(\n name=arg_name,\n channel_type=output_spec.type,\n task_name=task.name)\n\n utils.validate_pipeline_name(pipeline_name)\n\n pipeline_spec = pipeline_spec_pb2.PipelineSpec()\n pipeline_spec.pipeline_info.name = pipeline_name\n pipeline_spec.sdk_version = f'kfp-{kfp.__version__}'\n # Schema version 2.1.0 is required for kfp-pipeline-spec>0.1.13\n pipeline_spec.schema_version = '2.1.0'\n\n # if we decide to surface component outputs to pipeline level,\n # can just assign the component_spec_proto directly to .root\n component_spec_proto = builder._build_component_spec_from_component_spec_structure(\n self)\n pipeline_spec.root.CopyFrom(component_spec_proto)\n\n builder._build_dag_outputs(\n component_spec=pipeline_spec.root, dag_outputs=pipeline_outputs)\n\n deployment_config = pipeline_spec_pb2.PipelineDeploymentConfig()\n root_group = task_group\n\n task_name_to_parent_groups, group_name_to_parent_groups = compiler_utils.get_parent_groups(\n root_group)\n\n def get_inputs(task_group: tasks_group.TasksGroup,\n task_name_to_parent_groups):\n inputs = collections.defaultdict(set)\n if len(task_group.tasks) != 1:\n raise ValueError(\n f'Error compiling component. 
Expected one task in task group, got {len(task_group.tasks)}.'\n )\n only_task = task_group.tasks[0]\n if only_task.channel_inputs:\n for group_name in task_name_to_parent_groups[only_task.name]:\n inputs[group_name].add((only_task.channel_inputs[-1], None))\n return inputs\n\n inputs = get_inputs(task_group, task_name_to_parent_groups)\n\n builder.build_spec_by_group(\n pipeline_spec=pipeline_spec,\n deployment_config=deployment_config,\n group=root_group,\n inputs=inputs,\n outputs=collections.defaultdict(\n dict), # empty -- no sub-DAG outputs to surface\n dependencies={}, # no dependencies for single-component pipeline\n rootgroup_name=root_group.name,\n task_name_to_parent_groups=task_name_to_parent_groups,\n group_name_to_parent_groups=group_name_to_parent_groups,\n name_to_for_loop_group={}, # no for loop in single-component pipeline\n platform_spec=pipeline_spec_pb2.PlatformSpec(\n ), # no PlatformSpec single-component pipeline\n is_compiled_component=True,\n )\n\n return pipeline_spec", "def __init__(\n self,\n pipeline_path: Optional[str] = None,\n pipeline_definition: Optional[Dict] = None,\n validate: bool = False,\n ):\n if not pipeline_path and not pipeline_definition:\n # at least one parameter should be provided\n raise ValueError(\"At least one parameter must be provided ('pipeline_path' or 'pipeline_definition')\")\n if pipeline_path and pipeline_definition:\n # only one parameter should be provided\n raise ValueError(\"Only one parameter should be provided ('pipeline_path' or 'pipeline_definition')\")\n\n if pipeline_path:\n # supporting loading pipeline from file\n if not os.path.exists(pipeline_path):\n raise ValueError(f\"Pipeline file not found: '{pipeline_path}'\\n\")\n\n with open(pipeline_path) as f:\n try:\n self._pipeline_definition = json.load(f)\n except ValueError as ve:\n raise ValueError(f\"Pipeline file is invalid: \\n {ve}\")\n else:\n # supporting passing the pipeline definition directly\n self._pipeline_definition = pipeline_definition\n\n if validate:\n self.validate()\n\n self.propagate_pipeline_default_properties()", "def test_build_pipeline_four(self):\n args = \"Test_APP ONE FIVE\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)", "def test_new_deployment_pipeline(self):\n # set up\n new_config_patcher = patch(\n 'factories.new_config',\n return_value=5,\n )\n mock_new_config = new_config_patcher.start()\n\n new_env_patcher = patch('factories.new_env', return_value=9)\n mock_new_env = new_env_patcher.start()\n\n # run SUT passing branch_id: 1, copy_config_id: 6, copy_env_id: None\n pipeline_id = new_deployment_pipeline(1, 6)\n\n # confirm that new config was based on config 6\n mock_new_config.assert_called_once_with(6)\n\n # confirm that new env was not based on anything\n mock_new_env.assert_called_once_with(None)\n\n # confirm reasonable sql was executed to make a pipeline\n self.mock_get_cur.return_value.execute.assert_called_once_with(\n \"INSERT INTO deployment_pipeline \" + \\\n \"(branch_id, config_id, environment_id, automatic) \" + \\\n \"VALUES (%s, %s, %s, %s) \" + \\\n \"RETURNING deployment_pipeline_id\",\n (1, 5, 9, False),\n )\n\n # make sure we closed the cursor\n self.mock_get_cur.return_value.close.assert_called_once_with()", "def test_log_pipeline_definition_artifact(\n model_version_name,\n mocked_responses,\n make_mock_pipeline_graph,\n make_mock_registered_model,\n make_mock_registered_model_version,\n) -> None:\n rm = make_mock_registered_model(id=123, name=\"test_rm\")\n rmv = 
make_mock_registered_model_version()\n # Fetch the registered model version\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/{rmv.id}\",\n json={\n \"model_version\": {\n \"id\": rmv.id,\n \"registered_model_id\": rmv.registered_model_id,\n \"version\": model_version_name,\n }\n },\n status=200,\n )\n mocked_responses.put(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/registered_models/{rmv.registered_model_id}/model_versions/{rmv.id}\",\n json={},\n status=200,\n )\n # Fetch the artifact upload URL\n mocked_responses.post(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/{rmv.id}/getUrlForArtifact\",\n json={\n \"url\": f\"https://account.s3.amazonaws.com/development/ModelVersionEntity/\"\n f\"{rmv.id}/pipeline.json\"\n },\n status=200,\n )\n # Upload the artifact\n mocked_responses.put(\n f\"https://account.s3.amazonaws.com/development/ModelVersionEntity/{rmv.id}/pipeline.json\",\n json={},\n status=200,\n )\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=rm\n ):\n pipeline = RegisteredPipeline(\n graph=make_mock_pipeline_graph(),\n registered_model_version=rmv,\n )\n pipeline._log_pipeline_definition_artifact()", "def create_fake_pipeline(*_args, **_kwargs):\n return Pipeline(\n [\n node(match.clean_match_data, \"fake_match_data\", \"clean_match_data\"),\n node(\n common.convert_match_rows_to_teammatch_rows,\n \"clean_match_data\",\n \"match_data_b\",\n ),\n node(match.add_out_of_state, \"match_data_b\", \"match_data_c\"),\n node(match.add_travel_distance, \"match_data_c\", \"match_data_d\"),\n node(match.add_result, \"match_data_d\", \"match_data_e\"),\n node(match.add_margin, \"match_data_e\", \"match_data_f\"),\n node(\n match.add_shifted_team_features(\n shift_columns=[\n \"score\",\n \"oppo_score\",\n \"result\",\n \"margin\",\n \"team_goals\",\n \"team_behinds\",\n ]\n ),\n \"match_data_f\",\n \"match_data_g\",\n ),\n node(match.add_cum_win_points, \"match_data_g\", \"match_data_h\"),\n node(match.add_win_streak, \"match_data_h\", \"match_data_i\"),\n ]\n )", "def test_pipeline_granular(mockpipe, testdir):\n test = testdir.makepyfile(TEST_OK_GRANULAR)\n result = testdir.inline_run(\n \"-v\",\n f\"--base-pipeline-dir={test.dirname}\",\n test\n )\n passed, skipped, failed = result.listoutcomes()\n\n assert len(passed) == 2\n assert len(skipped) == 0\n assert len(failed) == 0", "def test_build_pipeline_three(self):\n args = \"Test_APP ONE FOUR\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)", "def test_creation(self):\n\n # Create a new model-free data pipe.\n name = 'new'\n pipes.create(name, 'mf')\n\n # Test that the data pipe exists.\n self.assert_(name in ds)\n\n # Test that the current pipe is the new pipe.\n self.assertEqual(pipes.cdp_name(), name)", "def create_pipeline(pipeline_name: Text, \n pipeline_root: Text, \n dataset_name: Text,\n train_steps: data_types.RuntimeParameter,\n eval_steps: data_types.RuntimeParameter,\n accuracy_threshold: data_types.RuntimeParameter,\n ai_platform_training_args: Dict[Text, Text],\n ai_platform_serving_args: Dict[Text, Text],\n beam_pipeline_args: List[Text],\n model_regisrty_uri: Text,\n enable_cache: Optional[bool] = False) -> pipeline.Pipeline:\n\n # Dataset, table and/or 'where conditions' can be passed as pipeline args.\n query=sql_utils.generate_source_query(dataset_name=dataset_name)\n \n # Brings data into the pipeline from BigQuery.\n example_gen = 
tfx.components.BigQueryExampleGen(\n query=query\n )\n\n # Computes statistics over data for visualization and example validation.\n statistics_gen = tfx.components.StatisticsGen(\n input_data=example_gen.outputs.examples)\n\n # Import schema from local directory.\n schema_importer = ImporterNode(\n instance_name='RawSchemaImporter',\n source_uri=RAW_SCHEMA_DIR,\n artifact_type=Schema,\n )\n\n # Performs anomaly detection based on statistics and data schema.\n validate_stats = tfx.components.ExampleValidator(\n stats=statistics_gen.outputs.output, \n schema=schema_importer.outputs.result\n )\n\n # Performs transformations and feature engineering in training and serving.\n transform = tfx.components.Transform(\n input_data=example_gen.outputs.examples,\n schema=schema_importer.outputs.result,\n module_file=TRANSFORM_MODULE_FILE\n )\n\n\n # Get the latest blessed model for model validation.\n latest_model_resolver = tfx.components.ResolverNode(\n instance_name='latest_blessed_model_resolver',\n resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,\n model=Channel(type=Model),\n model_blessing=Channel(type=ModelBlessing)\n )\n \n # Train and save model for evaluation and serving.\n trainer = tfx.components.Trainer(\n# custom_executor_spec=executor_spec.ExecutorClassSpec(\n# ai_platform_trainer_executor.GenericExecutor),\n custom_executor_spec=executor_spec.ExecutorClassSpec(\n trainer_executor.GenericExecutor),\n module_file=TRAIN_MODULE_FILE,\n transformed_examples=transform.outputs.transformed_examples,\n schema=schema_importer.outputs.result,\n transform_output=transform.outputs.transform_output,\n base_model=latest_model_resolver.outputs.model,\n train_args={'num_steps': train_steps},\n eval_args={'num_steps': eval_steps},\n custom_config={'ai_platform_training_args': ai_platform_training_args}\n )\n\n\n # Uses TFMA to compute a evaluation statistics over features of a model.\n model_evaluator = tfx.components.Evaluator(\n examples=example_gen.outputs.examples,\n model=trainer.outputs.model,\n baseline_model=latest_model_resolver.outputs.model,\n eval_config=helper.get_eval_config()\n )\n \n # Use a custom AccuracyModelValidator component to validate the model.\n model_validator = AccuracyModelValidator(\n eval_results=model_evaluator.outputs.output,\n model=trainer.outputs.model,\n accuracy_threshold=accuracy_threshold,\n slice_accuracy_tolerance=0.15,\n )\n\n# # Checks whether the model passed the validation steps and pushes the model\n# # to its destination if check passed.\n# pusher = tfx.components.Pusher(\n# custom_executor_spec=executor_spec.ExecutorClassSpec(\n# ai_platform_pusher_executor.Executor),\n# model_export=trainer.outputs.output,\n# model_blessing=model_evaluator.outputs.blessing,\n# #model_blessing=model_validator.outputs.blessing,\n# custom_config={'ai_platform_serving_args': ai_platform_serving_args}\n# )\n \n register = tfx.components.Pusher(\n model=trainer.outputs.model,\n model_blessing=model_validator.outputs.blessing,\n #model_blessing=model_evaluator.outputs.blessing,\n push_destination=tfx.proto.pusher_pb2.PushDestination(\n filesystem=tfx.proto.pusher_pb2.PushDestination.Filesystem(\n base_directory=os.path.join(model_regisrty_uri, pipeline_name)))\n )\n \n return pipeline.Pipeline(\n pipeline_name=pipeline_name,\n pipeline_root=pipeline_root,\n components=[\n example_gen, \n statistics_gen, \n schema_importer, \n validate_stats,\n latest_model_resolver,\n transform,\n trainer, \n model_evaluator, \n model_validator, \n #pusher\n register\n 
],\n enable_cache=enable_cache,\n beam_pipeline_args=beam_pipeline_args)", "def test_build_pipeline_eight(self):\n args = \"Test_APP ONE SIX A B\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)", "def test_bad_mutation_of_graph_steps_exception(\n make_mock_registered_model,\n make_mock_registered_model_version,\n make_mock_pipeline_graph,\n):\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n mocked_rmv = make_mock_registered_model_version()\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n\n graph.steps.add(\"not_a_step\")\n with pytest.raises(TypeError) as err:\n RegisteredPipeline(graph=graph, registered_model_version=mocked_rmv)\n assert (\n str(err.value) == f\"individual steps of a PipelineGraph must be type\"\n f\" PipelineStep, not <class 'str'>.\"\n )", "def test_create_plot(self):\n create_plot(\n dataset_name='ds',\n chart_name='My Chart',\n series=[{'column': 1}],\n validate=True\n )\n create_plot(\n dataset_name='ds',\n chart_name='My Chart',\n series=[{'column': 1}],\n xaxis_range='0:10',\n validate=True\n )\n # Have an error raised if values of invalid data type are given\n with self.assertRaises(ValueError):\n create_plot(\n dataset_name='ds',\n chart_name='My Chart',\n series=[{'column': 'abc'}],\n xaxis_range='0:10',\n validate=True\n )\n with self.assertRaises(ValueError):\n create_plot(\n dataset_name='ds',\n chart_name='My Chart',\n series=[{'column': 1, 'label': [], 'range': '0-10'}],\n xaxis_range='0:10',\n validate=True\n )\n # Get dictionary serialization of command arguments. Ensure that we\n # can create a valid command instance from the returned result.\n obj = create_plot(\n dataset_name='ds',\n chart_name='My Chart',\n series=[{'column': 1}],\n xaxis_range='0:10',\n validate=True\n ).arguments.to_list()\n ModuleCommand(\n package_id=plot.PACKAGE_PLOT,\n command_id=plot.PLOT_SIMPLE_CHART,\n arguments=obj,\n packages=PACKAGES\n )\n # Delete a mandatory element from the serialization to ensure that\n # validation fails\n index = -1\n for i in range(len(obj)):\n if obj[i][ARG_ID] == plot.PARA_SERIES:\n index = i\n break\n del obj[i]\n with self.assertRaises(ValueError):\n ModuleCommand(\n package_id=plot.PACKAGE_PLOT,\n command_id=plot.PLOT_SIMPLE_CHART,\n arguments=obj,\n packages=PACKAGES\n )\n # Add an unknown argument to ensure that the validation fails\n obj = create_plot(\n dataset_name='ds',\n chart_name='My Chart',\n series=[{'column': 1}],\n xaxis_range='0:10',\n validate=True\n ).arguments.to_list()\n obj.append(ARG(id='someUnknownLabel', value=''))\n with self.assertRaises(ValueError):\n ModuleCommand(\n package_id=plot.PACKAGE_PLOT,\n command_id=plot.PLOT_SIMPLE_CHART,\n arguments=obj,\n packages=PACKAGES\n )", "def test_hello_pipeline():\n result = execute_pipeline(hello_pipeline, mode=\"test\")\n\n assert result.success\n assert result.output_for_solid(\"hello\") == \"Hello, NMDC!\"", "def main(): # pragma: no cover\n parser = argparse.ArgumentParser(\"Gets the pipeline definition for the pipeline script.\")\n\n parser.add_argument(\n \"-n\",\n \"--module-name\",\n dest=\"module_name\",\n type=str,\n help=\"The module name of the pipeline to import.\",\n )\n parser.add_argument(\n \"-f\",\n \"--file-name\",\n dest=\"file_name\",\n type=str,\n default=None,\n help=\"The file to output the pipeline definition json to.\",\n )\n parser.add_argument(\n \"-kwargs\",\n \"--kwargs\",\n dest=\"kwargs\",\n 
default=None,\n help=\"Dict string of keyword arguments for the pipeline generation (if supported)\",\n )\n args = parser.parse_args()\n\n if args.module_name is None:\n parser.print_help()\n sys.exit(2)\n\n try:\n pipeline = get_pipeline_driver(args.module_name, args.kwargs)\n content = pipeline.definition()\n if args.file_name:\n with open(args.file_name, \"w\") as f:\n f.write(content)\n else:\n print(content)\n except Exception as e: # pylint: disable=W0703\n print(f\"Exception: {e}\")\n sys.exit(1)", "def test_pipeline1(self):\n\n nlp = Pipeline[DataPack]()\n reader = SentenceReader()\n nlp.set_reader(reader)\n dummy = DummyPackProcessor()\n nlp.add(dummy)\n nlp.initialize()\n data_path = data_samples_root + \"/random_texts/0.txt\"\n num_packs = 0\n for pack in nlp.process_dataset(data_path):\n types = list(pack.get_entries_by_type(NewType))\n num_packs += 1\n self.assertEqual(len(types), 1)\n self.assertEqual(types[0].value, \"[PACK]\")\n\n # check that all packs are yielded\n self.assertEqual(num_packs, reader.count)", "def _setup_pipeline_cfg(self):", "def test_save_pipeline():\n\n # Given\n try:\n pipeline_for_test = joblib.load(\n core.TRAINED_MODEL_DIR\n / f\"{core.config.app_config.MODEL_PIPELINE_NAME}{_version}.pkl\"\n )\n subject_file_name = (\n f\"{core.config.app_config.MODEL_PIPELINE_NAME}{_version}.pkl\"\n )\n except:\n subject_file_name = f\"fake_pipe_line_model_v{_version}.pkl\"\n\n # When\n utils.save_pipeline(pipeline_for_test, subject_file_name)\n\n # Then\n # Get the files in the model save's directory\n trained_model_dir_file_list = [\n file.name for file in core.TRAINED_MODEL_DIR.iterdir()\n ]\n\n # Check if the pipeline was saved in TRAINED_MODEL_DIR and with the right filename\n assert subject_file_name in trained_model_dir_file_list\n # Check if the __init__.py file is in the TRAINED_MODEL_DIR\n assert \"__init__.py\" in trained_model_dir_file_list\n # Check if the TRAINED_MODEL_DIR folder contains just the new saved pipeline and the __init__.py file\n assert len(trained_model_dir_file_list) == 2\n # remove the fake pipeline\n if subject_file_name == f\"fake_pipe_line_model_v{_version}.pkl\":\n core.TRAINED_MODEL_DIR / subject_file_name.unlink()" ]
[ "0.6992653", "0.6769633", "0.6746867", "0.66939527", "0.6548289", "0.6532664", "0.6424661", "0.63188297", "0.60581475", "0.6034345", "0.59983337", "0.59542525", "0.5939424", "0.59258586", "0.5908199", "0.5816665", "0.5774693", "0.5701459", "0.5650962", "0.56478715", "0.56415206", "0.563926", "0.56295663", "0.56286037", "0.5605182", "0.5571744", "0.5568255", "0.55596715", "0.5553829", "0.5540363" ]
0.77992266
0
Test that a pipeline configuration can be constructed from a RegisteredPipeline object and a valid list of pipeline resources, where resources are provided for every step.
def test_to_pipeline_configuration_valid_complete( resources, make_mock_pipeline_graph, make_mock_registered_model_version, make_mock_registered_model, ) -> None: mocked_rm = make_mock_registered_model(id=123, name="test_rm") with patch.object( verta.pipeline.PipelineStep, "_get_registered_model", return_value=mocked_rm ): graph = make_mock_pipeline_graph() step_resources = {step.name: resources for step in graph.steps} pipeline = RegisteredPipeline( graph=graph, registered_model_version=make_mock_registered_model_version(), ) pipeline_configuration = pipeline._to_pipeline_configuration( pipeline_resources=step_resources ) assert pipeline_configuration["pipeline_version_id"] == pipeline.id assert len(graph.steps) == len(pipeline_configuration["steps"]) for graph_step, config_step in zip(graph.steps, pipeline_configuration["steps"]): # All steps provided are included in the configuration. assert graph_step.name == config_step["name"] # All steps in the config have resources assert "resources" in config_step.keys()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_to_pipeline_configuration_valid_incomplete(\n resources,\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n partial_steps = list(graph.steps)[:-1]\n excluded_step = list(graph.steps)[-1]\n step_resources = {step.name: resources for step in partial_steps}\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n\n pipeline_configuration = pipeline._to_pipeline_configuration(\n pipeline_resources=step_resources\n )\n assert pipeline_configuration[\"pipeline_version_id\"] == pipeline.id\n # All steps have been included in the configuration\n assert len(graph.steps) == len(pipeline_configuration[\"steps\"])\n # Compare the steps that have resources, allowing zip to drop the excluded step.\n for graph_step, config_step in zip(partial_steps, pipeline_configuration[\"steps\"]):\n # All steps provided are included in the configuration.\n assert graph_step.name == config_step[\"name\"]\n # All steps for which resource were provided have resources in the config.\n assert \"resources\" in config_step.keys()\n # The step for which no resources were provided is in the config without resources.\n assert excluded_step.name == pipeline_configuration[\"steps\"][-1][\"name\"]\n assert \"resources\" not in pipeline_configuration[\"steps\"][-1].keys()", "def test_to_pipeline_configuration_invalid_resources(\n resources,\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rmv\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n step_resources = {step.name: resources for step in graph.steps}\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n # step name not in pipeline\n step_resources[\"invalid_step_name\"] = resources\n with pytest.raises(ValueError) as err:\n pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err.value) == \"pipeline_resources contains resources for a step not in the \"\n \"pipeline: 'invalid_step_name'\"\n )\n step_resources.pop(\"invalid_step_name\")\n # step name not a string\n step_resources.update({123: resources})\n with pytest.raises(TypeError) as err2:\n pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err2.value) == \"pipeline_resources keys must be type str, not <class 'int'>\"\n )\n step_resources.pop(123)\n # step resource not a Resources object\n step_resources.update({\"step_1\": \"not_resources\"})\n with pytest.raises(TypeError) as err3:\n pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err3.value)\n == \"pipeline_resources values must be type Resources, not <class 'str'>\"\n )", "def test_to_pipeline_configuration_no_resources(\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n 
pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n pipeline_configuration = pipeline._to_pipeline_configuration()\n assert pipeline_configuration[\"pipeline_version_id\"] == pipeline.id\n for graph_step, config_step in zip(graph.steps, pipeline_configuration[\"steps\"]):\n # All steps are included in the configuration\n assert graph_step.name == config_step[\"name\"]\n # No resources are found in the resulting configuration\n assert \"resources\" not in config_step.keys()", "def test_load_pipeline():\n\n # Given\n pipeline_file_name = f\"{core.config.app_config.MODEL_PIPELINE_NAME}{_version}.pkl\"\n\n # When\n subject = utils.load_pipeline(file_name=pipeline_file_name)\n\n # Then\n assert isinstance(subject, sklearn.pipeline.Pipeline)", "def test_build_pipeline_four(self):\n args = \"Test_APP ONE FIVE\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)", "def test_build_pipeline_three(self):\n args = \"Test_APP ONE FOUR\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)", "def test_from_pipeline_definition(\n make_mock_registered_model_version,\n mocked_responses,\n) -> None:\n rmv = make_mock_registered_model_version()\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/1\",\n json={},\n status=200,\n )\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/2\",\n json={},\n status=200,\n )\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/registered_models/0\",\n json={},\n status=200,\n )\n pipeline = RegisteredPipeline._from_pipeline_definition(\n registered_model_version=rmv,\n )\n assert isinstance(pipeline, RegisteredPipeline)\n assert pipeline.id == rmv.id", "def test_build_pipeline_six(self):\n args = \"Test_APP FIVE A B\".split(\" \")\n task_list = build_pipeline(args, False)\n self.assertEqual(1, len(task_list))", "def test_build_pipeline_eight(self):\n args = \"Test_APP ONE SIX A B\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)", "def test_build_pipeline_seven(self):\n args = \"Test_APP SIX A B C\".split(\" \")\n task_list = build_pipeline(args, False)\n self.assertEqual(1, len(task_list))", "def test_list_pipeline_add_one(self):\n response = self.client.list_pipelines()\n exsit = False\n for pipeline in response.pipelines:\n if pipeline.pipeline_name == self.pipeline_name:\n exsit = True\n break\n nose.tools.assert_true(exsit)", "def test_new_deployment_pipeline(self):\n # set up\n new_config_patcher = patch(\n 'factories.new_config',\n return_value=5,\n )\n mock_new_config = new_config_patcher.start()\n\n new_env_patcher = patch('factories.new_env', return_value=9)\n mock_new_env = new_env_patcher.start()\n\n # run SUT passing branch_id: 1, copy_config_id: 6, copy_env_id: None\n pipeline_id = new_deployment_pipeline(1, 6)\n\n # confirm that new config was based on config 6\n mock_new_config.assert_called_once_with(6)\n\n # confirm that new env was not based on anything\n mock_new_env.assert_called_once_with(None)\n\n # confirm reasonable sql was executed to make a pipeline\n self.mock_get_cur.return_value.execute.assert_called_once_with(\n \"INSERT INTO deployment_pipeline \" + \\\n \"(branch_id, config_id, environment_id, automatic) \" + \\\n \"VALUES (%s, %s, %s, %s) \" + \\\n \"RETURNING deployment_pipeline_id\",\n (1, 5, 9, False),\n )\n\n # make sure we closed the cursor\n 
self.mock_get_cur.return_value.close.assert_called_once_with()", "def test_build_pipeline_nine(self):\n args = \"Test_APP TASK\".split(\" \")\n task_list = build_pipeline(args, False)\n self.assertEqual(1, len(task_list))", "def test_makeliststep_call_from_within_pipeline():\n config_file = t_path(\n Path('steps') / 'makelist_pipeline.cfg'\n )\n results = MakeListPipeline.call(config_file=config_file)\n assert results == [43.0, 'My hovercraft is full of eels.', False]", "def test_build_pipeline_five(self):\n args = \"Test_APP FOUR A B\".split(\" \")\n task_list = build_pipeline(args, False)\n self.assertEqual(1, len(task_list))", "def test_execute_pipeline_three(self):\n task_list = [Task()]\n with self.assertRaises(NotImplementedError):\n execute_pipeline(task_list)", "def test_execute_pipeline_two(self):\n task_list = [Test()]\n with self.assertRaises(AttributeError):\n execute_pipeline(task_list)", "def test_build_pipeline_one(self):\n args = \"Test_APP ONE TWO THREE\".split(\" \")\n task_list = build_pipeline(args, False)\n self.assertEqual(\"Task One\", task_list[0].execute())\n self.assertEqual(\"Task Two\", task_list[1].execute())\n self.assertEqual(\"Task Three\", task_list[2].execute())\n self.assertEqual(3, len(task_list))", "def test_build_pipeline_two(self):\n args = \"Test_APP ONE TWO ABC\".split(\" \")\n with self.assertRaises(ValueError):\n build_pipeline(args, False)", "def test_is_pipeline(model):\n assert type(model) == Pipeline", "def testAttributes(self):\n pl = Pipeline(loadInitFile=False)\n batch = Batch(pl)\n self.assertIs(pl, batch.pipeline)", "def _setup_pipeline_cfg(self):", "def testLoadConfigs_loadMultipleLab(self):\n config_path = GetTestFilePath('valid/config.yaml')\n pool = lab_config.LabConfigPool(\n lab_config.LocalFileEnumerator(\n os.path.dirname(config_path), lab_config.IsYaml))\n with self.assertRaisesRegex(\n lab_config.ConfigError, r'There are multiple config files.'):\n pool.LoadConfigs()", "def test_to_pipeline_definition(\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n pipeline_definition = pipeline._to_pipeline_definition()\n assert pipeline_definition == {\n \"pipeline_version_id\": pipeline.id,\n \"graph\": graph._to_graph_definition(),\n \"steps\": graph._to_steps_definition(),\n }", "def object_is_valid_pipeline(o):\n return (o is not None and\n hasattr(o, 'fit') and\n hasattr(o, 'predict') and\n hasattr(o, 'steps'))", "def object_is_valid_pipeline(o):\n return (o is not None and\n hasattr(o, 'fit') and\n hasattr(o, 'predict') and\n hasattr(o, 'steps'))", "def assert_pipeline_running(self, request):\r\n self.assertTrue(pipeline.running(request))", "def test_Pipeline_initiation(PrePd_data_dir_path):\n #TODO: Make this more than a smoke test\n\n PreP_Data = Import_PrePd_Data(bird_id, date, location=PrePd_data_dir_path)\n\n pipeline = Pipeline(PreP_Data)\n\n # Smoke Tests\n assert isinstance(pipeline.Activity_Log, dict)\n assert isinstance(pipeline.Backup, tuple)\n assert isinstance(pipeline.Status, bool)\n\n # Unit Tests\n assert pipeline.bird_id == PreP_Data.bird_id\n assert pipeline.date == PreP_Data.date\n assert pipeline.Sn_Len == PreP_Data.Sn_Len\n assert 
pipeline.Gap_Len == PreP_Data.Gap_Len\n assert pipeline.Num_Chan == PreP_Data.Num_Chan\n assert pipeline.Bad_Channels == PreP_Data.Bad_Channels # Debating Hard Passing Bad_Channels\n assert pipeline.Fs == PreP_Data.Fs\n assert np.all(pipeline.Song_Audio[0] == PreP_Data.Song_Audio[0]) # Debating Including Audio\n assert np.all(pipeline.Song_Neural[0] == PreP_Data.Song_Neural[0])\n assert np.all(pipeline.Silence_Audio[0] == PreP_Data.Silence_Audio[0]) # Debating Including Audio\n assert np.all(pipeline.Silence_Neural[0] == PreP_Data.Silence_Neural[0])\n assert pipeline.Num_Motifs == PreP_Data.Num_Motifs\n assert pipeline.Num_Silence == PreP_Data.Num_Silence\n assert np.all(pipeline.Good_Motifs == PreP_Data.Good_Motifs)\n assert np.all(pipeline.Bad_Motifs == PreP_Data.Bad_Motifs)\n assert np.all(pipeline.LS_Drop == PreP_Data.LS_Drop)\n assert np.all(pipeline.Last_Motifs == PreP_Data.Last_Motifs)\n assert np.all(pipeline.First_Motifs == PreP_Data.First_Motifs)\n assert np.all(pipeline.All_First_Motifs == PreP_Data.All_First_Motifs)\n assert np.all(pipeline.All_Last_Motifs == PreP_Data.All_Last_Motifs)\n assert np.all(pipeline.Good_Mid_Motifs == PreP_Data.Good_Mid_Motifs)\n assert np.all(pipeline.Good_Channels == Good_Channel_Index(PreP_Data.Num_Chan, PreP_Data.Bad_Channels))\n assert pipeline.Status\n assert pipeline.Step_Count == 0", "def __init__(\n self,\n pipeline_path: Optional[str] = None,\n pipeline_definition: Optional[Dict] = None,\n validate: bool = False,\n ):\n if not pipeline_path and not pipeline_definition:\n # at least one parameter should be provided\n raise ValueError(\"At least one parameter must be provided ('pipeline_path' or 'pipeline_definition')\")\n if pipeline_path and pipeline_definition:\n # only one parameter should be provided\n raise ValueError(\"Only one parameter should be provided ('pipeline_path' or 'pipeline_definition')\")\n\n if pipeline_path:\n # supporting loading pipeline from file\n if not os.path.exists(pipeline_path):\n raise ValueError(f\"Pipeline file not found: '{pipeline_path}'\\n\")\n\n with open(pipeline_path) as f:\n try:\n self._pipeline_definition = json.load(f)\n except ValueError as ve:\n raise ValueError(f\"Pipeline file is invalid: \\n {ve}\")\n else:\n # supporting passing the pipeline definition directly\n self._pipeline_definition = pipeline_definition\n\n if validate:\n self.validate()\n\n self.propagate_pipeline_default_properties()", "def test_pipeline_basic(mockpipe, testdir):\n test = testdir.makepyfile(TEST_OK)\n result = testdir.inline_run(\n \"-v\",\n f\"--base-pipeline-dir={test.dirname}\",\n test\n )\n passed, skipped, failed = result.listoutcomes()\n\n assert len(passed) == 1\n assert len(skipped) == 0\n assert len(failed) == 0" ]
[ "0.7530245", "0.74972314", "0.7201657", "0.6531304", "0.6396485", "0.62914926", "0.6223942", "0.6215258", "0.6160244", "0.6155715", "0.61122143", "0.60925037", "0.60755783", "0.60367584", "0.6007232", "0.5996004", "0.59956264", "0.5969494", "0.59296113", "0.59294957", "0.5909795", "0.59065515", "0.5835888", "0.5753003", "0.5658025", "0.5658025", "0.5524597", "0.5515274", "0.5514034", "0.54963505" ]
0.7840763
0
Test that a pipeline configuration can be constructed from a RegisteredPipeline object and a valid list of pipeline resources, where resources are not provided for every step.
def test_to_pipeline_configuration_valid_incomplete( resources, make_mock_pipeline_graph, make_mock_registered_model_version, make_mock_registered_model, ) -> None: mocked_rm = make_mock_registered_model(id=123, name="test_rm") with patch.object( verta.pipeline.PipelineStep, "_get_registered_model", return_value=mocked_rm ): graph = make_mock_pipeline_graph() partial_steps = list(graph.steps)[:-1] excluded_step = list(graph.steps)[-1] step_resources = {step.name: resources for step in partial_steps} pipeline = RegisteredPipeline( graph=graph, registered_model_version=make_mock_registered_model_version(), ) pipeline_configuration = pipeline._to_pipeline_configuration( pipeline_resources=step_resources ) assert pipeline_configuration["pipeline_version_id"] == pipeline.id # All steps have been included in the configuration assert len(graph.steps) == len(pipeline_configuration["steps"]) # Compare the steps that have resources, allowing zip to drop the excluded step. for graph_step, config_step in zip(partial_steps, pipeline_configuration["steps"]): # All steps provided are included in the configuration. assert graph_step.name == config_step["name"] # All steps for which resource were provided have resources in the config. assert "resources" in config_step.keys() # The step for which no resources were provided is in the config without resources. assert excluded_step.name == pipeline_configuration["steps"][-1]["name"] assert "resources" not in pipeline_configuration["steps"][-1].keys()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_to_pipeline_configuration_valid_complete(\n resources,\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n step_resources = {step.name: resources for step in graph.steps}\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n\n pipeline_configuration = pipeline._to_pipeline_configuration(\n pipeline_resources=step_resources\n )\n assert pipeline_configuration[\"pipeline_version_id\"] == pipeline.id\n assert len(graph.steps) == len(pipeline_configuration[\"steps\"])\n for graph_step, config_step in zip(graph.steps, pipeline_configuration[\"steps\"]):\n # All steps provided are included in the configuration.\n assert graph_step.name == config_step[\"name\"]\n # All steps in the config have resources\n assert \"resources\" in config_step.keys()", "def test_to_pipeline_configuration_invalid_resources(\n resources,\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rmv\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n step_resources = {step.name: resources for step in graph.steps}\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n # step name not in pipeline\n step_resources[\"invalid_step_name\"] = resources\n with pytest.raises(ValueError) as err:\n pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err.value) == \"pipeline_resources contains resources for a step not in the \"\n \"pipeline: 'invalid_step_name'\"\n )\n step_resources.pop(\"invalid_step_name\")\n # step name not a string\n step_resources.update({123: resources})\n with pytest.raises(TypeError) as err2:\n pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err2.value) == \"pipeline_resources keys must be type str, not <class 'int'>\"\n )\n step_resources.pop(123)\n # step resource not a Resources object\n step_resources.update({\"step_1\": \"not_resources\"})\n with pytest.raises(TypeError) as err3:\n pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err3.value)\n == \"pipeline_resources values must be type Resources, not <class 'str'>\"\n )", "def test_to_pipeline_configuration_no_resources(\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n pipeline_configuration = pipeline._to_pipeline_configuration()\n assert pipeline_configuration[\"pipeline_version_id\"] == pipeline.id\n for graph_step, config_step in zip(graph.steps, pipeline_configuration[\"steps\"]):\n # All steps are included in the configuration\n assert graph_step.name == config_step[\"name\"]\n # No resources are found in the resulting configuration\n assert 
\"resources\" not in config_step.keys()", "def test_load_pipeline():\n\n # Given\n pipeline_file_name = f\"{core.config.app_config.MODEL_PIPELINE_NAME}{_version}.pkl\"\n\n # When\n subject = utils.load_pipeline(file_name=pipeline_file_name)\n\n # Then\n assert isinstance(subject, sklearn.pipeline.Pipeline)", "def test_build_pipeline_four(self):\n args = \"Test_APP ONE FIVE\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)", "def test_build_pipeline_three(self):\n args = \"Test_APP ONE FOUR\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)", "def test_build_pipeline_eight(self):\n args = \"Test_APP ONE SIX A B\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)", "def test_list_pipeline_add_one(self):\n response = self.client.list_pipelines()\n exsit = False\n for pipeline in response.pipelines:\n if pipeline.pipeline_name == self.pipeline_name:\n exsit = True\n break\n nose.tools.assert_true(exsit)", "def test_from_pipeline_definition(\n make_mock_registered_model_version,\n mocked_responses,\n) -> None:\n rmv = make_mock_registered_model_version()\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/1\",\n json={},\n status=200,\n )\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/2\",\n json={},\n status=200,\n )\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/registered_models/0\",\n json={},\n status=200,\n )\n pipeline = RegisteredPipeline._from_pipeline_definition(\n registered_model_version=rmv,\n )\n assert isinstance(pipeline, RegisteredPipeline)\n assert pipeline.id == rmv.id", "def test_build_pipeline_six(self):\n args = \"Test_APP FIVE A B\".split(\" \")\n task_list = build_pipeline(args, False)\n self.assertEqual(1, len(task_list))", "def test_build_pipeline_nine(self):\n args = \"Test_APP TASK\".split(\" \")\n task_list = build_pipeline(args, False)\n self.assertEqual(1, len(task_list))", "def test_execute_pipeline_two(self):\n task_list = [Test()]\n with self.assertRaises(AttributeError):\n execute_pipeline(task_list)", "def test_build_pipeline_seven(self):\n args = \"Test_APP SIX A B C\".split(\" \")\n task_list = build_pipeline(args, False)\n self.assertEqual(1, len(task_list))", "def test_build_pipeline_two(self):\n args = \"Test_APP ONE TWO ABC\".split(\" \")\n with self.assertRaises(ValueError):\n build_pipeline(args, False)", "def test_execute_pipeline_three(self):\n task_list = [Task()]\n with self.assertRaises(NotImplementedError):\n execute_pipeline(task_list)", "def testAttributes(self):\n pl = Pipeline(loadInitFile=False)\n batch = Batch(pl)\n self.assertIs(pl, batch.pipeline)", "def test_new_deployment_pipeline(self):\n # set up\n new_config_patcher = patch(\n 'factories.new_config',\n return_value=5,\n )\n mock_new_config = new_config_patcher.start()\n\n new_env_patcher = patch('factories.new_env', return_value=9)\n mock_new_env = new_env_patcher.start()\n\n # run SUT passing branch_id: 1, copy_config_id: 6, copy_env_id: None\n pipeline_id = new_deployment_pipeline(1, 6)\n\n # confirm that new config was based on config 6\n mock_new_config.assert_called_once_with(6)\n\n # confirm that new env was not based on anything\n mock_new_env.assert_called_once_with(None)\n\n # confirm reasonable sql was executed to make a pipeline\n self.mock_get_cur.return_value.execute.assert_called_once_with(\n \"INSERT INTO deployment_pipeline \" + \\\n 
\"(branch_id, config_id, environment_id, automatic) \" + \\\n \"VALUES (%s, %s, %s, %s) \" + \\\n \"RETURNING deployment_pipeline_id\",\n (1, 5, 9, False),\n )\n\n # make sure we closed the cursor\n self.mock_get_cur.return_value.close.assert_called_once_with()", "def test_is_pipeline(model):\n assert type(model) == Pipeline", "def _setup_pipeline_cfg(self):", "def object_is_valid_pipeline(o):\n return (o is not None and\n hasattr(o, 'fit') and\n hasattr(o, 'predict') and\n hasattr(o, 'steps'))", "def object_is_valid_pipeline(o):\n return (o is not None and\n hasattr(o, 'fit') and\n hasattr(o, 'predict') and\n hasattr(o, 'steps'))", "def test_build_pipeline_five(self):\n args = \"Test_APP FOUR A B\".split(\" \")\n task_list = build_pipeline(args, False)\n self.assertEqual(1, len(task_list))", "def test_makeliststep_call_from_within_pipeline():\n config_file = t_path(\n Path('steps') / 'makelist_pipeline.cfg'\n )\n results = MakeListPipeline.call(config_file=config_file)\n assert results == [43.0, 'My hovercraft is full of eels.', False]", "def testLoadConfigs_loadMultipleLab(self):\n config_path = GetTestFilePath('valid/config.yaml')\n pool = lab_config.LabConfigPool(\n lab_config.LocalFileEnumerator(\n os.path.dirname(config_path), lab_config.IsYaml))\n with self.assertRaisesRegex(\n lab_config.ConfigError, r'There are multiple config files.'):\n pool.LoadConfigs()", "def test_build_pipeline_one(self):\n args = \"Test_APP ONE TWO THREE\".split(\" \")\n task_list = build_pipeline(args, False)\n self.assertEqual(\"Task One\", task_list[0].execute())\n self.assertEqual(\"Task Two\", task_list[1].execute())\n self.assertEqual(\"Task Three\", task_list[2].execute())\n self.assertEqual(3, len(task_list))", "def test_to_pipeline_definition(\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n pipeline_definition = pipeline._to_pipeline_definition()\n assert pipeline_definition == {\n \"pipeline_version_id\": pipeline.id,\n \"graph\": graph._to_graph_definition(),\n \"steps\": graph._to_steps_definition(),\n }", "def _validate_resources(self):\n resources = self.options.resources\n\n for key in ['num_machines', 'num_mpiprocs_per_machine', 'tot_num_mpiprocs']:\n if key in resources and resources[key] != 1:\n raise exceptions.FeatureNotAvailable(\n f'Cannot set resource `{key}` to value `{resources[key]}` for `{self.__class__.__name__}`: '\n 'parallelization is not supported, only a value of `1` is accepted.'\n )", "def test_resources_exception(self):\n with self.assertRaises(ProcessorConfigError) as context:\n self.pl.resource.remove(\"onto_specs_path\")\n self.pl.resource.remove(\"onto_specs_dict\")\n self.pl.add(\n self._stave_processor,\n config={\"port\": self._port, \"server_thread_daemon\": True},\n )\n self.pl.run(self._dataset_dir)", "def __init__(\n self,\n pipeline_path: Optional[str] = None,\n pipeline_definition: Optional[Dict] = None,\n validate: bool = False,\n ):\n if not pipeline_path and not pipeline_definition:\n # at least one parameter should be provided\n raise ValueError(\"At least one parameter must be provided ('pipeline_path' or 'pipeline_definition')\")\n if pipeline_path and 
pipeline_definition:\n # only one parameter should be provided\n raise ValueError(\"Only one parameter should be provided ('pipeline_path' or 'pipeline_definition')\")\n\n if pipeline_path:\n # supporting loading pipeline from file\n if not os.path.exists(pipeline_path):\n raise ValueError(f\"Pipeline file not found: '{pipeline_path}'\\n\")\n\n with open(pipeline_path) as f:\n try:\n self._pipeline_definition = json.load(f)\n except ValueError as ve:\n raise ValueError(f\"Pipeline file is invalid: \\n {ve}\")\n else:\n # supporting passing the pipeline definition directly\n self._pipeline_definition = pipeline_definition\n\n if validate:\n self.validate()\n\n self.propagate_pipeline_default_properties()", "def test_required_unknown():\n parser=argparse.ArgumentParser()\n parser.add_argument('--region',\n help='Enter a region like us-east-2.',\n dest=\"region\",\n action=ValidateRegion,\n required=True)\n parser.add_argument('--output',\n help='pretty, json, yaml',\n dest=\"output\",\n action=Validateoutput,\n nargs=\"?\",\n default=\"yaml\"\n )\n parser.add_argument('--filter-types',\n help='eg: AWS::IAM::Role or AWS::EC2::Instance. Using \"ALL\" with no quotes and we will run it for all current supported resource types',\n nargs='+',\n dest=\"types\",\n action=Validatefilter,\n required=True)\n parser.add_argument('--tag_keys',\n help='Allows you to exclude particular AWS Resources based on the presence of a particular tag key on the resource. This will only be applied to AWS Resources that support tagging. Valid values: any string that is a valid tag - multiple values can be supplied.',\n dest=\"tags\")\n \n #This should raise an error since this will cause a SystemExit since bad params were passed in \n args = [\"--region\", \"NADA\",'--output', \"NADA\",'--filter-types',\"NADA\"]\n with pytest.raises(SystemExit):\n parser.parse_args(args)\n \n \n \n \n #This should NOT raise an error since good params were passed into the parser\n args = [\"--region\", \"us-east-1\",'--output', \"yaml\",'--filter-types',\"AWS::EC2::Instance\"] \n with not_raises(SystemExit):\n parser.parse_args(args)" ]
[ "0.77696097", "0.7578576", "0.7403642", "0.6423569", "0.640242", "0.63075686", "0.61619073", "0.6150967", "0.60948867", "0.6091435", "0.60331774", "0.6016926", "0.60129344", "0.5994113", "0.59888446", "0.59842837", "0.5973404", "0.5892259", "0.5888149", "0.5880129", "0.5880129", "0.5862829", "0.58307624", "0.58115256", "0.58088404", "0.56480145", "0.5579959", "0.55504483", "0.55082995", "0.5501474" ]
0.7685353
1
Test that the expected errors are raised when invalid pipeline resources are provided.
def test_to_pipeline_configuration_invalid_resources( resources, make_mock_pipeline_graph, make_mock_registered_model_version, make_mock_registered_model, ) -> None: mocked_rm = make_mock_registered_model(id=123, name="test_rmv") with patch.object( verta.pipeline.PipelineStep, "_get_registered_model", return_value=mocked_rm ): graph = make_mock_pipeline_graph() step_resources = {step.name: resources for step in graph.steps} pipeline = RegisteredPipeline( graph=graph, registered_model_version=make_mock_registered_model_version(), ) # step name not in pipeline step_resources["invalid_step_name"] = resources with pytest.raises(ValueError) as err: pipeline._to_pipeline_configuration(pipeline_resources=step_resources) assert ( str(err.value) == "pipeline_resources contains resources for a step not in the " "pipeline: 'invalid_step_name'" ) step_resources.pop("invalid_step_name") # step name not a string step_resources.update({123: resources}) with pytest.raises(TypeError) as err2: pipeline._to_pipeline_configuration(pipeline_resources=step_resources) assert ( str(err2.value) == "pipeline_resources keys must be type str, not <class 'int'>" ) step_resources.pop(123) # step resource not a Resources object step_resources.update({"step_1": "not_resources"}) with pytest.raises(TypeError) as err3: pipeline._to_pipeline_configuration(pipeline_resources=step_resources) assert ( str(err3.value) == "pipeline_resources values must be type Resources, not <class 'str'>" )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_creation_fail(self):\n\n # Assert that a RelaxError occurs when the pipe type is invalid.\n self.assertRaises(RelaxError, pipes.create, 'new', 'x')", "def test_execute_pipeline_two(self):\n task_list = [Test()]\n with self.assertRaises(AttributeError):\n execute_pipeline(task_list)", "def test_resources_exception(self):\n with self.assertRaises(ProcessorConfigError) as context:\n self.pl.resource.remove(\"onto_specs_path\")\n self.pl.resource.remove(\"onto_specs_dict\")\n self.pl.add(\n self._stave_processor,\n config={\"port\": self._port, \"server_thread_daemon\": True},\n )\n self.pl.run(self._dataset_dir)", "def test_build_pipeline_two(self):\n args = \"Test_APP ONE TWO ABC\".split(\" \")\n with self.assertRaises(ValueError):\n build_pipeline(args, False)", "def test_with_invalid_input(self):\n for dataset_type in ['ruler', 'pencil', 'cheese']:\n with self.assertRaises(ValueError) as exc:\n check_dataset_type(dataset_type)\n self.assertEqual(\"Dataset type not 'regular' or 'raw' is %s\" % dataset_type,\n str(exc.exception))", "def test_build_pipeline_four(self):\n args = \"Test_APP ONE FIVE\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)", "def test_test(self):\n\n # The following should do nothing as the pipes exist.\n pipes.check_pipe()\n pipes.check_pipe('orig')\n pipes.check_pipe('empty')\n\n # Assert that a RelaxNoPipeError occurs when the pipe doesn't exist.\n self.assertRaises(RelaxNoPipeError, pipes.check_pipe, 'x')\n\n # Reset relax.\n reset()\n\n # Now none of the following pipes exist, hence errors should be thrown.\n self.assertRaises(RelaxNoPipeError, pipes.check_pipe)\n self.assertRaises(RelaxNoPipeError, pipes.check_pipe, 'orig')\n self.assertRaises(RelaxNoPipeError, pipes.check_pipe, 'empty')", "def test_build_pipeline_three(self):\n args = \"Test_APP ONE FOUR\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)", "def test_badstageerror_raise(self, mock_isdir):\n # Set the mocked functions returned values\n mock_isdir.side_effect = [True]\n\n # Test execution\n wrong_kwargs = copy.copy(self.kwargs)\n wrong_kwargs[\"reconstruction_stage\"] = \"WRONG\"\n self.assertRaises(ValueError, recon_all, **wrong_kwargs)", "def test_object_provision_command_when_invalid_arguments_provided(mock_client):\n from IllumioCore import object_provision_command\n\n args = {\"security_policy_objects\": \"\"}\n err_msg = (\n \"security_policy_objects is a required parameter. 
Please provide correct value.\"\n )\n\n with pytest.raises(ValueError) as err:\n object_provision_command(mock_client, args)\n\n assert str(err.value) == err_msg", "def test_build_pipeline_eight(self):\n args = \"Test_APP ONE SIX A B\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)", "def test_invalid_resource(self, mock_api_handler, mock_set_and_write):\n mock_api_handler.upload_sequencing_run.side_effect = [IridaResourceError(\"\")]\n mock_set_and_write.side_effect = [True]\n\n with self.assertRaises(IridaResourceError):\n upload_helpers.upload_sequencing_run(directory_status='status',\n sequencing_run='run',\n upload_mode='mode')\n\n mock_api_handler.upload_sequencing_run.assert_called_with(directory_status='status',\n sequencing_run='run',\n upload_mode='mode',\n run_id=None)\n mock_set_and_write.assert_called_with(\"status\", DirectoryStatus.ERROR,\n \"Could not access IRIDA resource Errors: ('',)\")", "def test_switch_fail(self):\n\n # Assert that a RelaxNoPipeError occurs when the pipe type is invalid.\n self.assertRaises(RelaxNoPipeError, pipes.switch, 'x')", "def test_pipeline_error(time):\n\n # test fit\n df = _test_df()\n\n def _func(df):\n return df[\"num1\"] == df[\"num3\"]\n\n pipeline = PdPipeline([ColByFrameFunc(\"Equality\", _func), ColDrop(\"B\")])\n with pytest.raises(PipelineApplicationError):\n pipeline.fit(df, verbose=True, time=time)\n\n # test apply\n df = _test_df()\n with pytest.raises(PipelineApplicationError):\n pipeline.apply(df, verbose=True, time=time)\n\n # test transform\n df = _test_df()\n with pytest.raises(PipelineApplicationError):\n pipeline.transform(df, verbose=True, time=time)\n\n # test fit_transform\n df = _test_df()\n with pytest.raises(PipelineApplicationError):\n pipeline.fit_transform(df, verbose=True, time=time)", "def test_enforcement_boundary_create_command_when_invalid_arguments_provided(\n err_msg, args, err_type, mock_client\n):\n with pytest.raises(err_type) as err:\n enforcement_boundary_create_command(mock_client, args)\n assert str(err.value) == err_msg", "def test_bad_number_of_files():\n with pytest.raises(Exception):\n process_files(['resources/simple_data.json', 'resources/simple_data.json', 'resources/simple_data.json'])\n with pytest.raises(Exception):\n process_files([])", "def test_workloads_list_command_when_invalid_arguments_provided(\n err_msg, args, err_type, mock_client\n):\n with pytest.raises(err_type) as err:\n workloads_list_command(mock_client, args)\n assert str(err.value) == err_msg", "def test_bad_input_data(tool):\n\n for cmd in (\"filter\", \"report\", \"start\", \"stats\"):\n for args in tool.bad_paths:\n if cmd == \"filter\":\n args = f\"--rfilt 'index!=0' {args}\"\n with pytest.raises(Exceptions.Error):\n tool.command(cmd, args)", "def test_base_validate_models():\n with pytest.raises(ValueError):\n base_validate_models([])\n\n assert [\"m\"] == base_validate_models([\"m\"])", "def test_execute_pipeline_three(self):\n task_list = [Task()]\n with self.assertRaises(NotImplementedError):\n execute_pipeline(task_list)", "def test_bad_structures(bad_structures, mapper):\n for index, structure in enumerate(bad_structures):\n # This is for helping devs finding any errors that may occur\n print(f\"Trying structure number {index} from 'test_bad_structures.json'\")\n with pytest.raises(ValidationError):\n StructureResource(**mapper(MAPPER).map_back(structure))", "def test_errors_on_bad_argument(self):\n self.assertRaises(Exception, Scope, 'foo')\n self.assertRaises(Exception, Scope, 
1)\n self.assertRaises(Exception, Scope, [])\n self.assertRaises(Exception, Scope, tuple())", "def test_resource_err(self, integrationtest, k8sconfig):\n # Fixtures.\n config = self.k8sconfig(integrationtest, k8sconfig)\n err_resp = (K8sResource(\"\", \"\", \"\", False, \"\"), True)\n MM = MetaManifest\n\n # Sanity check: ask for a valid StatefulSet.\n _, err = k8s.resource(config, MM(\"apps/v1\", \"StatefulSet\", \"ns\", \"name\"))\n assert not err\n\n # Ask for a StatefulSet on a bogus API endpoint.\n assert k8s.resource(config, MM(\"bogus\", \"StatefulSet\", \"ns\", \"name\")) == err_resp\n\n # Ask for a bogus K8s kind.\n assert k8s.resource(config, MM(\"v1\", \"Bogus\", \"ns\", \"name\")) == err_resp\n assert k8s.resource(config, MM(\"\", \"Bogus\", \"ns\", \"name\")) == err_resp", "def test_does_not_validate_invalid_files(self):\n bad_files = (\n 'newstest2019-defr-src-ts.de.sgm',\n 'newstest2019-defr-src-ts.de.xml',\n )\n for bad_file in bad_files:\n bad_path = join(getcwd(), 'testdata', bad_file)\n with self.assertRaises(ValueError):\n _ = valitest.ValidatableTestSet(bad_path)", "def test_deletion_fail(self):\n\n # Assert that a RelaxNoPipeError occurs when the data pipe does not exist.\n self.assertRaises(RelaxNoPipeError, pipes.delete, 'x')", "def test_invalid_validation(self, mock_api_handler, mock_set_and_write):\n stub_validation_result = self.StubValidationResult()\n stub_validation_result.valid = False\n mock_api_handler.prepare_and_validate_for_upload.side_effect = [stub_validation_result]\n mock_set_and_write.side_effect = [True]\n\n with self.assertRaises(Exception):\n upload_helpers.irida_prep_and_validation(\"seqrun\", \"\")\n\n mock_api_handler.prepare_and_validate_for_upload.assert_called_with(\"seqrun\")\n mock_set_and_write.assert_called_with(\"\", DirectoryStatus.ERROR,\n 'Sequencing run can not be uploaded, Errors: []')", "def test_attach_builds_verifies_valid_state(self, echo):\n advisory = errata.Advisory(errata_id=123)\n with self.assertRaises(ValueError) as context:\n advisory.attach_builds(['build-1-123'], 'unkown_build_type')\n self.assertTrue(\"should be one of 'rpm' or 'image'\" in context.exception.__str__())", "def test_valid_file_raises():\n with pytest.raises(ValueError):\n cli._valid_file(__file__)", "def test_pauli_error_raise_invalid(self):\n self.assertRaises(NoiseError, lambda: pauli_error([('S', 1)]))", "def test_companies_fail(self):\n with self.assertRaises(AssertionError):\n self.resource.companies(-1)" ]
[ "0.69556063", "0.6845119", "0.6751194", "0.67011756", "0.66657954", "0.6635373", "0.6581409", "0.6536569", "0.6497356", "0.6491855", "0.6488647", "0.64853007", "0.64841676", "0.64522225", "0.6404109", "0.6401285", "0.6376981", "0.6353701", "0.6349466", "0.6321207", "0.6282897", "0.6273555", "0.62523675", "0.624802", "0.6247877", "0.6236497", "0.62206125", "0.62129164", "0.62120867", "0.6211584" ]
0.7442444
0
Test that a pipeline configuration can be constructed from a RegisteredPipeline object without providing any pipeline resources.
def test_to_pipeline_configuration_no_resources( make_mock_pipeline_graph, make_mock_registered_model_version, make_mock_registered_model, ) -> None: mocked_rm = make_mock_registered_model(id=123, name="test_rm") with patch.object( verta.pipeline.PipelineStep, "_get_registered_model", return_value=mocked_rm ): graph = make_mock_pipeline_graph() pipeline = RegisteredPipeline( graph=graph, registered_model_version=make_mock_registered_model_version(), ) pipeline_configuration = pipeline._to_pipeline_configuration() assert pipeline_configuration["pipeline_version_id"] == pipeline.id for graph_step, config_step in zip(graph.steps, pipeline_configuration["steps"]): # All steps are included in the configuration assert graph_step.name == config_step["name"] # No resources are found in the resulting configuration assert "resources" not in config_step.keys()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_to_pipeline_configuration_valid_incomplete(\n resources,\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n partial_steps = list(graph.steps)[:-1]\n excluded_step = list(graph.steps)[-1]\n step_resources = {step.name: resources for step in partial_steps}\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n\n pipeline_configuration = pipeline._to_pipeline_configuration(\n pipeline_resources=step_resources\n )\n assert pipeline_configuration[\"pipeline_version_id\"] == pipeline.id\n # All steps have been included in the configuration\n assert len(graph.steps) == len(pipeline_configuration[\"steps\"])\n # Compare the steps that have resources, allowing zip to drop the excluded step.\n for graph_step, config_step in zip(partial_steps, pipeline_configuration[\"steps\"]):\n # All steps provided are included in the configuration.\n assert graph_step.name == config_step[\"name\"]\n # All steps for which resource were provided have resources in the config.\n assert \"resources\" in config_step.keys()\n # The step for which no resources were provided is in the config without resources.\n assert excluded_step.name == pipeline_configuration[\"steps\"][-1][\"name\"]\n assert \"resources\" not in pipeline_configuration[\"steps\"][-1].keys()", "def test_load_pipeline():\n\n # Given\n pipeline_file_name = f\"{core.config.app_config.MODEL_PIPELINE_NAME}{_version}.pkl\"\n\n # When\n subject = utils.load_pipeline(file_name=pipeline_file_name)\n\n # Then\n assert isinstance(subject, sklearn.pipeline.Pipeline)", "def test_to_pipeline_configuration_valid_complete(\n resources,\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n step_resources = {step.name: resources for step in graph.steps}\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n\n pipeline_configuration = pipeline._to_pipeline_configuration(\n pipeline_resources=step_resources\n )\n assert pipeline_configuration[\"pipeline_version_id\"] == pipeline.id\n assert len(graph.steps) == len(pipeline_configuration[\"steps\"])\n for graph_step, config_step in zip(graph.steps, pipeline_configuration[\"steps\"]):\n # All steps provided are included in the configuration.\n assert graph_step.name == config_step[\"name\"]\n # All steps in the config have resources\n assert \"resources\" in config_step.keys()", "def test_to_pipeline_configuration_invalid_resources(\n resources,\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rmv\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n step_resources = {step.name: resources for step in graph.steps}\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n # step name not in 
pipeline\n step_resources[\"invalid_step_name\"] = resources\n with pytest.raises(ValueError) as err:\n pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err.value) == \"pipeline_resources contains resources for a step not in the \"\n \"pipeline: 'invalid_step_name'\"\n )\n step_resources.pop(\"invalid_step_name\")\n # step name not a string\n step_resources.update({123: resources})\n with pytest.raises(TypeError) as err2:\n pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err2.value) == \"pipeline_resources keys must be type str, not <class 'int'>\"\n )\n step_resources.pop(123)\n # step resource not a Resources object\n step_resources.update({\"step_1\": \"not_resources\"})\n with pytest.raises(TypeError) as err3:\n pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err3.value)\n == \"pipeline_resources values must be type Resources, not <class 'str'>\"\n )", "def _setup_pipeline_cfg(self):", "def test_build_pipeline_four(self):\n args = \"Test_APP ONE FIVE\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)", "def __init__(self, pipeline, config=None):\n self.config = config\n self.pipeline = pipeline", "def test_is_pipeline(model):\n assert type(model) == Pipeline", "def testAttributes(self):\n pl = Pipeline(loadInitFile=False)\n batch = Batch(pl)\n self.assertIs(pl, batch.pipeline)", "def test_to_pipeline_definition(\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n pipeline_definition = pipeline._to_pipeline_definition()\n assert pipeline_definition == {\n \"pipeline_version_id\": pipeline.id,\n \"graph\": graph._to_graph_definition(),\n \"steps\": graph._to_steps_definition(),\n }", "def test_build_pipeline_three(self):\n args = \"Test_APP ONE FOUR\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)", "def test_new_deployment_pipeline(self):\n # set up\n new_config_patcher = patch(\n 'factories.new_config',\n return_value=5,\n )\n mock_new_config = new_config_patcher.start()\n\n new_env_patcher = patch('factories.new_env', return_value=9)\n mock_new_env = new_env_patcher.start()\n\n # run SUT passing branch_id: 1, copy_config_id: 6, copy_env_id: None\n pipeline_id = new_deployment_pipeline(1, 6)\n\n # confirm that new config was based on config 6\n mock_new_config.assert_called_once_with(6)\n\n # confirm that new env was not based on anything\n mock_new_env.assert_called_once_with(None)\n\n # confirm reasonable sql was executed to make a pipeline\n self.mock_get_cur.return_value.execute.assert_called_once_with(\n \"INSERT INTO deployment_pipeline \" + \\\n \"(branch_id, config_id, environment_id, automatic) \" + \\\n \"VALUES (%s, %s, %s, %s) \" + \\\n \"RETURNING deployment_pipeline_id\",\n (1, 5, 9, False),\n )\n\n # make sure we closed the cursor\n self.mock_get_cur.return_value.close.assert_called_once_with()", "def _init_pipeline(self, cfg: ConfigType) -> Callable:", "def test_build_pipeline_eight(self):\n args = \"Test_APP ONE SIX A B\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)", "def 
test_from_pipeline_definition(\n make_mock_registered_model_version,\n mocked_responses,\n) -> None:\n rmv = make_mock_registered_model_version()\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/1\",\n json={},\n status=200,\n )\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/2\",\n json={},\n status=200,\n )\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/registered_models/0\",\n json={},\n status=200,\n )\n pipeline = RegisteredPipeline._from_pipeline_definition(\n registered_model_version=rmv,\n )\n assert isinstance(pipeline, RegisteredPipeline)\n assert pipeline.id == rmv.id", "def test_constructor_missing_config():\n with pytest.raises(TypeError):\n Unpacker()", "def ignore_test_handles_pipeline_with_non_existing_component(self):\n test_config = \"tests/data/test_config/test_config.json\"\n config = AnnotatorConfig(test_config)\n config['pipeline'].append(\"unknown_component\")\n\n tmp_path = create_tmp_test_jsonfile(\"tmp.json\")\n train_data = load_local_data(tmp_path)\n rm_tmp_file(\"tmp.json\")\n\n with pytest.raises(Exception) as execinfo:\n trainer = Trainer(config)\n trainer.train(train_data)\n assert \"Failed to find component\" in str(execinfo.value)", "def test_make_pipeline(self):\n\n umap = UMAPVisualizer() # Should not cause an exception.\n assert umap.transformer_ is not None\n\n assert len(umap.transformer_.steps) == 1", "def test_Pipeline_initiation(PrePd_data_dir_path):\n #TODO: Make this more than a smoke test\n\n PreP_Data = Import_PrePd_Data(bird_id, date, location=PrePd_data_dir_path)\n\n pipeline = Pipeline(PreP_Data)\n\n # Smoke Tests\n assert isinstance(pipeline.Activity_Log, dict)\n assert isinstance(pipeline.Backup, tuple)\n assert isinstance(pipeline.Status, bool)\n\n # Unit Tests\n assert pipeline.bird_id == PreP_Data.bird_id\n assert pipeline.date == PreP_Data.date\n assert pipeline.Sn_Len == PreP_Data.Sn_Len\n assert pipeline.Gap_Len == PreP_Data.Gap_Len\n assert pipeline.Num_Chan == PreP_Data.Num_Chan\n assert pipeline.Bad_Channels == PreP_Data.Bad_Channels # Debating Hard Passing Bad_Channels\n assert pipeline.Fs == PreP_Data.Fs\n assert np.all(pipeline.Song_Audio[0] == PreP_Data.Song_Audio[0]) # Debating Including Audio\n assert np.all(pipeline.Song_Neural[0] == PreP_Data.Song_Neural[0])\n assert np.all(pipeline.Silence_Audio[0] == PreP_Data.Silence_Audio[0]) # Debating Including Audio\n assert np.all(pipeline.Silence_Neural[0] == PreP_Data.Silence_Neural[0])\n assert pipeline.Num_Motifs == PreP_Data.Num_Motifs\n assert pipeline.Num_Silence == PreP_Data.Num_Silence\n assert np.all(pipeline.Good_Motifs == PreP_Data.Good_Motifs)\n assert np.all(pipeline.Bad_Motifs == PreP_Data.Bad_Motifs)\n assert np.all(pipeline.LS_Drop == PreP_Data.LS_Drop)\n assert np.all(pipeline.Last_Motifs == PreP_Data.Last_Motifs)\n assert np.all(pipeline.First_Motifs == PreP_Data.First_Motifs)\n assert np.all(pipeline.All_First_Motifs == PreP_Data.All_First_Motifs)\n assert np.all(pipeline.All_Last_Motifs == PreP_Data.All_Last_Motifs)\n assert np.all(pipeline.Good_Mid_Motifs == PreP_Data.Good_Mid_Motifs)\n assert np.all(pipeline.Good_Channels == Good_Channel_Index(PreP_Data.Num_Chan, PreP_Data.Bad_Channels))\n assert pipeline.Status\n assert pipeline.Step_Count == 0", "def __init__(self,\n config: Optional[pipeline_config.PipelineConfig] = None):\n if config is None:\n config = pipeline_config.PipelineConfig(\n supported_launcher_classes=[\n 
in_process_component_launcher.InProcessComponentLauncher,\n docker_component_launcher.DockerComponentLauncher,\n ],\n )\n super().__init__(config)", "def __init__(\n self,\n pipeline_path: Optional[str] = None,\n pipeline_definition: Optional[Dict] = None,\n validate: bool = False,\n ):\n if not pipeline_path and not pipeline_definition:\n # at least one parameter should be provided\n raise ValueError(\"At least one parameter must be provided ('pipeline_path' or 'pipeline_definition')\")\n if pipeline_path and pipeline_definition:\n # only one parameter should be provided\n raise ValueError(\"Only one parameter should be provided ('pipeline_path' or 'pipeline_definition')\")\n\n if pipeline_path:\n # supporting loading pipeline from file\n if not os.path.exists(pipeline_path):\n raise ValueError(f\"Pipeline file not found: '{pipeline_path}'\\n\")\n\n with open(pipeline_path) as f:\n try:\n self._pipeline_definition = json.load(f)\n except ValueError as ve:\n raise ValueError(f\"Pipeline file is invalid: \\n {ve}\")\n else:\n # supporting passing the pipeline definition directly\n self._pipeline_definition = pipeline_definition\n\n if validate:\n self.validate()\n\n self.propagate_pipeline_default_properties()", "def test_get_pipeline_returns_none_if_non_existent(tmp_path: str) -> None:\n Repo.init(tmp_path)\n repo = Repository(str(tmp_path))\n repo.set_active_stack(\"local_stack\")\n our_pipeline = repo.get_pipeline(\"not_a_pipeline\")\n assert our_pipeline is None", "def test_list_pipeline_add_one(self):\n response = self.client.list_pipelines()\n exsit = False\n for pipeline in response.pipelines:\n if pipeline.pipeline_name == self.pipeline_name:\n exsit = True\n break\n nose.tools.assert_true(exsit)", "def test_build_pipeline_two(self):\n args = \"Test_APP ONE TWO ABC\".split(\" \")\n with self.assertRaises(ValueError):\n build_pipeline(args, False)", "def object_is_valid_pipeline(o):\n return (o is not None and\n hasattr(o, 'fit') and\n hasattr(o, 'predict') and\n hasattr(o, 'steps'))", "def object_is_valid_pipeline(o):\n return (o is not None and\n hasattr(o, 'fit') and\n hasattr(o, 'predict') and\n hasattr(o, 'steps'))", "def test_get_pipeline_definition_artifact(\n make_mock_registered_model_version,\n make_mock_simple_pipeline_definition,\n) -> None:\n rmv = make_mock_registered_model_version()\n pipeline_definition = RegisteredPipeline._get_pipeline_definition_artifact(\n registered_model_version=rmv,\n )\n assert pipeline_definition == make_mock_simple_pipeline_definition(id=rmv.id)", "def test_build_pipeline_six(self):\n args = \"Test_APP FIVE A B\".split(\" \")\n task_list = build_pipeline(args, False)\n self.assertEqual(1, len(task_list))", "def test_execute_pipeline_two(self):\n task_list = [Test()]\n with self.assertRaises(AttributeError):\n execute_pipeline(task_list)", "def test_pipeline_as_nonclass_fixture(mockpipe, testdir):\n test = testdir.makepyfile(TEST_AS_NONCLASS_FIXTURE)\n result = testdir.inline_run(\n \"-v\",\n f\"--base-pipeline-dir={test.dirname}\",\n test\n )\n passed, skipped, failed = result.listoutcomes()\n\n assert len(passed) == 1\n assert len(skipped) == 0\n assert len(failed) == 0" ]
[ "0.702158", "0.6975604", "0.6957339", "0.69055235", "0.6709925", "0.6463869", "0.6353609", "0.63210964", "0.62990576", "0.6240508", "0.6217712", "0.621394", "0.6183469", "0.6107562", "0.60923505", "0.6073859", "0.6047508", "0.60444486", "0.59288347", "0.5884892", "0.5882104", "0.58722246", "0.5857961", "0.58548534", "0.5793977", "0.5793977", "0.5717249", "0.57064134", "0.5693238", "0.5684898" ]
0.76080483
0
Test that a RegisteredPipeline object can be constructed from a pipeline definition. The model version's `_get_artifact` function is overridden in the mocked RMV fixture to return a simple, consistent pipeline definition. Calls related to the fetching of the RMV and RM are mocked.
def test_from_pipeline_definition( make_mock_registered_model_version, mocked_responses, ) -> None: rmv = make_mock_registered_model_version() mocked_responses.get( f"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/1", json={}, status=200, ) mocked_responses.get( f"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/2", json={}, status=200, ) mocked_responses.get( f"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/registered_models/0", json={}, status=200, ) pipeline = RegisteredPipeline._from_pipeline_definition( registered_model_version=rmv, ) assert isinstance(pipeline, RegisteredPipeline) assert pipeline.id == rmv.id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_pipeline_definition_artifact(\n make_mock_registered_model_version,\n make_mock_simple_pipeline_definition,\n) -> None:\n rmv = make_mock_registered_model_version()\n pipeline_definition = RegisteredPipeline._get_pipeline_definition_artifact(\n registered_model_version=rmv,\n )\n assert pipeline_definition == make_mock_simple_pipeline_definition(id=rmv.id)", "def test_to_pipeline_definition(\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n pipeline_definition = pipeline._to_pipeline_definition()\n assert pipeline_definition == {\n \"pipeline_version_id\": pipeline.id,\n \"graph\": graph._to_graph_definition(),\n \"steps\": graph._to_steps_definition(),\n }", "def test_log_pipeline_definition_artifact(\n model_version_name,\n mocked_responses,\n make_mock_pipeline_graph,\n make_mock_registered_model,\n make_mock_registered_model_version,\n) -> None:\n rm = make_mock_registered_model(id=123, name=\"test_rm\")\n rmv = make_mock_registered_model_version()\n # Fetch the registered model version\n mocked_responses.get(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/{rmv.id}\",\n json={\n \"model_version\": {\n \"id\": rmv.id,\n \"registered_model_id\": rmv.registered_model_id,\n \"version\": model_version_name,\n }\n },\n status=200,\n )\n mocked_responses.put(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/registered_models/{rmv.registered_model_id}/model_versions/{rmv.id}\",\n json={},\n status=200,\n )\n # Fetch the artifact upload URL\n mocked_responses.post(\n f\"{rmv._conn.scheme}://{rmv._conn.socket}/api/v1/registry/model_versions/{rmv.id}/getUrlForArtifact\",\n json={\n \"url\": f\"https://account.s3.amazonaws.com/development/ModelVersionEntity/\"\n f\"{rmv.id}/pipeline.json\"\n },\n status=200,\n )\n # Upload the artifact\n mocked_responses.put(\n f\"https://account.s3.amazonaws.com/development/ModelVersionEntity/{rmv.id}/pipeline.json\",\n json={},\n status=200,\n )\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=rm\n ):\n pipeline = RegisteredPipeline(\n graph=make_mock_pipeline_graph(),\n registered_model_version=rmv,\n )\n pipeline._log_pipeline_definition_artifact()", "def test_load_pipeline():\n\n # Given\n pipeline_file_name = f\"{core.config.app_config.MODEL_PIPELINE_NAME}{_version}.pkl\"\n\n # When\n subject = utils.load_pipeline(file_name=pipeline_file_name)\n\n # Then\n assert isinstance(subject, sklearn.pipeline.Pipeline)", "def test_new_deployment_pipeline(self):\n # set up\n new_config_patcher = patch(\n 'factories.new_config',\n return_value=5,\n )\n mock_new_config = new_config_patcher.start()\n\n new_env_patcher = patch('factories.new_env', return_value=9)\n mock_new_env = new_env_patcher.start()\n\n # run SUT passing branch_id: 1, copy_config_id: 6, copy_env_id: None\n pipeline_id = new_deployment_pipeline(1, 6)\n\n # confirm that new config was based on config 6\n mock_new_config.assert_called_once_with(6)\n\n # confirm that new env was not based on anything\n mock_new_env.assert_called_once_with(None)\n\n # confirm reasonable sql was executed to make a pipeline\n 
self.mock_get_cur.return_value.execute.assert_called_once_with(\n \"INSERT INTO deployment_pipeline \" + \\\n \"(branch_id, config_id, environment_id, automatic) \" + \\\n \"VALUES (%s, %s, %s, %s) \" + \\\n \"RETURNING deployment_pipeline_id\",\n (1, 5, 9, False),\n )\n\n # make sure we closed the cursor\n self.mock_get_cur.return_value.close.assert_called_once_with()", "def test_to_pipeline_configuration_no_resources(\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n pipeline_configuration = pipeline._to_pipeline_configuration()\n assert pipeline_configuration[\"pipeline_version_id\"] == pipeline.id\n for graph_step, config_step in zip(graph.steps, pipeline_configuration[\"steps\"]):\n # All steps are included in the configuration\n assert graph_step.name == config_step[\"name\"]\n # No resources are found in the resulting configuration\n assert \"resources\" not in config_step.keys()", "def test_to_pipeline_configuration_invalid_resources(\n resources,\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rmv\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n step_resources = {step.name: resources for step in graph.steps}\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n # step name not in pipeline\n step_resources[\"invalid_step_name\"] = resources\n with pytest.raises(ValueError) as err:\n pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err.value) == \"pipeline_resources contains resources for a step not in the \"\n \"pipeline: 'invalid_step_name'\"\n )\n step_resources.pop(\"invalid_step_name\")\n # step name not a string\n step_resources.update({123: resources})\n with pytest.raises(TypeError) as err2:\n pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err2.value) == \"pipeline_resources keys must be type str, not <class 'int'>\"\n )\n step_resources.pop(123)\n # step resource not a Resources object\n step_resources.update({\"step_1\": \"not_resources\"})\n with pytest.raises(TypeError) as err3:\n pipeline._to_pipeline_configuration(pipeline_resources=step_resources)\n assert (\n str(err3.value)\n == \"pipeline_resources values must be type Resources, not <class 'str'>\"\n )", "def test_to_pipeline_configuration_valid_complete(\n resources,\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n step_resources = {step.name: resources for step in graph.steps}\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n\n pipeline_configuration = pipeline._to_pipeline_configuration(\n pipeline_resources=step_resources\n )\n assert 
pipeline_configuration[\"pipeline_version_id\"] == pipeline.id\n assert len(graph.steps) == len(pipeline_configuration[\"steps\"])\n for graph_step, config_step in zip(graph.steps, pipeline_configuration[\"steps\"]):\n # All steps provided are included in the configuration.\n assert graph_step.name == config_step[\"name\"]\n # All steps in the config have resources\n assert \"resources\" in config_step.keys()", "def test_is_pipeline(model):\n assert type(model) == Pipeline", "def test_save_pipeline():\n\n # Given\n try:\n pipeline_for_test = joblib.load(\n core.TRAINED_MODEL_DIR\n / f\"{core.config.app_config.MODEL_PIPELINE_NAME}{_version}.pkl\"\n )\n subject_file_name = (\n f\"{core.config.app_config.MODEL_PIPELINE_NAME}{_version}.pkl\"\n )\n except:\n subject_file_name = f\"fake_pipe_line_model_v{_version}.pkl\"\n\n # When\n utils.save_pipeline(pipeline_for_test, subject_file_name)\n\n # Then\n # Get the files in the model save's directory\n trained_model_dir_file_list = [\n file.name for file in core.TRAINED_MODEL_DIR.iterdir()\n ]\n\n # Check if the pipeline was saved in TRAINED_MODEL_DIR and with the right filename\n assert subject_file_name in trained_model_dir_file_list\n # Check if the __init__.py file is in the TRAINED_MODEL_DIR\n assert \"__init__.py\" in trained_model_dir_file_list\n # Check if the TRAINED_MODEL_DIR folder contains just the new saved pipeline and the __init__.py file\n assert len(trained_model_dir_file_list) == 2\n # remove the fake pipeline\n if subject_file_name == f\"fake_pipe_line_model_v{_version}.pkl\":\n core.TRAINED_MODEL_DIR / subject_file_name.unlink()", "def test_build_pipeline_four(self):\n args = \"Test_APP ONE FIVE\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)", "def test_copy_graph(\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n copied_graph = pipeline.copy_graph()\n # convert from sets to lists and sort for side-by-side comparison\n graph_steps_sorted = sorted(graph.steps, key=lambda x: x.name)\n copied_graph_steps_sorted = sorted(copied_graph.steps, key=lambda x: x.name)\n\n for orig_step, copied_step in zip(graph_steps_sorted, copied_graph_steps_sorted):\n assert orig_step is not copied_step\n assert orig_step.name == copied_step.name\n assert orig_step.predecessors == copied_step.predecessors\n assert (\n orig_step.registered_model_version.id\n == copied_step.registered_model_version.id\n )\n assert copied_graph is not graph", "def __init__(self, pipeline=PIPELINE, name=\"fake_estimator\"):\n super().__init__(pipeline=pipeline, name=name)", "def create_pipeline(\n pipeline_name: Text,\n pipeline_root: Text,\n data_root: Text,\n module_file: Text,\n metadata_path: Text,\n beam_pipeline_args: List[Text],\n) -> tfx.dsl.Pipeline:\n example_gen = tfx.components.CsvExampleGen(input_base=data_root)\n\n # Computes statistics over data for visualization and example validation.\n statistics_gen = tfx.components.StatisticsGen(\n examples=example_gen.outputs['examples'])\n\n # Generates schema based on statistics files.\n schema_gen = tfx.components.SchemaGen(\n statistics=statistics_gen.outputs['statistics'],\n 
infer_feature_shape=True)\n\n # Performs anomaly detection based on statistics and data schema.\n example_validator = tfx.components.ExampleValidator(\n statistics=statistics_gen.outputs['statistics'],\n schema=schema_gen.outputs['schema'],\n )\n\n trainer_custom_config = {\n 'objective': 'reg:squarederror',\n 'learning_rate': 0.3,\n 'max_depth': 4,\n 'num_boost_round': 200,\n 'early_stopping_rounds': 40,\n }\n\n trainer = tfx.components.Trainer(\n module_file=module_file,\n examples=example_gen.outputs['examples'],\n schema=schema_gen.outputs['schema'],\n train_args=tfx.proto.TrainArgs(),\n eval_args=tfx.proto.EvalArgs(),\n custom_config=trainer_custom_config,\n )\n\n return tfx.dsl.Pipeline(\n pipeline_name=pipeline_name,\n pipeline_root=pipeline_root,\n components=[\n example_gen,\n statistics_gen,\n schema_gen,\n example_validator,\n trainer,\n ],\n enable_cache=True,\n metadata_connection_config=tfx.orchestration.metadata.\n sqlite_metadata_connection_config(metadata_path),\n beam_pipeline_args=beam_pipeline_args,\n )", "def create_pipeline(pipeline_name: Text, \n pipeline_root: Text, \n dataset_name: Text,\n train_steps: data_types.RuntimeParameter,\n eval_steps: data_types.RuntimeParameter,\n accuracy_threshold: data_types.RuntimeParameter,\n ai_platform_training_args: Dict[Text, Text],\n ai_platform_serving_args: Dict[Text, Text],\n beam_pipeline_args: List[Text],\n model_regisrty_uri: Text,\n enable_cache: Optional[bool] = False) -> pipeline.Pipeline:\n\n # Dataset, table and/or 'where conditions' can be passed as pipeline args.\n query=sql_utils.generate_source_query(dataset_name=dataset_name)\n \n # Brings data into the pipeline from BigQuery.\n example_gen = tfx.components.BigQueryExampleGen(\n query=query\n )\n\n # Computes statistics over data for visualization and example validation.\n statistics_gen = tfx.components.StatisticsGen(\n input_data=example_gen.outputs.examples)\n\n # Import schema from local directory.\n schema_importer = ImporterNode(\n instance_name='RawSchemaImporter',\n source_uri=RAW_SCHEMA_DIR,\n artifact_type=Schema,\n )\n\n # Performs anomaly detection based on statistics and data schema.\n validate_stats = tfx.components.ExampleValidator(\n stats=statistics_gen.outputs.output, \n schema=schema_importer.outputs.result\n )\n\n # Performs transformations and feature engineering in training and serving.\n transform = tfx.components.Transform(\n input_data=example_gen.outputs.examples,\n schema=schema_importer.outputs.result,\n module_file=TRANSFORM_MODULE_FILE\n )\n\n\n # Get the latest blessed model for model validation.\n latest_model_resolver = tfx.components.ResolverNode(\n instance_name='latest_blessed_model_resolver',\n resolver_class=latest_blessed_model_resolver.LatestBlessedModelResolver,\n model=Channel(type=Model),\n model_blessing=Channel(type=ModelBlessing)\n )\n \n # Train and save model for evaluation and serving.\n trainer = tfx.components.Trainer(\n# custom_executor_spec=executor_spec.ExecutorClassSpec(\n# ai_platform_trainer_executor.GenericExecutor),\n custom_executor_spec=executor_spec.ExecutorClassSpec(\n trainer_executor.GenericExecutor),\n module_file=TRAIN_MODULE_FILE,\n transformed_examples=transform.outputs.transformed_examples,\n schema=schema_importer.outputs.result,\n transform_output=transform.outputs.transform_output,\n base_model=latest_model_resolver.outputs.model,\n train_args={'num_steps': train_steps},\n eval_args={'num_steps': eval_steps},\n custom_config={'ai_platform_training_args': ai_platform_training_args}\n )\n\n\n 
# Uses TFMA to compute a evaluation statistics over features of a model.\n model_evaluator = tfx.components.Evaluator(\n examples=example_gen.outputs.examples,\n model=trainer.outputs.model,\n baseline_model=latest_model_resolver.outputs.model,\n eval_config=helper.get_eval_config()\n )\n \n # Use a custom AccuracyModelValidator component to validate the model.\n model_validator = AccuracyModelValidator(\n eval_results=model_evaluator.outputs.output,\n model=trainer.outputs.model,\n accuracy_threshold=accuracy_threshold,\n slice_accuracy_tolerance=0.15,\n )\n\n# # Checks whether the model passed the validation steps and pushes the model\n# # to its destination if check passed.\n# pusher = tfx.components.Pusher(\n# custom_executor_spec=executor_spec.ExecutorClassSpec(\n# ai_platform_pusher_executor.Executor),\n# model_export=trainer.outputs.output,\n# model_blessing=model_evaluator.outputs.blessing,\n# #model_blessing=model_validator.outputs.blessing,\n# custom_config={'ai_platform_serving_args': ai_platform_serving_args}\n# )\n \n register = tfx.components.Pusher(\n model=trainer.outputs.model,\n model_blessing=model_validator.outputs.blessing,\n #model_blessing=model_evaluator.outputs.blessing,\n push_destination=tfx.proto.pusher_pb2.PushDestination(\n filesystem=tfx.proto.pusher_pb2.PushDestination.Filesystem(\n base_directory=os.path.join(model_regisrty_uri, pipeline_name)))\n )\n \n return pipeline.Pipeline(\n pipeline_name=pipeline_name,\n pipeline_root=pipeline_root,\n components=[\n example_gen, \n statistics_gen, \n schema_importer, \n validate_stats,\n latest_model_resolver,\n transform,\n trainer, \n model_evaluator, \n model_validator, \n #pusher\n register\n ],\n enable_cache=enable_cache,\n beam_pipeline_args=beam_pipeline_args)", "def test_pipeline(self):\n loss = NSSALoss\n loss_kwargs = {\"margin\": 1.0, \"adversarial_temperature\": 1.0}\n pipeline_results = pipeline(\n model=\"RotatE\",\n dataset=\"nations\",\n loss=loss,\n loss_kwargs=loss_kwargs,\n training_kwargs=dict(use_tqdm=False),\n )\n self.assertIsInstance(pipeline_results, PipelineResult)\n self.assertIsInstance(pipeline_results.model.loss, loss)\n self.assertEqual(pipeline_results.model.loss.margin, 1.0)\n self.assertEqual(pipeline_results.model.loss.inverse_softmax_temperature, 1.0)", "def test_make_pipeline(self):\n\n umap = UMAPVisualizer() # Should not cause an exception.\n assert umap.transformer_ is not None\n\n assert len(umap.transformer_.steps) == 1", "def test_to_pipeline_configuration_valid_incomplete(\n resources,\n make_mock_pipeline_graph,\n make_mock_registered_model_version,\n make_mock_registered_model,\n) -> None:\n mocked_rm = make_mock_registered_model(id=123, name=\"test_rm\")\n with patch.object(\n verta.pipeline.PipelineStep, \"_get_registered_model\", return_value=mocked_rm\n ):\n graph = make_mock_pipeline_graph()\n partial_steps = list(graph.steps)[:-1]\n excluded_step = list(graph.steps)[-1]\n step_resources = {step.name: resources for step in partial_steps}\n pipeline = RegisteredPipeline(\n graph=graph,\n registered_model_version=make_mock_registered_model_version(),\n )\n\n pipeline_configuration = pipeline._to_pipeline_configuration(\n pipeline_resources=step_resources\n )\n assert pipeline_configuration[\"pipeline_version_id\"] == pipeline.id\n # All steps have been included in the configuration\n assert len(graph.steps) == len(pipeline_configuration[\"steps\"])\n # Compare the steps that have resources, allowing zip to drop the excluded step.\n for graph_step, config_step in 
zip(partial_steps, pipeline_configuration[\"steps\"]):\n # All steps provided are included in the configuration.\n assert graph_step.name == config_step[\"name\"]\n # All steps for which resource were provided have resources in the config.\n assert \"resources\" in config_step.keys()\n # The step for which no resources were provided is in the config without resources.\n assert excluded_step.name == pipeline_configuration[\"steps\"][-1][\"name\"]\n assert \"resources\" not in pipeline_configuration[\"steps\"][-1].keys()", "def test_get_pipeline_by_id(self):\n response = self.client.get_pipeline_by_id(2)\n self.assertEqual(response['id'], 2)", "def test_pipeline_class_fixture(mockpipe, testdir):\n test = testdir.makepyfile(TEST_OK_CLASS_FIXTURE)\n result = testdir.inline_run(\n \"-v\",\n f\"--base-pipeline-dir={test.dirname}\",\n test\n )\n passed, skipped, failed = result.listoutcomes()\n\n assert len(passed) == 1\n assert len(skipped) == 0\n assert len(failed) == 0", "def test_build_pipeline_three(self):\n args = \"Test_APP ONE FOUR\".split(\" \")\n with self.assertRaises(TypeError):\n build_pipeline(args, False)", "def test_get_pipeline_returns_none_if_non_existent(tmp_path: str) -> None:\n Repo.init(tmp_path)\n repo = Repository(str(tmp_path))\n repo.set_active_stack(\"local_stack\")\n our_pipeline = repo.get_pipeline(\"not_a_pipeline\")\n assert our_pipeline is None", "def main(): # pragma: no cover\n parser = argparse.ArgumentParser(\"Gets the pipeline definition for the pipeline script.\")\n\n parser.add_argument(\n \"-n\",\n \"--module-name\",\n dest=\"module_name\",\n type=str,\n help=\"The module name of the pipeline to import.\",\n )\n parser.add_argument(\n \"-f\",\n \"--file-name\",\n dest=\"file_name\",\n type=str,\n default=None,\n help=\"The file to output the pipeline definition json to.\",\n )\n parser.add_argument(\n \"-kwargs\",\n \"--kwargs\",\n dest=\"kwargs\",\n default=None,\n help=\"Dict string of keyword arguments for the pipeline generation (if supported)\",\n )\n args = parser.parse_args()\n\n if args.module_name is None:\n parser.print_help()\n sys.exit(2)\n\n try:\n pipeline = get_pipeline_driver(args.module_name, args.kwargs)\n content = pipeline.definition()\n if args.file_name:\n with open(args.file_name, \"w\") as f:\n f.write(content)\n else:\n print(content)\n except Exception as e: # pylint: disable=W0703\n print(f\"Exception: {e}\")\n sys.exit(1)", "def test_fit_with_pipeline_as_meta_estimator(self) -> type(None):\n X, y = get_dataset_for_regression()\n rgr = StackingRegressor(\n base_estimators_types=[LinearRegression, KNeighborsRegressor],\n base_estimators_params=[dict(), {'n_neighbors': 1}],\n meta_estimator_type=Pipeline,\n meta_estimator_params={\n 'steps': [('lin_reg', LinearRegression())]\n },\n keep_meta_X=True\n )\n rgr.fit(X, y)\n true_meta_X_ = np.array(\n [[6.69395712, 15.0],\n [10.76647173, 15.0],\n [14.83898635, 15.0],\n [18.91150097, 21.0],\n [22.98401559, 23.0],\n [9.74141049, 13.0],\n [13.70235081, 13.0],\n [17.66329114, 13.0],\n [21.62423146, 13.0],\n [15.94394213, 21.0],\n [19.8032967, 15.0],\n [23.92527473, 19.0],\n [28.04725275, 23.0],\n [32.16923077, 23.0],\n [11.94542125, 8.0]]\n )\n np.testing.assert_allclose(rgr.meta_X_, true_meta_X_)\n true_coefs_of_base_lr = np.array([1.05304994, 2.97421767])\n np.testing.assert_allclose(\n rgr.base_estimators_[0].coef_,\n true_coefs_of_base_lr\n )\n true_coefs_of_meta_estimator = np.array([1.01168028, -0.04313311])\n np.testing.assert_allclose(\n 
rgr.meta_estimator_.named_steps.lin_reg.coef_,\n true_coefs_of_meta_estimator\n )", "def testInstantiate(self):\n artifact_name = 'artifact'\n artifact = base.BaseArtifact(artifact_name)\n\n self.assertEqual(artifact.size, 0)\n self.assertEqual(artifact.name, artifact_name)\n expected_remote_path = 'Base/artifact'\n self.assertEqual(artifact.remote_path, expected_remote_path)", "def __init__(\n self,\n pipeline_path: Optional[str] = None,\n pipeline_definition: Optional[Dict] = None,\n validate: bool = False,\n ):\n if not pipeline_path and not pipeline_definition:\n # at least one parameter should be provided\n raise ValueError(\"At least one parameter must be provided ('pipeline_path' or 'pipeline_definition')\")\n if pipeline_path and pipeline_definition:\n # only one parameter should be provided\n raise ValueError(\"Only one parameter should be provided ('pipeline_path' or 'pipeline_definition')\")\n\n if pipeline_path:\n # supporting loading pipeline from file\n if not os.path.exists(pipeline_path):\n raise ValueError(f\"Pipeline file not found: '{pipeline_path}'\\n\")\n\n with open(pipeline_path) as f:\n try:\n self._pipeline_definition = json.load(f)\n except ValueError as ve:\n raise ValueError(f\"Pipeline file is invalid: \\n {ve}\")\n else:\n # supporting passing the pipeline definition directly\n self._pipeline_definition = pipeline_definition\n\n if validate:\n self.validate()\n\n self.propagate_pipeline_default_properties()", "def to_pipeline_spec(self) -> pipeline_spec_pb2.PipelineSpec:\n # import here to aviod circular module dependency\n from kfp.compiler import compiler_utils\n from kfp.compiler import pipeline_spec_builder as builder\n from kfp.dsl import pipeline_channel\n from kfp.dsl import pipeline_task\n from kfp.dsl import tasks_group\n\n args_dict = {}\n pipeline_inputs = self.inputs or {}\n\n for arg_name, input_spec in pipeline_inputs.items():\n args_dict[arg_name] = pipeline_channel.create_pipeline_channel(\n name=arg_name,\n channel_type=input_spec.type,\n is_artifact_list=input_spec.is_artifact_list)\n\n task = pipeline_task.PipelineTask(self, args_dict)\n\n # instead of constructing a pipeline with pipeline_context.Pipeline,\n # just build the single task group\n group = tasks_group.TasksGroup(\n group_type=tasks_group.TasksGroupType.PIPELINE)\n group.tasks.append(task)\n\n group.name = uuid.uuid4().hex\n\n pipeline_name = self.name\n task_group = group\n\n pipeline_outputs = {}\n pipeline_output_spec = self.outputs or {}\n\n for arg_name, output_spec in pipeline_output_spec.items():\n pipeline_outputs[\n arg_name] = pipeline_channel.create_pipeline_channel(\n name=arg_name,\n channel_type=output_spec.type,\n task_name=task.name)\n\n utils.validate_pipeline_name(pipeline_name)\n\n pipeline_spec = pipeline_spec_pb2.PipelineSpec()\n pipeline_spec.pipeline_info.name = pipeline_name\n pipeline_spec.sdk_version = f'kfp-{kfp.__version__}'\n # Schema version 2.1.0 is required for kfp-pipeline-spec>0.1.13\n pipeline_spec.schema_version = '2.1.0'\n\n # if we decide to surface component outputs to pipeline level,\n # can just assign the component_spec_proto directly to .root\n component_spec_proto = builder._build_component_spec_from_component_spec_structure(\n self)\n pipeline_spec.root.CopyFrom(component_spec_proto)\n\n builder._build_dag_outputs(\n component_spec=pipeline_spec.root, dag_outputs=pipeline_outputs)\n\n deployment_config = pipeline_spec_pb2.PipelineDeploymentConfig()\n root_group = task_group\n\n task_name_to_parent_groups, group_name_to_parent_groups = 
compiler_utils.get_parent_groups(\n root_group)\n\n def get_inputs(task_group: tasks_group.TasksGroup,\n task_name_to_parent_groups):\n inputs = collections.defaultdict(set)\n if len(task_group.tasks) != 1:\n raise ValueError(\n f'Error compiling component. Expected one task in task group, got {len(task_group.tasks)}.'\n )\n only_task = task_group.tasks[0]\n if only_task.channel_inputs:\n for group_name in task_name_to_parent_groups[only_task.name]:\n inputs[group_name].add((only_task.channel_inputs[-1], None))\n return inputs\n\n inputs = get_inputs(task_group, task_name_to_parent_groups)\n\n builder.build_spec_by_group(\n pipeline_spec=pipeline_spec,\n deployment_config=deployment_config,\n group=root_group,\n inputs=inputs,\n outputs=collections.defaultdict(\n dict), # empty -- no sub-DAG outputs to surface\n dependencies={}, # no dependencies for single-component pipeline\n rootgroup_name=root_group.name,\n task_name_to_parent_groups=task_name_to_parent_groups,\n group_name_to_parent_groups=group_name_to_parent_groups,\n name_to_for_loop_group={}, # no for loop in single-component pipeline\n platform_spec=pipeline_spec_pb2.PlatformSpec(\n ), # no PlatformSpec single-component pipeline\n is_compiled_component=True,\n )\n\n return pipeline_spec", "def test_build_model(arguments):\n ...", "def test_hello_pipeline():\n result = execute_pipeline(hello_pipeline, mode=\"test\")\n\n assert result.success\n assert result.output_for_solid(\"hello\") == \"Hello, NMDC!\"", "def test_generate_pipeline_code():\n pipeline = ['KNeighborsClassifier',\n ['CombineDFs',\n ['GradientBoostingClassifier',\n 'input_matrix',\n 38.0,\n 0.87],\n ['GaussianNB',\n ['ZeroCount',\n 'input_matrix']]],\n 18,\n 33]\n\n expected_code = \"\"\"make_pipeline(\n make_union(\n make_union(VotingClassifier([('branch',\n GradientBoostingClassifier(learning_rate=1.0, max_features=1.0, n_estimators=500)\n )]), FunctionTransformer(lambda X: X)),\n make_union(VotingClassifier([('branch',\n make_pipeline(\n ZeroCount(),\n GaussianNB()\n )\n )]), FunctionTransformer(lambda X: X))\n ),\n KNeighborsClassifier(n_neighbors=5, weights=\"distance\")\n)\"\"\"\n\n assert expected_code == generate_pipeline_code(pipeline)" ]
[ "0.77940893", "0.77576274", "0.7122229", "0.7110022", "0.67007685", "0.6504595", "0.63064855", "0.62975585", "0.6293141", "0.6096061", "0.58818185", "0.5858421", "0.58582985", "0.58366627", "0.58244514", "0.58215785", "0.5737838", "0.5727093", "0.57111335", "0.5692733", "0.56899387", "0.56402135", "0.5588215", "0.5585357", "0.55756927", "0.55753034", "0.5563896", "0.5559165", "0.55540496", "0.5551859" ]
0.7792976
1
Test that we throw the correct exception when a user tries to mutate the steps of a graph in an inappropriate way.
def test_bad_mutation_of_graph_steps_exception( make_mock_registered_model, make_mock_registered_model_version, make_mock_pipeline_graph, ): mocked_rm = make_mock_registered_model(id=123, name="test_rm") mocked_rmv = make_mock_registered_model_version() with patch.object( verta.pipeline.PipelineStep, "_get_registered_model", return_value=mocked_rm ): graph = make_mock_pipeline_graph() graph.steps.add("not_a_step") with pytest.raises(TypeError) as err: RegisteredPipeline(graph=graph, registered_model_version=mocked_rmv) assert ( str(err.value) == f"individual steps of a PipelineGraph must be type" f" PipelineStep, not <class 'str'>." )
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_case31(self):\n\n self.assertRaises(ValueError, lambda: self.graph1.swapStudents(\"student1\",\"supervisor2\",\"student3\",\"supervisor1\"))", "def test_runtime_errors(self, graph_entry_class):\n graph_entry_class.return_value.state = \"Pending\"\n graph_entry_class.return_value.path = \"foo/app1\"\n graph_entry_class.return_value.execute.return_value = (0, ['Failure'], False)\n\n graph = ApplyGraph('plan', self.graph, self.post_graph, \"foo\")\n\n self.assertRaises(RuntimeError, graph.execute_graph())\n self.assertRaises(RuntimeError, graph.execute_post_graph())", "def test_case5(self):\n\n self.assertRaises(ValueError, lambda: self.graph1.removeEdge(\"supervisor2\",\"student1\"))", "def test_exception(\n self,\n ):\n with pytest.raises(ValueError, match=\"cannot be larger than number of subsystems\"):\n symplectic.reduced_state(np.array([0, 0]), np.identity(2), [6, 4])", "def test_graph_cant_delete_an_unpresent_node(graph_no_edges):\n with pytest.raises(ValueError):\n graph_no_edges.del_nodes(3.14)", "def test_wrong_predicate(self):\n\n with pytest.raises(ValueError):\n naughty_case_opt = lq.optimizers.CaseOptimizer(\n (lambda var: False, lq.optimizers.Bop()),\n default_optimizer=optimizers.Adam(0.01),\n )\n\n # Simple MNIST model\n mnist = tf.keras.datasets.mnist\n (train_images, train_labels), _ = mnist.load_data()\n model = tf.keras.Sequential(\n [\n tf.keras.layers.Flatten(input_shape=(28, 28)),\n tf.keras.layers.Dense(128, activation=\"relu\"),\n tf.keras.layers.Dense(10, activation=\"softmax\"),\n ]\n )\n model.compile(\n loss=\"sparse_categorical_crossentropy\",\n optimizer=naughty_case_opt,\n metrics=[\"acc\"],\n )\n\n # Should raise on first call to apply_gradients()\n model.fit(train_images[:1], train_labels[:1], epochs=1)", "def experiment3():\n raise FAKE_ERROR", "def test_node_error_if_nonpresent(graph_no_edges):\n with pytest.raises(ValueError):\n graph_no_edges.adjacent('Raccoon', 'Rocket')", "def test_case4(self):\n\n graph = BipartiteGraph()\n self.assertRaises(KeyError, lambda: graph.removeEdge(\"supervisor1\",\"student1\"))", "def test_validate_self_invalid_transition_state(self):\n with nose.assert_raises(exceptions.InvalidStateError):\n self.dtm1.transitions['q5'] = self.dtm1.transitions['q0']\n self.dtm1.validate_self()", "def test_nonrev_exception(self):\n preds = predicate.MotifChange(\"A\", \"G\", forward_only=True)\n with self.assertRaises(ValueError):\n sm = substitution_model.TimeReversibleNucleotide(predicates=[preds])", "def test_self_loop_raises_error(self):\n g = nx.complete_graph(3).to_directed()\n edge_weight_data = {edge: (i + 1) * 0.5 for i, edge in enumerate(g.edges)}\n for k, v in edge_weight_data.items():\n g[k[0]][k[1]][\"weight\"] = v\n\n g.add_edge(1, 1) # add self loop\n\n with pytest.raises(ValueError, match=\"Graph contains self-loops\"):\n loss_hamiltonian(g)", "def test_error_node():\n try:\n node_a = Node({'a':'a'})\n except Exception as e:\n assert str(e) == 'input connected nodes info is not in a list.'", "def test_attemptMigrationFails(self):\n obj, migration, pendingMigration = self._mkMigrationJunk()\n\n def _explode(*a, **kw):\n return fail(ValueError('42'))\n object.__setattr__(self.mockStore, 'storeObject', _explode)\n\n def _eb(f):\n # .store is set to None on deletion\n self.assertNotIdentical(pendingMigration.store, None)\n tb = pendingMigration.lastFailure\n [tb2] = self.flushLoggedErrors(ValueError)\n self.assertIn(u'ValueError: 42', tb)\n self.assertEquals(tb.encode('ascii'), tb2.getTraceback())\n\n d = 
pendingMigration.attemptMigration()\n return self.assertFailure(d, ValueError).addErrback(_eb)", "def test_case32(self):\n\n self.assertRaises(KeyError, lambda: self.graph1.swapStudents(\"student5\",\"supervisor5\",\"student6\",\"supervisor6\"))", "def test_update_node_state_smartfail(self):\n pass", "def test_validate_self_invalid_transition_result_direction(self):\n with nose.assert_raises(tmexceptions.InvalidDirectionError):\n self.dtm1.transitions['q0']['y'] = ('q3', 'y', 'U')\n self.dtm1.validate_self()", "def test_edge_driver_errors(self):\n\n with pytest.raises(\n ValueError, match=r\"Encountered invalid entry in 'reward', expected 2-bit bitstrings.\"\n ):\n qaoa.edge_driver(Graph([(0, 1), (1, 2)]), [\"10\", \"11\", 21, \"g\"])\n\n with pytest.raises(\n ValueError,\n match=r\"'reward' cannot contain either '10' or '01', must contain neither or both.\",\n ):\n qaoa.edge_driver(Graph([(0, 1), (1, 2)]), [\"11\", \"00\", \"01\"])\n\n with pytest.raises(ValueError, match=r\"Input graph must be a nx.Graph\"):\n qaoa.edge_driver([(0, 1), (1, 2)], [\"00\", \"11\"])", "def test_addPath_oneStepAwayCycle(self):\n g = Garden()\n g.addPath('chicken', 'v1', [\n ('egg', 'v1'),\n ])\n self.assertRaises(CycleError, g.addPath, 'egg', 'v1', [\n ('chicken', 'v1'),\n ])", "def test_cost_graph_error(self):\n\n graph = [(0, 1), (1, 2)]\n\n with pytest.raises(ValueError, match=r\"Input graph must be a nx\\.Graph\"):\n qaoa.maxcut(graph)\n with pytest.raises(ValueError, match=r\"Input graph must be a nx\\.Graph\"):\n qaoa.max_independent_set(graph)\n with pytest.raises(ValueError, match=r\"Input graph must be a nx\\.Graph\"):\n qaoa.min_vertex_cover(graph)\n with pytest.raises(ValueError, match=r\"Input graph must be a nx\\.Graph\"):\n qaoa.max_clique(graph)", "def test_illegal_input(self):\n cs = ConfigurationSpace()\n cs.add_hyperparameter(UniformFloatHyperparameter('test', 1, 10, 5))\n scen = Scenario({'run_obj': 'quality', 'cs': cs})\n stats = Stats(scen)\n # Recorded runs but no incumbent.\n stats.ta_runs = 10\n smac = SMAC(scen, stats=stats, rng=np.random.RandomState(42))\n self.output_dirs.append(scen.output_dir)\n self.assertRaises(ValueError, smac.optimize)\n # Incumbent but no recoreded runs.\n incumbent = cs.get_default_configuration()\n smac = SMAC(scen, restore_incumbent=incumbent,\n rng=np.random.RandomState(42))\n self.assertRaises(ValueError, smac.optimize)", "def test_graph_cant_delete_without_argument(graph_no_edges):\n with pytest.raises(TypeError):\n graph_no_edges.del_nodes()", "def test_validate_self_invalid_transition_result_state(self):\n with nose.assert_raises(exceptions.InvalidStateError):\n self.dtm1.transitions['q0']['y'] = ('q5', 'y', 'R')\n self.dtm1.validate_self()", "def test_duplicate_named_input_edge(self):\n with self.assertRaises(ValidationError):\n with Graph('g'):\n n1, n2 = Node('a'), Node('b')\n n1 | 'bar' * n2\n n1 * 'foo' | 'bar' * n2", "def test_fails(self):\n raise FoolishError(\"I am a broken test\")", "def unexpectedException(self):", "def test_create_unexpected_problem(self):\n pass", "def duplicate_transition_raise_error(old_transition, new_transition):\n raise ValueError(\"Attempting to re-insert transition %s\" % old_transition)", "def test_addPath_obviousCycle(self):\n g = Garden()\n self.assertRaises(CycleError, g.addPath, 'foo', 'v1', [\n ('foo', 'v1'),\n ])", "def test_invalid_tensor_op_object_graph_pairing(self, data, description):\n with self.assertRaises((KeyError, AssertionError, TypeError), msg=description):\n data()" ]
[ "0.66778755", "0.6674113", "0.6551536", "0.6528747", "0.6398362", "0.6366871", "0.634431", "0.6334882", "0.6333377", "0.6324761", "0.6293048", "0.6279606", "0.6269768", "0.6257504", "0.6240395", "0.6227926", "0.6223702", "0.6170484", "0.6166687", "0.6150387", "0.6145652", "0.6124922", "0.6118476", "0.6086358", "0.6074203", "0.6073845", "0.6064073", "0.60579485", "0.60554636", "0.6034109" ]
0.7053854
0
Invert using 2D convolution, using the specified convolution function. Use the image im as a template. Do the PSF in a separate call. This is at the bottom of the layering, i.e. all transforms are eventually expressed in terms of this function. Any shifting needed is performed here.
def invert_2d(vis: Visibility, im: Image, dopsf: bool = False, normalize: bool = True, gcfcf=None, **kwargs) -> (Image, numpy.ndarray): assert isinstance(vis, Visibility), vis svis = copy_visibility(vis) if dopsf: svis.data['vis'][...] = 1.0+0.0j svis = shift_vis_to_image(svis, im, tangent=True, inverse=False) if gcfcf is None: gcf, cf = create_pswf_convolutionfunction(im, support=get_parameter(kwargs, "support", 6), oversampling=get_parameter(kwargs, "oversampling", 128)) else: gcf, cf = gcfcf griddata = create_griddata_from_image(im) griddata, sumwt = grid_visibility_to_griddata(svis, griddata=griddata, cf=cf) imaginary = get_parameter(kwargs, "imaginary", False) if imaginary: result0, result1 = fft_griddata_to_image(griddata, gcf, imaginary=imaginary) log.debug("invert_2d: retaining imaginary part of dirty image") if normalize: result0 = normalize_sumwt(result0, sumwt) result1 = normalize_sumwt(result1, sumwt) return result0, sumwt, result1 else: result = fft_griddata_to_image(griddata, gcf) if normalize: result = normalize_sumwt(result, sumwt) return result, sumwt
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_conv2d():\n img = np.array([\n [0.3, 0.5, 0.7, 0.9],\n [0.1, 0.3, 0.5, 0.7],\n [0.9, 0.7, 0.5, 0.3],\n ])\n template = np.array([\n [1, 0],\n [1, 0],\n ])\n template = np.flipud(np.fliplr(template))\n return fftconvolve(img, template, mode='valid')", "def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n\t\n ### START YOUR CODE HERE ### (You can change anything inside this block) \n\t\n H,W = np.shape(im)\n h,w = np.shape(kernel)\n t_b = (H-h)//2\n l_r = (W-w)//2\n kernel_padded = np.pad(kernel, ((t_b, t_b+1),(l_r, l_r+1)), 'constant')\n kernel_padded = np.pad(kernel, ((0, 2*t_b),(0, 2*l_r)), 'constant')\n fft_kernel = np.fft.fft2(kernel_padded, s=None, axes=(-2, -1), norm=None)\n \n \n im_fft = np.fft.fft2(im, s=None, axes=(-2, -1), norm=None) \n im_filt = im_fft*fft_kernel \n conv_result = np.fft.ifft2(im_filt, s=None, axes=(-2, -1), norm=None).real \n\n if verbose:\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(12, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 2, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 2, 2) \n plt.imshow(conv_result, cmap=\"gray\")\n\n ### END YOUR CODE HERE ###\n return conv_result", "def convolve_im(im: np.array,\n kernel: np.array,\n verbose=True):\n ### START YOUR CODE HERE ### (You can change anything inside this block)\n \"\"\"\n\tcompared to the 4a solution this just adds padding to the filter if its smaller than the image\n\tthis is done by using the second parameter in fft.fft2 \n\t\n\tfirst it applies fourier transforms on the kernel and the image\n\tthen it sets the image to be the pointwise multiplication of the transforms\n\n the image is inverse fourier transformed and filtered for real values\n the domain image is shifted and taken the absolute value of\n the fourier transform of the image and kernel are also shifted and set to be the absolute value\n\tlastly everything is displayed in the subplots\n \"\"\"\n conv_result = im \n \n if verbose:\n fftKernel=np.fft.fft2(kernel,im.shape)\n fftImage=np.fft.fft2(conv_result)\n\t\t\n\t\t\n\t\t\n conv_result=np.multiply(fftImage,fftKernel)\n fftImageTransformed=conv_result\n\t\t\n \n conv_result=np.fft.ifft2(conv_result)\n \n conv_result=np.real(conv_result)\n\n fftImageTransformed=np.fft.fftshift(fftImageTransformed)\n fftImage=np.fft.fftshift(fftImage)\n fftKernel=np.fft.fftshift(fftKernel)\n\n fftImageTransformed=np.absolute(fftImageTransformed)\n fftImage=np.absolute(fftImage)\n fftKernel=np.absolute(fftKernel)\n\t\t\n\t\t\n # Use plt.subplot to place two or more images beside eachother\n plt.figure(figsize=(20, 4))\n # plt.subplot(num_rows, num_cols, position (1-indexed))\n plt.subplot(1, 5, 1)\n plt.imshow(im, cmap=\"gray\")\n plt.subplot(1, 5, 2)\n plt.imshow(fftImage, cmap=\"gray\")\n plt.subplot(1, 5, 3)\n plt.imshow(fftKernel, cmap=\"gray\")\n plt.subplot(1, 5, 4)\n plt.imshow(fftImageTransformed, cmap=\"gray\")\n plt.subplot(1, 5, 5)\n plt.imshow(conv_result, cmap=\"gray\")\n ### END YOUR CODE HERE ###\n return conv_result", "def invert(self, img):\n return self.inverse()(img)", "def deconvolve(self, img, psf):\n self.data = pysap.Image(data=self.deconv.deconvolve(img, psf))", "def _conv2d_transpose_block(in_f, out_f, *args, **kwargs):\n return nn.Sequential(\n nn.ConvTranspose2d(in_f, out_f, *args, **kwargs),\n nn.BatchNorm2d(out_f),\n nn.ReLU(),\n nn.Dropout2d(p=0.2)\n )", "def image_conv(image, kernel):\n \n # Filter2D used for performance\n return cv2.filter2D(image, -1, kernel)", "def 
conv2D(inImage: np.ndarray, kernel2: np.ndarray) -> np.ndarray:\r\n flip_kernel = np.flipud(np.fliplr(kernel2))\r\n kernel_row = flip_kernel.shape[0]\r\n kernel_col = flip_kernel.shape[1]\r\n\r\n new_img = np.zeros_like(inImage)\r\n padded_img = padded_replicate(inImage, kernel_row, kernel_col)\r\n\r\n for x in range(inImage.shape[0]):\r\n for y in range(inImage.shape[1]):\r\n new_img[x, y] = (padded_img[x: x + kernel_col, y: y + kernel_row] * flip_kernel).sum()\r\n if flip_kernel.sum() != 0:\r\n new_img[x, y] /= flip_kernel.sum()\r\n\r\n return new_img", "def deconv(inp):\n num_filters = inp.get_shape().as_list()[-1]\n\n x = Conv2DTranspose(\n filters=num_filters,\n kernel_size=4,\n strides=2,\n padding=\"same\",\n use_bias=False,\n kernel_initializer=\"he_uniform\",\n )(inp)\n x = BatchNormalization()(x)\n x = Activation(\"elu\")(x)\n\n return x", "def perform(self, node, inp, out):\r\n img2d, filtersflipped = inp\r\n z, = out\r\n if not imported_scipy_signal:\r\n raise theano.gof.utils.MethodNotDefined(\r\n \"c_headers\", type(self), self.__class__.__name__,\r\n \"Need the python package for scipy.signal to be installed \"\r\n \"for the python implementation. You can use the C\"\r\n \" implementation instead.\")\r\n\r\n # TODO: move these back out to global scope when they no longer\r\n # cause an atexit error\r\n imshp = self.imshp\r\n if imshp is None or any([x is None for x in imshp]):\r\n imshp = tuple(img2d.shape[1:])\r\n kshp = self.kshp\r\n if kshp is None or any([x is None for x in kshp]):\r\n kshp = tuple(filtersflipped.shape[2:])\r\n bsize = self.bsize\r\n if bsize is None:\r\n bsize = img2d.shape[0]\r\n nkern = self.nkern\r\n if nkern is None:\r\n nkern = filtersflipped.shape[0]\r\n\r\n imshp_logical = self.imshp_logical\r\n if imshp_logical is None:\r\n imshp_logical = imshp\r\n if numpy.any([x is None for x in imshp_logical]):\r\n imshp_logical = tuple(img2d.shape[1:])\r\n\r\n kshp_logical = self.kshp_logical\r\n if kshp_logical is None:\r\n kshp_logical = kshp\r\n if numpy.any([x is None for x in kshp_logical]):\r\n kshp = tuple(filtersflipped.shape[2:])\r\n\r\n if self.fulloutshp is not None:\r\n fulloutshp = tuple(self.fulloutshp)\r\n else:\r\n fulloutshp = tuple(ConvOp.getOutputShape(imshp_logical[\r\n 1:], kshp_logical, (1, 1), self.out_mode))\r\n\r\n if z[0] is None or z[0].shape != (bsize, nkern,) + fulloutshp:\r\n z[0] = numpy.zeros((bsize, nkern,) + fulloutshp,\r\n dtype=img2d.dtype)\r\n zz = z[0]\r\n\r\n stacklen = imshp[0]\r\n\r\n img2d = img2d.reshape((bsize,) + imshp)\r\n filtersflipped = filtersflipped.reshape((nkern, stacklen) + kshp)\r\n\r\n if self.imshp != self.imshp_logical:\r\n # assuming that to get from imshp to imshp logical we insert zeros in missing spots\r\n rstride = int(numpy.ceil(imshp_logical[1] / float(imshp[1])))\r\n cstride = int(numpy.ceil(imshp_logical[2] / float(imshp[2])))\r\n buf = numpy.zeros((bsize,) + imshp_logical, dtype=img2d.dtype)\r\n buf[:, :, ::rstride, ::cstride] = img2d\r\n img2d = buf\r\n del buf, rstride, cstride\r\n\r\n if kshp != kshp_logical:\r\n rstride = int(numpy.ceil(kshp_logical[0] / float(kshp[0])))\r\n cstride = int(numpy.ceil(kshp_logical[1] / float(kshp[1])))\r\n buf = numpy.zeros((nkern, stacklen) +\r\n self.kshp_logical, dtype=filtersflipped.dtype)\r\n if self.kshp_logical_top_aligned:\r\n roffset = coffset = 0\r\n else:\r\n roffset = (kshp_logical[0] - (kshp[0] *\r\n rstride) - 1 + rstride) % rstride\r\n coffset = (kshp_logical[1] - (kshp[1] *\r\n cstride) - 1 + cstride) % cstride\r\n assert roffset >= 0\r\n 
assert coffset >= 0\r\n buf[:, :, roffset::rstride, coffset::cstride] = filtersflipped\r\n filtersflipped = buf\r\n del buf, rstride, cstride\r\n\r\n val = _valfrommode(self.out_mode)\r\n bval = _bvalfromboundary('fill')\r\n\r\n for b in xrange(bsize):\r\n for n in xrange(nkern):\r\n zz[b, n, ...].fill(0)\r\n for im0 in xrange(stacklen):\r\n zz[b, n, ...] += _convolve2d(img2d[b, im0, ...],\r\n filtersflipped[n, im0, ...],\r\n 1, val, bval, 0)\r\n\r\n if False:\r\n if False and self.out_mode == \"full\":\r\n img2d2 = numpy.zeros((bsize, stacklen,\r\n imshp[1] + 2 * kshp[0] - 2,\r\n imshp[2] + 2 * kshp[1] - 2))\r\n img2d2[:, :, kshp[0] - 1:kshp[0] - 1 + imshp[1],\r\n kshp[1] - 1:kshp[1] - 1 + imshp[2]] = img2d\r\n img2d = img2d2\r\n #N_image_shape = image_data.shape\r\n\r\n for b in xrange(bsize):\r\n for n in xrange(nkern):\r\n zz[b, n, ...].fill(0)\r\n for im0 in xrange(stacklen):\r\n for row in xrange(0, zz.shape[2], self.dx):\r\n for col in xrange(0, zz.shape[3], self.dy):\r\n zz[b, n, row, col] += (img2d[b, im0, row:row + kshp[0], col:col + kshp[1]] *\r\n filtersflipped[n, im0, ::-1, ::-1]).sum()\r\n\r\n #We copy it to remove the Stride mismatch warning from DEBUG_MODE.\r\n #The copy make that we return an object with the same stride as the c version.\r\n #The copy don't affect the performence during our experience as in that case we\r\n #execute the c version which is much faster.\r\n if self.dx > 1 or self.dy > 1:\r\n zz = zz[:, :, 0::self.dx, 0::self.dy].copy()\r\n\r\n z[0] = zz", "def invert_image(image, *args, **kwargs):\n # TODO: Implement the method\n\n image2 = Image.fromarray(image)\n image3 = Image.eval(image2, lambda a: 255-a)\n inv_image = num.asarray(image3)\n\n return inv_image", "def inv_conv2d(output_mat: np.ndarray,\n filter_mat: np.ndarray,\n theta: np.ndarray) -> np.ndarray:\n (N, M) = filter_mat.shape\n (P, Q) = output_mat.shape\n dim = (N + P - 1) * (M + Q - 1)\n assert len(theta) == dim - P * Q, 'theta dimensions wrong'\n huge_mat = _huge(filter_mat, (P, Q))\n flattened = np.concatenate((output_mat.reshape(P * Q), theta))\n input_flattened = np.dot(np.linalg.inv(huge_mat), flattened)\n return input_flattened.reshape((N + P - 1, M + Q - 1))", "def inv_conv2d_fft(output_mat: np.ndarray,\n filter_mat: np.ndarray,\n theta: np.ndarray) -> np.ndarray:\n output_shape = np.asarray(output_mat.shape)\n filter_shape = np.asarray(filter_mat.shape)\n input_shape = output_shape - filter_shape + 1\n\n fshape = [fftpack.helper.next_fast_len(int(d)) for d in output_shape]\n fslice = tuple([slice(0, int(sz)) for sz in input_shape])\n\n output_mat = _uncentered(output_mat, fshape)\n flipped_mat = filter_mat[::-1, ::-1]\n\n sp1 = np.fft.rfftn(output_mat, fshape)\n sp2 = np.fft.rfftn(flipped_mat, fshape)\n ret = np.fft.irfftn(sp1 / sp2, fshape)[fslice].copy()\n return ret.real", "def convolve_one_image(self,input4D, one_image, image_shape, \n Pstruct, filter_shape,\n image_index,\n channel_index): \n \n \n ## We look at the composition for the first channel in the beginning \n rank = Pstruct[0]['U1'].shape[1]\n fwidth = filter_shape[2]\n fheight = filter_shape[3]\n \n \n # Construct horizontal filters\n #TODO save the filters in the correct shape\n horizontal_filter_shape = (rank, 1, fwidth)\n horizontal_filters = np.ndarray(horizontal_filter_shape)\n horizontal_filters[:, 0, :] = np.transpose(Pstruct[channel_index]['U1']);\n \n # Output is 1 x rank x W x H\n horizontal_conv_out = conv.conv2d(input=one_image, \n filters = horizontal_filters,\n filter_shape = horizontal_filter_shape, \n 
image_shape = image_shape)\n \n # Construct vertical filters\n vertical_filter_shape = (rank, fheight, 1)\n vertical_filters = np.ndarray(vertical_filter_shape) \n vertical_filters[:,:, 0] = np.transpose(Pstruct[channel_index]['U2']);\n\n initial_n_rows = image_shape[1]\n final_n_rows = initial_n_rows- fwidth + 1\n final_n_cols = image_shape[2] - fheight + 1 \n conv_out = theano.shared(np.zeros((rank, final_n_rows, final_n_cols)))\n for r in range(rank):\n # temp is 1x1x imageW x imageH\n A = conv.conv2d(input = horizontal_conv_out[:,r,:,:], \n filters = vertical_filters[r,:,:],\n filter_shape = (1, fheight, 1), \n image_shape = (1, initial_n_rows, final_n_cols))\n conv_out = T.set_subtensor(conv_out[r,:,:], A[0,:,:])\n \n nbr_filters = Pstruct[0]['U3'].shape[0]\n # Final number of rows and columns \n ## numberof images, number of filters, image width, image height\n alphas = Pstruct[channel_index]['U3'] \n for f in range(nbr_filters): \n temp = theano.shared(np.zeros((final_n_rows, final_n_cols)))\n for r in range(rank):\n temp = temp + conv_out[r, :,:]* alphas[f, r] * Pstruct[channel_index]['lmbda'][r]; \n input4D =T.set_subtensor(input4D[image_index,f,:,:], temp)\n return input4D", "def vflip(img, data_format='CHW'):\n _assert_image_tensor(img, data_format)\n\n h_axis = _get_image_h_axis(data_format)\n\n return img.flip(axis=[h_axis])", "def invert_functional_brain_mask_workflow(workflow, resource_pool, config,\n name=\"_\"):\n\n import copy\n import nipype.pipeline.engine as pe\n from nipype.interfaces.afni import preprocess\n\n if \"functional_brain_mask\" not in resource_pool.keys():\n\n from functional_preproc import functional_brain_mask_workflow\n old_rp = copy.copy(resource_pool)\n workflow, resource_pool = \\\n functional_brain_mask_workflow(workflow, resource_pool, config, name)\n if resource_pool == old_rp:\n return workflow, resource_pool\n \n # 3dcalc to invert the binary functional brain mask\n invert_mask = pe.Node(interface=preprocess.Calc(), \n name='invert_mask%s' % name)\n\n invert_mask.inputs.expr = \"iszero(a)\"\n invert_mask.inputs.outputtype = \"NIFTI_GZ\"\n\n # functional_brain_mask -> 3dcalc \n if len(resource_pool[\"functional_brain_mask\"]) == 2:\n node, out_file = resource_pool[\"functional_brain_mask\"]\n workflow.connect(node, out_file, invert_mask, 'in_file_a')\n else:\n invert_mask.inputs.in_file_a = resource_pool[\"functional_brain_mask\"]\n\n resource_pool[\"inverted_functional_brain_mask\"] = (invert_mask, 'out_file')\n\n return workflow, resource_pool", "def fwd_conv2d_fft(input_mat: np.ndarray,\n filter_mat: np.ndarray) -> np.ndarray:\n flipped_mat = filter_mat[::-1, ::-1]\n return fftconvolve(input_mat, flipped_mat, mode='full')", "def dense_conv_forward_2d(inp_image: np.ndarray, kernel: np.ndarray, stride, padding):\n assert len(inp_image.shape) == 3, 'single 2D images only. 
No batches.'\n assert len(kernel.shape) == 4\n\n height, width, colors = inp_image.shape\n kernel_height, kernel_width, colors_in, colors_out = kernel.shape\n kernel_stride_x, kernel_stride_y = stride\n kernel_padding_x, kernel_padding_y = padding\n i_f = int(np.floor(kernel_width / 2.0))\n j_f = int(np.floor(kernel_height / 2.0))\n\n out_pixels = np.zeros((height, width, colors_out))\n for y in range(kernel_padding_y, height - kernel_padding_y,\n kernel_stride_y): # todo: add kernel_padding_y and kernel_stride_y fix to glsl\n for x in range(kernel_padding_x, width - kernel_padding_x,\n kernel_stride_x): # todo: add kernel_padding_x and kernel_stride_x fix to glsl\n output_select = [y, x, 0]\n input_select = np.asarray(\n [y * kernel_stride_y, x * kernel_stride_x, 0]\n )\n for i in range(-np.int(np.floor(kernel_width / 2.0)), np.int(np.ceil(kernel_width / 2.0))):\n for j in range(-np.int(np.floor(kernel_height / 2.0)), np.int(np.ceil(kernel_height / 2.0))):\n in_pixel_select = np.copy(input_select)\n in_pixel_select += [j, i, 0]\n for co in range(colors_out):\n output_select[2] = co\n for ci in range(colors_in):\n in_pixel_select[2] = ci\n kernel_select = np.asarray([j_f + j, i_f + i, ci, co])\n\n out_pixels[tuple(output_select)] += kernel[tuple(kernel_select)] * inp_image[\n tuple(in_pixel_select)]\n return out_pixels", "def flip_augmentation():\n return lambda image: ImageOps.flip(image)", "def conv(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n out = np.zeros((Hi, Wi))\n\n # For this assignment, we will use edge values to pad the images.\n # Zero padding as used in the previous assignment can make\n # derivatives at the image boundary very big.\n \n pad_width0 = Hk // 2\n pad_width1 = Wk // 2\n pad_width = ((pad_width0,pad_width0),(pad_width1,pad_width1))\n padded = np.pad(image, pad_width, mode='edge') \n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n kernel = np.flipud(np.fliplr(kernel)) # flip h/v\n for h in range(Hi):\n for w in range(Wi):\n out[h, w] = np.sum(np.multiply(kernel, padded[h : h + Hk, w : w + Wk]))\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return out", "def partial_x(img):\n\n out = None\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n kernel = np.array([[0.5, 0, -0.5]])\n out = conv(img, kernel)\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return out", "def inverse(im): \t \n x,y = np.shape(im)\n img = np.zeros([x,y])\n\t\n for i in range(x):\n for j in range(y):\n img[i,j] = 255 - im[i,j]\n return img", "def convolve(img, fourier_kernel):\n return np.fft.ifftshift(np.fft.irfft2(np.fft.rfft2(img) * fourier_kernel))", "def askapsoft_decimate_n_extract(af, over_sampling, kernel_support):\n\n # why is this normalization required..?\n rescale = over_sampling*over_sampling\n #rescale = 1\n\n cSize = 2 * kernel_support + 1\n itsConvFunc=np.zeros((over_sampling, over_sampling, cSize, cSize), dtype=complex)\n\n for fracu in range(0,over_sampling):\n for fracv in range(0,over_sampling):\n\n # Now cut out the inner part of the convolution function and\n # insert it into the convolution function\n for iy in range(-kernel_support,kernel_support+1):\n for ix in range(-kernel_support,kernel_support+1):\n\n nx = af.shape[0]\n ny = af.shape[1]\n\n # assumes support is the same for all w-planes:\n xval = (ix) * 
over_sampling + fracu + nx / 2\n yval = (iy) * over_sampling + fracv + ny / 2\n\n itsConvFunc[fracu, fracv, ix+cSize/2, iy+cSize/2] \\\n = rescale * af[xval, yval]\n\n return itsConvFunc[::-1,::-1]", "def _conv2d_block(in_f, out_f, *args, **kwargs):\n return nn.Sequential(\n nn.Conv2d(in_f, out_f, *args, **kwargs),\n nn.BatchNorm2d(out_f),\n nn.ReLU(),\n nn.Dropout2d(p=0.2)\n )", "def test_conv_inverts_deconv(transformer_factory, input_size, filter_size, padding, stride):\n\n # convolutions whose output size are not an even multiple of stride cannot be exactly inverted\n a = (input_size + sum(padding) - filter_size) % stride\n conv_output = utils.conv_output_dim(input_size, filter_size, padding, stride)\n deconv_output = utils.deconv_output_dim(conv_output, filter_size, padding, stride)\n\n assert deconv_output == (input_size - a), (\"Convolution and Deconvolution do not invert:\\n\"\n \"output ({}) != input ({}) - a ({})\\n\"\n \"filter: {}, padding: {}, stride: {}\"\n ).format(deconv_output, input_size, a,\n filter_size, padding, stride)", "def get_fc_inv(fc):\n return scipy.linalg.pinvh(fc.T @ fc) @ fc.T", "def MyConvolve(img, ff):\n result = np.zeros(img.shape)\n x_len = img.shape[0]\n y_len = img.shape[1]\n\n ff = np.flipud(np.fliplr(ff)) # Flip filters\n\n # Apply filter to pixels\n for x in range(1, x_len - 1):\n for y in range(1, y_len - 1):\n # Left column\n top_left = img[x - 1, y - 1] * ff[0, 0]\n left = img[x, y - 1] * ff[1, 0]\n btm_left = img[x + 1, y - 1] * ff[2, 0]\n # Middle column\n top = img[x - 1, y] * ff[0, 1]\n middle = img[x, y] * ff[1, 1]\n btm = img[x + 1, y] * ff[2, 1]\n # Right column\n top_right = img[x - 1, y + 1] * ff[0, 2]\n right = img[x, y + 1] * ff[1, 2]\n btm_right = img[x + 1, y + 1] * ff[2, 2]\n\n result[x, y] = top_left + left + btm_left + top + middle + btm + top_right + right + btm_right\n\n return result", "def conv(image, kernel):\n Hi, Wi = image.shape\n Hk, Wk = kernel.shape\n out = np.zeros((Hi, Wi))\n\n # For this assignment, we will use edge values to pad the images.\n # Zero padding will make derivatives at the image boundary very big,\n # whereas we want to ignore the edges at the boundary.\n pad_width0 = Hk // 2\n pad_width1 = Wk // 2\n pad_width = ((pad_width0,pad_width0),(pad_width1,pad_width1))\n padded = np.pad(image, pad_width, mode='edge')\n\n ### YOUR CODE HERE\n for i in range(Hi):\n for j in range(Wi):\n out[i,j] = np.sum(padded[i : i + Hk, j : j + Wk] * np.flip(kernel))\n ### END YOUR CODE\n\n return out", "def deconv2d(layer_input, skip_input, filters, f_size=3, dropout_rate=0, padding='same', strides=2):\n u = UpSampling2D(size=2)(layer_input)\n u = Conv2D(filters, kernel_size=f_size, strides=strides, padding=padding, activation='relu')(u)\n if dropout_rate:\n u = Dropout(dropout_rate)(u)\n u = BatchNormalization()(u)\n u = UpSampling2D(size=2)(u)\n u = Concatenate()([u, skip_input])\n return u\n\n # Image input" ]
[ "0.62379956", "0.6189527", "0.60548985", "0.5914404", "0.58883965", "0.58311486", "0.58114636", "0.5808238", "0.5806143", "0.57928836", "0.5729075", "0.57058555", "0.5699199", "0.56814635", "0.56235063", "0.56076056", "0.55912095", "0.5589819", "0.55747116", "0.55583715", "0.55356115", "0.5529207", "0.55267155", "0.55122286", "0.55015445", "0.54880524", "0.54739755", "0.54404664", "0.54381335", "0.5434118" ]
0.62493515
0
Predict the visibility from a Skycomponent, add to existing visibility, for Visibility or BlockVisibility
def predict_skycomponent_visibility(vis: Union[Visibility, BlockVisibility], sc: Union[Skycomponent, List[Skycomponent]]) -> Union[Visibility, BlockVisibility]: if sc is None: return vis if not isinstance(sc, collections.Iterable): sc = [sc] if isinstance(vis, Visibility): _, im_nchan = list(get_frequency_map(vis, None)) for comp in sc: assert isinstance(comp, Skycomponent), comp assert_same_chan_pol(vis, comp) l, m, n = skycoord_to_lmn(comp.direction, vis.phasecentre) phasor = simulate_point(vis.uvw, l, m) comp_flux = comp.flux[im_nchan, :] vis.data['vis'][...] += comp_flux[:,:] * phasor[:, numpy.newaxis] elif isinstance(vis, BlockVisibility): ntimes, nant, _, nchan, npol = vis.vis.shape k = numpy.array(vis.frequency) / constants.c.to('m s^-1').value for comp in sc: # assert isinstance(comp, Skycomponent), comp assert_same_chan_pol(vis, comp) flux = comp.flux if comp.polarisation_frame != vis.polarisation_frame: flux = convert_pol_frame(flux, comp.polarisation_frame, vis.polarisation_frame) l, m, n = skycoord_to_lmn(comp.direction, vis.phasecentre) uvw = vis.uvw[..., numpy.newaxis] * k phasor = numpy.ones([ntimes, nant, nant, nchan, npol], dtype='complex') for chan in range(nchan): phasor[:, :, :, chan, :] = simulate_point(uvw[..., chan], l, m)[..., numpy.newaxis] vis.data['vis'][..., :, :] += flux[:, :] * phasor[..., :] return vis
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def UpdateVisibility(self):\r\n # Clear the map\r\n self.ClearVisibilityMap()\r\n \r\n # Only update it if we have a player\r\n if not self.game.player:\r\n return\r\n \r\n max_vis_day = self.data.get('max_visibility', self.game.data['map']['max_visibility'])\r\n max_vis_night = self.data.get('max_visibility_night', self.game.data['map']['max_visibility_night'])\r\n \r\n #TODO(g): Add day/night cycle\r\n max_vis = max_vis_day\r\n \r\n # Cast rays from the player. Step out from the player and find the\r\n # angle to the player to determine if visible.\r\n center = self.game.player.pos.ToList()\r\n \r\n # Check every tile\r\n for y in range(center[1] - max_vis, center[1] + max_vis):\r\n for x in range(center[0] - max_vis, center[0] + max_vis):\r\n dist = rpg_base.GetDistance(center, [x, y])\r\n # Only really test tiles that are within viewing range\r\n if dist <= max_vis:\r\n #Log('%s -> %s = %s' % (center, [x, y], dist))\r\n if self.game.map.HasLineOfSightToPlayer(x, y):\r\n self.SetVisibility(x, y)", "def test_visibility(self, data, visible):\n layer = Points(data)\n assert layer.visible is True\n\n layer = Points(data, visible=visible)\n assert layer.visible is visible\n\n layer.visible = not visible\n assert layer.visible is not visible", "def _switch_layer_visibility(self, change: Dict):\n try:\n self.logger.debug(\"Checkbox callback called.\")\n if change[\"name\"] == \"value\":\n owner = change[\"owner\"]\n self.viewer.set_layer_visibility(\n owner.layer_type, owner.layer_name, owner.layer_subtype, change.new\n )\n\n self.viewer.layer_update()\n except: # pylint: disable=bare-except\n self.logger.exception(\"Exception in view checkbox callback on click.\")", "def predict_2d(vis: Union[BlockVisibility, Visibility], model: Image, gcfcf=None,\n **kwargs) -> Union[BlockVisibility, Visibility]:\n \n if model is None:\n return vis\n \n assert isinstance(vis, Visibility), vis\n\n _, _, ny, nx = model.data.shape\n \n if gcfcf is None:\n gcf, cf = create_pswf_convolutionfunction(model,\n support=get_parameter(kwargs, \"support\", 6),\n oversampling=get_parameter(kwargs, \"oversampling\", 128))\n else:\n gcf, cf = gcfcf\n \n griddata = create_griddata_from_image(model)\n griddata = fft_image_to_griddata(model, griddata, gcf)\n vis = degrid_visibility_from_griddata(vis, griddata=griddata, cf=cf)\n \n # Now we can shift the visibility from the image frame to the original visibility frame\n svis = shift_vis_to_image(vis, model, tangent=True, inverse=True)\n \n return svis", "def update_visible(self, immediate=False):\n raise NotImplementedError", "def setSurfaceVisibility(visible='both'):\n vdict = {'both':'BOTH','top':'TOP','bottom':'BOTTOM'}\n dislin.survis(vdict[visible])", "def check_visibility(self):\r\n\r\n for gs in self.ground_stations:\r\n if self.visible ^ (elevation_dot_product(self.r_ecef,self.ground_stations[gs][1],self.earth) > 0.0):\r\n self.visible ^= 1\r\n self.gs_id = self.ground_stations[gs][0]\r\n return True", "def Visibility(self, *xpars):\n return self.combineResult(*xpars)", "async def test_visibility_changes() -> None:\n\n class VisibleTester(App[None]):\n \"\"\"An app for testing visibility changes.\"\"\"\n\n CSS = \"\"\"\n Widget {\n height: 1fr;\n }\n .hidden {\n visibility: hidden;\n }\n \"\"\"\n\n def compose(self) -> ComposeResult:\n yield VerticalScroll(\n Widget(id=\"keep\"), Widget(id=\"hide-via-code\"), Widget(id=\"hide-via-css\")\n )\n\n async with VisibleTester().run_test() as pilot:\n assert pilot.app.query_one(\"#keep\").visible is True\n assert 
pilot.app.query_one(\"#hide-via-code\").visible is True\n assert pilot.app.query_one(\"#hide-via-css\").visible is True\n\n pilot.app.query_one(\"#hide-via-code\").styles.visibility = \"hidden\"\n await pilot.pause(0)\n assert pilot.app.query_one(\"#keep\").visible is True\n assert pilot.app.query_one(\"#hide-via-code\").visible is False\n assert pilot.app.query_one(\"#hide-via-css\").visible is True\n\n pilot.app.query_one(\"#hide-via-css\").set_class(True, \"hidden\")\n await pilot.pause(0)\n assert pilot.app.query_one(\"#keep\").visible is True\n assert pilot.app.query_one(\"#hide-via-code\").visible is False\n assert pilot.app.query_one(\"#hide-via-css\").visible is False", "def SetVisibility(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_SetVisibility(self, *args)", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def setVisibleCondition(*args):", "def set_layer_visibility(self, layer_visibility):\n # type: (Map, str) -> None\n\n if not layer_visibility:\n raise ValueError(\"Layer visibility cannot be None.\")\n\n try:\n (layers_op, layers_list) = [s.strip() for s in layer_visibility.split(\":\")]\n layers_list = [id.strip() for id in layers_list.split(\",\")]\n except:\n raise ValueError(\"Could not parse layers list to determine layer visiblity: %s\", layer_visibility)\n\n for layer in self.listLayers():\n layer = layer #type: Layer\n layer_id = str(layer.getDefinition(get_cim_version()).serviceLayerID)\n\n if caseless_equal(layers_op, \"show\"):\n # make only the specified layers visible\n if layer_id in layers_list:\n layer.visible = True\n else:\n layer.visible = False\n elif caseless_equal(layers_op, \"hide\"):\n # make all layers visible except those specified\n if layer_id in layers_list:\n layer.visible = False\n else:\n layer.visible = True\n elif caseless_equal(layers_op, \"include\"):\n # make the specified layers visible, along with the defaults\n if layer_id in layers_list:\n layer.visible = True\n elif caseless_equal(layers_op, \"exclude\"):\n # make the specified layers invisible, along with the defaults\n if layer_id in layers_list:\n layer.visible = False\n else:\n raise ValueError(\"Layer visibility operation not recognized.\")", "def on_external_visibility(self):\n state = self.external_visibility_btn.isChecked()\n self._set_filter_value('vizExternalState', state)\n if state:\n self.externalVizToggled.emit()", "def switchVisibility(Proxy):\n ProxyRep = smp.GetRepresentation(Proxy)\n ProxyRep.Visibility = not ProxyRep.Visibility", "def visibility(self, visibility):\n\n self._visibility = visibility", "def visibility(self, visibility):\n\n self._visibility = visibility", "def visibility(self, visibility):\n\n self._visibility = visibility", "def test_visible_blacklisted(self):\n\n self.feature_test.set_percentage(100)\n self.feature_test.add_to_blacklist(3)\n self.assertFalse(self.feature_test.is_visible(3))", "def calculate_visibility(qv,qc,qr,qi,qs,T,p):\n Rd = 287.\n COEFLC = 144.7\n COEFLP = 2.24\n COEFFC = 327.8\n COEFFP = 10.36\n EXPLC = 0.88\n EXPLP = 0.75\n EXPFC = 1.\n EXPFP = 0.7776\n\n Tv = T * (1+0.61*qv) # Virtual temperature\n\n rhoa = p/(Rd*Tv) # Air density [kg m^-3]\n rhow = 1e3 # Water density [kg m^-3]\n rhoi = 0.917e3 # Ice density [kg m^-3]\n\n 
vovmd = (1+qv)/rhoa + (qc+qr)/rhow + (qi+qs)/rhoi\n\n conc_lc = 1e3*qc/vovmd\n conc_lp = 1e3*qr/vovmd\n conc_fc = 1e3*qi/vovmd\n conc_fp = 1e3*qs/vovmd\n\n # Make sure all concentrations are positive\n conc_lc[conc_lc < 0] = 0\n conc_lp[conc_lp < 0] = 0\n conc_fc[conc_fc < 0] = 0\n conc_fp[conc_fp < 0] = 0\n\n betav = COEFFC*conc_fc**EXPFC\\\n + COEFFP*conc_fp**EXPFP\\\n + COEFLC*conc_lc**EXPLC\\\n + COEFLP*conc_lp**EXPLP+1E-10\n\n vis = -np.log(0.02)/betav # Visibility [km]\n vis[vis > 24.135] = 24.135\n\n return vis", "def test_visible_white_and_blacklisted(self):\n\n self.feature_test.set_percentage(0)\n self.feature_test.add_to_whitelist(3)\n self.feature_test.add_to_blacklist(3)\n self.assertTrue(self.feature_test.is_visible(3))" ]
[ "0.63595897", "0.6236446", "0.5877378", "0.5763672", "0.56774557", "0.5585451", "0.55744827", "0.5517068", "0.55012023", "0.545785", "0.54522437", "0.54522437", "0.54522437", "0.54522437", "0.54522437", "0.54522437", "0.54522437", "0.54522437", "0.54522437", "0.54522437", "0.54522437", "0.542736", "0.5396875", "0.5351675", "0.5335071", "0.5335071", "0.5335071", "0.5321659", "0.5271404", "0.5262097" ]
0.68695766
0
Make an empty image from params and Visibility. This makes an empty, template image consistent with the visibility, allowing optional overriding of select parameters. This is a convenience function and does not transform the visibilities.
def create_image_from_visibility(vis, **kwargs) -> Image: assert isinstance(vis, Visibility) or isinstance(vis, BlockVisibility), \ "vis is not a Visibility or a BlockVisibility: %r" % (vis) log.debug("create_image_from_visibility: Parsing parameters to get definition of WCS") imagecentre = get_parameter(kwargs, "imagecentre", vis.phasecentre) phasecentre = get_parameter(kwargs, "phasecentre", vis.phasecentre) # Spectral processing options ufrequency = numpy.unique(vis.frequency) vnchan = len(ufrequency) frequency = get_parameter(kwargs, "frequency", vis.frequency) inchan = get_parameter(kwargs, "nchan", vnchan) reffrequency = frequency[0] * units.Hz channel_bandwidth = get_parameter(kwargs, "channel_bandwidth", 0.99999999999 * vis.channel_bandwidth[0]) * units.Hz if (inchan == vnchan) and vnchan > 1: log.debug( "create_image_from_visibility: Defining %d channel Image at %s, starting frequency %s, and bandwidth %s" % (inchan, imagecentre, reffrequency, channel_bandwidth)) elif (inchan == 1) and vnchan > 1: assert numpy.abs(channel_bandwidth.value) > 0.0, "Channel width must be non-zero for mfs mode" log.debug("create_image_from_visibility: Defining single channel MFS Image at %s, starting frequency %s, " "and bandwidth %s" % (imagecentre, reffrequency, channel_bandwidth)) elif inchan > 1 and vnchan > 1: assert numpy.abs(channel_bandwidth.value) > 0.0, "Channel width must be non-zero for mfs mode" log.debug("create_image_from_visibility: Defining multi-channel MFS Image at %s, starting frequency %s, " "and bandwidth %s" % (imagecentre, reffrequency, channel_bandwidth)) elif (inchan == 1) and (vnchan == 1): assert numpy.abs(channel_bandwidth.value) > 0.0, "Channel width must be non-zero for mfs mode" log.debug("create_image_from_visibility: Defining single channel Image at %s, starting frequency %s, " "and bandwidth %s" % (imagecentre, reffrequency, channel_bandwidth)) else: raise ValueError("create_image_from_visibility: unknown spectral mode ") # Image sampling options npixel = get_parameter(kwargs, "npixel", 512) uvmax = numpy.max((numpy.abs(vis.data['uvw'][:, 0:1]))) if isinstance(vis, BlockVisibility): uvmax *= numpy.max(frequency) / constants.c.to('m s^-1').value log.debug("create_image_from_visibility: uvmax = %f wavelengths" % uvmax) criticalcellsize = 1.0 / (uvmax * 2.0) log.debug("create_image_from_visibility: Critical cellsize = %f radians, %f degrees" % ( criticalcellsize, criticalcellsize * 180.0 / numpy.pi)) cellsize = get_parameter(kwargs, "cellsize", 0.5 * criticalcellsize) log.debug("create_image_from_visibility: Cellsize = %g radians, %g degrees" % (cellsize, cellsize * 180.0 / numpy.pi)) override_cellsize = get_parameter(kwargs, "override_cellsize", True) if override_cellsize and cellsize > criticalcellsize: log.debug("create_image_from_visibility: Resetting cellsize %g radians to criticalcellsize %g radians" % ( cellsize, criticalcellsize)) cellsize = criticalcellsize pol_frame = get_parameter(kwargs, "polarisation_frame", PolarisationFrame("stokesI")) inpol = pol_frame.npol # Now we can define the WCS, which is a convenient place to hold the info above # Beware of python indexing order! 
wcs and the array have opposite ordering shape = [inchan, inpol, npixel, npixel] log.debug("create_image_from_visibility: image shape is %s" % str(shape)) w = wcs.WCS(naxis=4) # The negation in the longitude is needed by definition of RA, DEC w.wcs.cdelt = [-cellsize * 180.0 / numpy.pi, cellsize * 180.0 / numpy.pi, 1.0, channel_bandwidth.to(units.Hz).value] # The numpy definition of the phase centre of an FFT is n // 2 (0 - rel) so that's what we use for # the reference pixel. We have to use 0 rel everywhere. w.wcs.crpix = [npixel // 2 + 1, npixel // 2 + 1, 1.0, 1.0] w.wcs.ctype = ["RA---SIN", "DEC--SIN", 'STOKES', 'FREQ'] w.wcs.crval = [phasecentre.ra.deg, phasecentre.dec.deg, 1.0, reffrequency.to(units.Hz).value] w.naxis = 4 # TODO: Why is this check being done? # direction_centre = pixel_to_skycoord(npixel // 2 + 1, npixel // 2 + 1, wcs=w, origin=1) # assert direction_centre.separation(imagecentre).value < 1e-7, \ # "Image phase centre [npixel//2, npixel//2] should be %s, actually is %s" % \ # (str(imagecentre), str(direction_centre)) w.wcs.radesys = get_parameter(kwargs, 'frame', 'ICRS') w.wcs.equinox = get_parameter(kwargs, 'equinox', 2000.0) return create_image_from_array(numpy.zeros(shape), wcs=w, polarisation_frame=pol_frame)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _blankimage():\n img = TK.PhotoImage(width=1, height=1)\n img.blank()\n return img", "def blank(width, height, channels=3, value=0):\n blank_image = np.full((height, width, channels), value, np.uint8)\n return Image(img=blank_image)", "def _read_empty(self):\n self.image_missing = True\n\n return_img = {}\n return_metadata = {}\n\n try:\n rows, cols = self.grid.subset_shape\n except AttributeError:\n rows, cols = self.grid.shape\n\n for param in self.parameters:\n data = np.full((rows, cols), np.nan)\n return_img[param] = data.flatten()\n return_metadata[param] = {'image_missing': 1}\n\n return return_img, return_metadata", "def create_empty_image(width=512, height=512):\n blank_img = np.zeros((width, height, 3), np.uint8)\n # Return instance of the class\n return ExtendedImage(blank_img)", "def EmptyBitmap(*args, **kwargs):\n val = _gdi_.new_EmptyBitmap(*args, **kwargs)\n return val", "def build_filler_images(self):", "def get_blank_img(self):\n if photos_settings.DEBUG:\n return self.get_placeholder_img()\n\n out = {\n 'blank': True,\n 'width': self.max_width,\n 'height': self.max_height,\n 'url': photos_settings.EMPTY_IMAGE_SITE_PREFIX + 'img/empty/%s.png' % (self.name),\n }\n return out", "def gen_empty_img(w=640, h=480):\n return np.zeros((h, w, 3), np.uint8)", "def blank_image(height, width):\n all_green = create_uniform_image(height, width, [0, 255, 0])\n return all_green", "def _EmptyBitmapRGBA(*args, **kwargs):\n return _gdi_._EmptyBitmapRGBA(*args, **kwargs)", "def create_base_image(self, builder, template, parameters):", "def NullImageProto(msg:str = \"\"):\n return NLImage(width=0, height=0, data=msg)", "def stop_image_builder(Name=None):\n pass", "def get_background(choice: str, origin, width, height) -> Image.Image:\n if choice == 'transparent':\n # 4-channel\n return Image.fromarray(np.uint8(np.zeros((height, width, 4))))\n elif choice == 'black':\n return Image.fromarray(np.uint8(np.zeros((height, width, 3))))\n elif choice == 'white':\n return Image.fromarray(np.uint8(np.ones((height, width, 3)) * 255))\n elif choice == 'mean':\n mean = np.mean(np.array(origin)[:])\n return Image.fromarray(np.uint8(np.ones((height, width, 3)) * mean))\n elif choice.startswith('origin'):\n opacity = float(choice[-1]) / 10\n canvas = origin.resize((width, height), Image.BICUBIC).filter(\n ImageFilter.GaussianBlur(25)\n )\n canvas = np.array(canvas)\n canvas = np.uint8(canvas[:, :, 0:3] * opacity)\n return Image.fromarray(canvas)", "def create_blank(width, height, rgb_color=(0, 0, 0)):\r\n # Create black blank image\r\n image = np.zeros((height, width, 3), np.uint8)\r\n\r\n # Since OpenCV uses BGR, convert the color first\r\n color = tuple(reversed(rgb_color))\r\n # Fill image with color\r\n image[:] = color\r\n\r\n return image", "def set_default_parameters(self):\n super().set_default_parameters()\n if not \"n_sub_images\" in vars(self):\n self.n_sub_images = -1 # do all-sub-images", "def create_blank(width, height, rgb_color=(0, 0, 0)):\n # Create black blank image\n image = np.zeros((height, width, 3), np.uint8)\n\n # Since OpenCV uses BGR, convert the color first\n color = tuple(reversed(rgb_color))\n # Fill image with color\n image[:] = color\n\n return image", "def create_blank(width, height, rgb_color=(0, 0, 0)):\n # Create black blank image\n image = np.zeros((height, width, 3), np.uint8)\n\n # Since OpenCV uses BGR, convert the color first\n color = tuple(reversed(rgb_color))\n # Fill image with color\n image[:] = color\n\n return image", "def 
delete_image_builder(Name=None):\n pass", "def _clear(self):\n\n self.image = Image.new(\"RGB\", (self._width, self._height), self._color)", "def create_white_picture(pic_width, pic_height):\n white_picture = Image.new(\"1\", (pic_width, pic_height), (1))\n return white_picture", "def clear(self):\n self.display(Image.new(self.mode, self.size))", "def create_full_pic(self):\n self.create_half_pic()\n mirror_update(self.flag)", "def create_blank(w, h, rgb_color=(0, 0, 0)):\n image = np.zeros((h, w), np.uint8)\n color = tuple(reversed(rgb_color))\n image[:] = 0\n return image", "def create_blank(w, h, rgb_color=(0, 0, 0)):\n image = np.zeros((h, w), np.uint8)\n color = tuple(reversed(rgb_color))\n image[:] = 0\n return image", "def make_image(vis_mxds, img_xds, grid_parms, vis_sel_parms, img_sel_parms):\n print('######################### Start make_image #########################')\n import numpy as np\n from numba import jit\n import time\n import math\n import dask.array.fft as dafft\n import xarray as xr\n import dask.array as da\n import matplotlib.pylab as plt\n import dask\n import copy, os\n from numcodecs import Blosc\n from itertools import cycle\n \n from cngi._utils._check_parms import _check_sel_parms, _check_existence_sel_parms\n from ._imaging_utils._check_imaging_parms import _check_grid_parms\n from ._imaging_utils._gridding_convolutional_kernels import _create_prolate_spheroidal_kernel, _create_prolate_spheroidal_kernel_1D\n from ._imaging_utils._standard_grid import _graph_standard_grid\n from ._imaging_utils._remove_padding import _remove_padding\n from ._imaging_utils._aperture_grid import _graph_aperture_grid\n from cngi.image import make_empty_sky_image\n \n #print('****',sel_parms,'****')\n _mxds = vis_mxds.copy(deep=True)\n _img_xds = img_xds.copy(deep=True)\n _vis_sel_parms = copy.deepcopy(vis_sel_parms)\n _img_sel_parms = copy.deepcopy(img_sel_parms)\n _grid_parms = copy.deepcopy(grid_parms)\n\n ##############Parameter Checking and Set Defaults##############\n assert(_check_grid_parms(_grid_parms)), \"######### ERROR: grid_parms checking failed\"\n assert('xds' in _vis_sel_parms), \"######### ERROR: xds must be specified in sel_parms\" #Can't have a default since xds names are not fixed.\n _vis_xds = _mxds.attrs[_vis_sel_parms['xds']]\n \n #Check vis data_group\n _check_sel_parms(_vis_xds,_vis_sel_parms)\n \n #Check img data_group\n _check_sel_parms(_img_xds,_img_sel_parms,new_or_modified_data_variables={'sum_weight':'SUM_WEIGHT','image':'IMAGE'},append_to_in_id=True)\n\n ##################################################################################\n \n # Creating gridding kernel\n _grid_parms['oversampling'] = 100\n _grid_parms['support'] = 7\n \n cgk, correcting_cgk_image = _create_prolate_spheroidal_kernel(_grid_parms['oversampling'], _grid_parms['support'], _grid_parms['image_size_padded'])\n cgk_1D = _create_prolate_spheroidal_kernel_1D(_grid_parms['oversampling'], _grid_parms['support'])\n \n _grid_parms['complex_grid'] = True\n _grid_parms['do_psf'] = False\n _grid_parms['do_imaging_weight'] = False\n grids_and_sum_weights = _graph_standard_grid(_vis_xds, cgk_1D, _grid_parms, _vis_sel_parms)\n uncorrected_dirty_image = dafft.fftshift(dafft.ifft2(dafft.ifftshift(grids_and_sum_weights[0], axes=(0, 1)), axes=(0, 1)), axes=(0, 1))\n \n #Remove Padding\n correcting_cgk_image = _remove_padding(correcting_cgk_image,_grid_parms['image_size'])\n uncorrected_dirty_image = _remove_padding(uncorrected_dirty_image,_grid_parms['image_size']).real * 
(_grid_parms['image_size_padded'][0] * _grid_parms['image_size_padded'][1])\n \n #############Normalize#############\n def correct_image(uncorrected_dirty_image, sum_weights, correcting_cgk):\n sum_weights_copy = copy.deepcopy(sum_weights) ##Don't mutate inputs, therefore do deep copy (https://docs.dask.org/en/latest/delayed-best-practices.html).\n sum_weights_copy[sum_weights_copy == 0] = 1\n # corrected_image = (uncorrected_dirty_image/sum_weights[:,:,None,None])/correcting_cgk[None,None,:,:]\n corrected_image = (uncorrected_dirty_image / sum_weights_copy) / correcting_cgk\n return corrected_image\n\n corrected_dirty_image = da.map_blocks(correct_image, uncorrected_dirty_image, grids_and_sum_weights[1][None, None, :, :],correcting_cgk_image[:, :, None, None])\n ####################################################\n\n if _grid_parms['chan_mode'] == 'continuum':\n freq_coords = [da.mean(_vis_xds.coords['chan'].values)]\n chan_width = da.from_array([da.mean(_vis_xds['chan_width'].data)],chunks=(1,))\n imag_chan_chunk_size = 1\n elif _grid_parms['chan_mode'] == 'cube':\n freq_coords = _vis_xds.coords['chan'].values\n chan_width = _vis_xds['chan_width'].data\n imag_chan_chunk_size = _vis_xds.DATA.chunks[2][0]\n \n phase_center = _grid_parms['phase_center']\n image_size = _grid_parms['image_size']\n cell_size = _grid_parms['cell_size']\n phase_center = _grid_parms['phase_center']\n\n pol_coords = _vis_xds.pol.data\n time_coords = [_vis_xds.time.mean().data]\n \n _img_xds = make_empty_sky_image(_img_xds,phase_center,image_size,cell_size,freq_coords,chan_width,pol_coords,time_coords)\n \n \n \n _img_xds[_img_sel_parms['data_group_out']['sum_weight']] = xr.DataArray(grids_and_sum_weights[1][None,:,:], dims=['time','chan','pol'])\n _img_xds[_img_sel_parms['data_group_out']['image']] = xr.DataArray(corrected_dirty_image[:,:,None,:,:], dims=['l', 'm', 'time', 'chan', 'pol'])\n _img_xds.attrs['data_groups'][0] = {**_img_xds.attrs['data_groups'][0],**{_img_sel_parms['data_group_out']['id']:_img_sel_parms['data_group_out']}}\n \n \n print('######################### Created graph for make_image #########################')\n return _img_xds", "def no_image(cls):\n def eval_fn(p: Posting):\n if p.img_url is None:\n return f\"I couldn't find any images for this posting.\"\n\n return cls(eval_fn)", "def no_bin(image, *args, **kwargs):\n return image", "def make_empty_img_from_img(img, dimensions=3):\n xlen, ylen, zlen = img.GetSize()\n dupe = img[:, :, :]\n for x in xrange(xlen):\n for y in xrange(ylen):\n if dimensions == 3:\n for z in xrange(zlen):\n dupe.SetPixel(x, y, z, 0)\n else:\n dupe.SetPixel(x, y, 0)\n return dupe", "def image_erosion(filter_size=3, repeat=1):\n def img_op(image: np.ndarray,**kwargs):\n structure_shape = [1] * image.ndim\n structure_shape[0]=filter_size\n structure_shape[1] = filter_size\n for i in range(repeat):\n image =ndimage.morphology.grey_erosion(image,size=(filter_size,filter_size),structure=np.ones(tuple(structure_shape)))\n return clip(image,0,255)\n\n return img_op" ]
[ "0.6082279", "0.58083063", "0.5590598", "0.5545989", "0.5522535", "0.549299", "0.54731494", "0.54632276", "0.538827", "0.5353823", "0.5334222", "0.52348214", "0.50866646", "0.5055959", "0.49559405", "0.49474832", "0.4938404", "0.4938404", "0.49174386", "0.4908951", "0.48748562", "0.48589942", "0.48497987", "0.4841284", "0.4841284", "0.48355567", "0.48348713", "0.4831051", "0.4819222", "0.4816615" ]
0.5933551
1
Method that allows saving the Course
def save_course(self): print("Course sauvegardee") print(self.Course) print("self.var_nom : "+self.var_nom.get()) self.Course.name=self.var_nom.get() print("self.vqr_ete : "+str(self.var_ete.get())) if(self.var_ete.get()==1): self.Course.season = "Seulement ete" elif(self.var_hiver.get()==1): self.Course.season = "Seulement hiver" else: self.Course.season = "Toutes" if self.var_OK_invites.get() == 1: self.Course.OK_for_invitee = True if self.var_preparer_la_veille.get() == 1: self.Course.prepare_day1 = True if self.var_legume.get() == 1: self.Course.type_course = "Legume" elif self.var_viande.get() == 1: self.Course.type_course = "Viande" elif self.var_poisson.get() == 1: self.Course.type_course = "Poisson" elif self.var_puree.get() == 1: self.Course.type_course = "Puree" elif self.var_soupe.get() == 1: self.Course.type_course = "Soupe" elif self.var_salade.get() == 1: self.Course.type_course = "Salade" elif self.var_autre .get() == 1: self.Course.type_course = "Autres" else: self.Course.type_course = "Autres" self.Course.recipe = self.text_recipe.get("1.0",END) self.Course.link = self.text_link.get("1.0",END) print(self.Course) self.getListOfRecette() self.list_course.append(self.Course) self.saveListOfRecette() #on quitte la fenetreTopLevel self.parentFrame.destroy()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_courses(std):\n return std[2]", "def course_tester(courses):\n\n return False", "def items(self, course):\r\n pass", "def course(self):\n return self.section.course", "def addCourse(self):\n\t\tcourseName = input(\"What is the new course name? \")\n\t\tcourseGrade = eval(input(\"What grade point did you get? \"))\n\t\tself.courses.append(Course(courseName,courseGrade))\n\t\tself.gpa = self.calculateGPA()", "def refresh_course(self):\r\n self.course = modulestore().get_course(self.course.id)", "def build_course(self):\n courses = []\n aprovacao_d = {}\n # semestral\n for rate_it in self.__semestral_rate:\n # pega uma lista no qual o primeiro elemento é a taxa, o segundo\n # e o terceiro são quantidades\n rate_data = self.analysis[\"semestral_rate\"][rate_it.name]\n for i in rate_data[0].index:\n if i[0] not in aprovacao_d:\n aprovacao_d[i[0]] = {}\n\n periodo = str(i[1]) + \"/\" + str(i[2])\n aprovacao_d[i[0]][periodo] = [\n float(rate_data[0][i]),\n int(rate_data[1][i]),\n int(rate_data[2][i]),]\n\n note = self.analysis[\"general_note_statistic\"]\n note_last_year = self.analysis[\"last_year_statistic\"]\n for course in self.analysis[\"courses\"].index:\n course_dict = {}\n course_dict[\"disciplina_codigo\"] = course\n course_dict[\"disciplina_nome\"] = self.analysis[\"courses\"][course]\n \n # If the course code is related to more than one name,\n # concatenate these names into an unique string\n if type(course_dict[\"disciplina_nome\"]) != str:\n new_course_name = \" | \".join(list(course_dict[\"disciplina_nome\"]))\n course_dict[\"disciplina_nome\"] = new_course_name\n # quantidade de matriculas\n count = self.analysis[\"general_count_submission\"][course]\n course_dict[\"qtd_alunos\"] = count\n # notas\n course_dict[\"qtd_cursada_aprov\"] = self.analysis[\"coursed_ratio\"][course]\n course_dict[\"nota\"] = [note[0][course], note[1][course]]\n course_dict[\"nota_ultimo_ano\"] = [\n note_last_year[0][course],\n note_last_year[1][course]\n ]\n # taxas\n for rate_it in self.__rates:\n rate_data = self.analysis[\"general_rates\"][rate_it.name]\n course_dict[rate_it.name] = float(rate_data[0][course])\n course_str = rate_it.name.replace(\"taxa\", \"qtd\")\n # count_sel define qual quantidade vai para o json, a especifica\n # ou geral\n course_dict[course_str] = int(\n rate_data[rate_it.count_sel][course])\n # rate_calc = self.analysis[\"general_rates\"][rate_it.name][0]\n\n # taxas do ultimo anos\n course_dict[\"taxa_reprovacao_ultimo_absoluto\"] = self.analysis[\"last_year_taxa_reprovacao_absoluta\"][course]\n course_dict[\"taxa_reprovacao_ultimo_frequencia\"] = self.analysis[\"last_year_taxa_reprovacao_frequencia\"][course]\n\n course_dict[\"grafico_qtd_cursada_aprov\"] = self.analysis[\"coursed_count\"][course]\n course_dict[\"aprovacao_semestral\"] = aprovacao_d[course]\n courses.append(course_dict)\n return courses", "def _get_courses(self) -> None:\n\n courses_content: NavigableString = self.soup.find(\"div\", \n {\"class\": \"coursesContent\"})\n course_items: ResultSet = courses_content.find_all(\"div\", \n {\"class\": \"courseItem\"})\n\n for item in course_items:\n course_name: str = item.a[\"href\"].split(\"/\")[-2].lower()\n course_data: ParseType = self._parse(item)\n self._update(course_name, course_data)", "def test_course_filter(self):\n user = self.make_user()\n enrollment = EnrollmentFactory(grade_level__school_year__school=user.school)\n student = enrollment.student\n course_1 = CourseFactory(grade_levels=[enrollment.grade_level])\n course_2 = 
CourseFactory(grade_levels=[enrollment.grade_level])\n GradeFactory(student=student, graded_work__course_task__course=course_1)\n GradeFactory(student=student, graded_work__course_task__course=course_2)\n url = self.reverse(\"reports:progress\", pk=enrollment.id)\n url += f\"?course={course_1.id}\"\n\n with self.login(user):\n self.get_check_200(url)\n\n assert len(self.get_context(\"courses\")) == 1", "def save_course(self):\r\n self.course.save()\r\n self.store.update_item(self.course, self.user.id)", "def test_course_overview_view_with_course(self):\r\n course = CourseFactory.create(org='MITx', course='999', display_name='Robot Super Course')\r\n resp = self._show_course_overview(course.id)\r\n self.assertContains(\r\n resp,\r\n '<article class=\"courseware-overview\" data-locator=\"location:MITx+999+Robot_Super_Course+course+Robot_Super_Course\" data-course-key=\"slashes:MITx+999+Robot_Super_Course\">',\r\n status_code=200,\r\n html=True\r\n )", "def completed_course()->str:\r\n try:\r\n db: sqlite3.Connection = sqlite3.connect(DB_FILE)\r\n query: str = \"SELECT s.Name, s.CWID, g.Course, g.Grade, i.Name AS 'Instructor' \" \\\r\n \"FROM grades2 g JOIN students2 s ON g.StudentCWID = s.CWID \" \\\r\n \"JOIN instructors2 i ON g.InstructorCWID = i.CWID ORDER BY s.Name\" \r\n except sqlite3.OperationalError as e:\r\n print(e)\r\n\r\n data: Dict[str,str]=\\\r\n [{\"name\": name,\"cwid\": cwid,\"course\": course , \"grade\":grade, \"instructor\": instructor}\r\n for name, cwid, course, grade, instructor in db.execute(query)]\r\n db.close()\r\n \"\"\"Render the template from templates folder\"\"\"\r\n return render_template(\"student_summary.html\",\r\n title=\"Stevens Repository\",\r\n table_title=\"Student, Course, Grade and Instructor\",\r\n students=data )", "def edit_course(self, course):\n EDIT_COURSE = \"\"\"UPDATE Course SET subject_code = %s, credit_hours = %s, description = %s WHERE name = %s\"\"\"\n\n self.db_cursor.execute(EDIT_COURSE, (\n course.subject_code, course.credit_hours, course.description, course.name))\n self.db_connection.commit()\n\n DELETE_COURSE_TOPICS = \"\"\"DELETE FROM CourseTopics WHERE course_name = %s\"\"\"\n self.db_cursor.execute(DELETE_COURSE_TOPICS, (course.name,))\n self.db_connection.commit()\n INSERT_COURSE_TOPICS = \"\"\"INSERT INTO CourseTopics (course_name, topic_id) VALUES (%s, %s)\"\"\"\n for ct in course.topics:\n self.db_cursor.execute(INSERT_COURSE_TOPICS, (course.name,ct))\n self.db_connection.commit()\n\n DELETE_COURSE_GOALS = \"\"\"DELETE FROM CourseGoals WHERE course_name = %s\"\"\"\n self.db_cursor.execute(DELETE_COURSE_GOALS, (course.name,))\n self.db_connection.commit()\n INSERT_COURSE_GOALS = \"\"\"INSERT INTO CourseGoals (course_name, goal_id) VALUES (%s, %s)\"\"\"\n for cg in course.goals:\n self.db_cursor.execute(INSERT_COURSE_GOALS, (course.name, cg))\n self.db_connection.commit()", "def testCourses(self):\n self.person.invokeFactory(type_name=\"FSDCourse\", id=\"test-course\")\n self.failUnless('test-course' in self.person.contentIds())\n self.failUnless('test-course' in [c.id for c in self.person.getCourses()])", "def reload_course(self):\r\n self.course = self.store.get_course(self.course.id)", "def test_has_course(self):\r\n check_has_course_method(\r\n XMLModuleStore(DATA_DIR, course_dirs=['toy', 'simple']),\r\n SlashSeparatedCourseKey('edX', 'toy', '2012_Fall'),\r\n locator_key_fields=SlashSeparatedCourseKey.KEY_FIELDS\r\n )", "def product(self):\n return self.course", "def get_courses(self):\n\n self.search([]).unlink()\n token 
= self.env['odoo.moodle'].search([('create_uid', '=', self.env.user.id)]).token\n domain = \"http://localhost:8888\"\n webservice_url = \"/webservice/rest/server.php?\"\n parameters = {\n \"wstoken\":token,\n 'wsfunction': 'core_course_get_courses',\n 'moodlewsrestformat': 'json'\n }\n request = requests.get(url=domain+webservice_url, params=parameters)\n request = request.json()\n print(request)\n\n for req in request:\n try:\n if req['id']==1:\n pass\n else:\n self.create({\n 'course_id': req['id'], \n 'category':req['categoryid'],\n 'fullname':req['fullname'], \n 'shortname':req['shortname'],\n 'summary': req['summary']\n }\n )\n except Exception:\n print('Course not created')", "def test_course_index_view_with_course(self):\r\n CourseFactory.create(display_name='Robot Super Educational Course')\r\n resp = self.client.get_html('/course/')\r\n self.assertContains(\r\n resp,\r\n '<h3 class=\"course-title\">Robot Super Educational Course</h3>',\r\n status_code=200,\r\n html=True\r\n )\r\n _test_no_locations(self, resp)", "def get_course(self, _):\r\n courses = self.modulestore.get_courses()\r\n return courses[0]", "def _create_course(self):\r\n super(TestOrphan, self)._create_course()\r\n\r\n self._create_item('chapter', 'Chapter1', {}, {'display_name': 'Chapter 1'}, 'course', 'runid')\r\n self._create_item('chapter', 'Chapter2', {}, {'display_name': 'Chapter 2'}, 'course', 'runid')\r\n self._create_item('chapter', 'OrphanChapter', {}, {'display_name': 'Orphan Chapter'}, None, None)\r\n self._create_item('vertical', 'Vert1', {}, {'display_name': 'Vertical 1'}, 'chapter', 'Chapter1')\r\n self._create_item('vertical', 'OrphanVert', {}, {'display_name': 'Orphan Vertical'}, None, None)\r\n self._create_item('html', 'Html1', \"<p>Goodbye</p>\", {'display_name': 'Parented Html'}, 'vertical', 'Vert1')\r\n self._create_item('html', 'OrphanHtml', \"<p>Hello</p>\", {'display_name': 'Orphan html'}, None, None)\r\n self._create_item('static_tab', 'staticuno', \"<p>tab</p>\", {'display_name': 'Tab uno'}, None, None)\r\n self._create_item('about', 'overview', \"<p>overview</p>\", {}, None, None)\r\n self._create_item('course_info', 'updates', \"<ol><li><h2>Sep 22</h2><p>test</p></li></ol>\", {}, None, None)", "def update_course(self):\n # ensure that updating course is exists\n if self.is_course_exists():\n db = Course._file.read_db()\n for crs_i in range(len(db[\"courses\"])):\n if db[\"courses\"][crs_i][\"course_name\"] == self._course_name:\n\n # ensuring that user does not provided less number of limited places\n if db[\"courses\"][crs_i][\"total_place\"] > self._total_place:\n print(\"{} course's limited places number must be more than {}\".format(\n self._course_name,\n db[\"courses\"][crs_i][\"total_place\"]\n ))\n return\n\n db[\"courses\"][crs_i][\"teacher\"] = self._teacher\n db[\"courses\"][crs_i][\"total_place\"] = self._total_place\n break\n self._file.write_db(db)\n print(\"The course - {} is updated\".format(self._course_name))\n return self.get_course().course_info()", "def download_course_given(self, course_url: str):\n self.course_url = course_url\n self.get_course_page()\n self.get_course_title()\n self.get_course_unit_titles()\n self.get_course_unit_slugs()\n self.get_course_unit_urls()\n\n print(\"\\nGenerating Path Slugs...\\n\")\n self.get_course_all_slugs()\n self.get_course_youtube_ids()\n self.download_course_videos()", "def course_about(request, course_id):\r\n\r\n if microsite.get_value(\r\n 'ENABLE_MKTG_SITE',\r\n settings.FEATURES.get('ENABLE_MKTG_SITE', False)\r\n ):\r\n 
raise Http404\r\n course_key = SlashSeparatedCourseKey.from_deprecated_string(course_id)\r\n course = get_course_with_access(request.user, 'see_exists', course_key)\r\n registered = registered_for_course(course, request.user)\r\n staff_access = has_access(request.user, 'staff', course)\r\n studio_url = get_studio_url(course_key, 'settings/details')\r\n\r\n if has_access(request.user, 'load', course):\r\n course_target = reverse('info', args=[course.id.to_deprecated_string()])\r\n else:\r\n course_target = reverse('about_course', args=[course.id.to_deprecated_string()])\r\n\r\n show_courseware_link = (has_access(request.user, 'load', course) or\r\n settings.FEATURES.get('ENABLE_LMS_MIGRATION'))\r\n\r\n # Note: this is a flow for payment for course registration, not the Verified Certificate flow.\r\n registration_price = 0\r\n in_cart = False\r\n reg_then_add_to_cart_link = \"\"\r\n if (settings.FEATURES.get('ENABLE_SHOPPING_CART') and\r\n settings.FEATURES.get('ENABLE_PAID_COURSE_REGISTRATION')):\r\n registration_price = CourseMode.min_course_price_for_currency(course_key,\r\n settings.PAID_COURSE_REGISTRATION_CURRENCY[0])\r\n if request.user.is_authenticated():\r\n cart = shoppingcart.models.Order.get_cart_for_user(request.user)\r\n in_cart = shoppingcart.models.PaidCourseRegistration.contained_in_order(cart, course_key)\r\n\r\n reg_then_add_to_cart_link = \"{reg_url}?course_id={course_id}&enrollment_action=add_to_cart\".format(\r\n reg_url=reverse('register_user'), course_id=course.id.to_deprecated_string())\r\n\r\n # see if we have already filled up all allowed enrollments\r\n is_course_full = CourseEnrollment.is_course_full(course)\r\n\r\n return render_to_response('courseware/course_about.html', {\r\n 'course': course,\r\n 'staff_access': staff_access,\r\n 'studio_url': studio_url,\r\n 'registered': registered,\r\n 'course_target': course_target,\r\n 'registration_price': registration_price,\r\n 'in_cart': in_cart,\r\n 'reg_then_add_to_cart_link': reg_then_add_to_cart_link,\r\n 'show_courseware_link': show_courseware_link,\r\n 'is_course_full': is_course_full\r\n })", "def get_course(self, name):\n GET_TOPIC_IDS = \"\"\"SELECT topic_id FROM CourseTopics WHERE course_name = %s\"\"\"\n GET_GOAL_IDS = \"\"\"SELECT goal_id FROM CourseGoals WHERE course_name = %s\"\"\"\n\n ret = None\n\n try:\n self.db_cursor.execute(\"\"\"SELECT subject_code, credit_hours, description FROM Course WHERE name = %s\"\"\", (name,))\n c = self.db_cursor.fetchall()\n ret = Course()\n ret.subject_code = c[0][0]\n ret.credit_hours = c[0][1]\n ret.description = c[0][2]\n ret.name = name\n ret.goals = []\n ret.topics = []\n\n\n self.db_cursor.execute(GET_TOPIC_IDS, (name,))\n t_ids = self.db_cursor.fetchall()\n for id in t_ids:\n ret.topics.append(id[0])\n\n self.db_cursor.execute(GET_GOAL_IDS, (name,))\n g_ids = self.db_cursor.fetchall()\n for id in g_ids:\n ret.goals.append(id[0])\n\n except:\n logging.warning(\"DBAdapter: Error- cannot retrieve course: \" + str(name))\n return None\n\n return ret", "def select_course_detail(self, course_id, course_section_id):\n conn = sqlite3.connect(self.db_path)\n cursor = conn.cursor()\n with conn:\n cursor.execute(\n \"\"\"SELECT \n c.subject, c.course_num, c.course_title,\n cs.course_section_id, cs.schedule_days, cs.start_time, cs.end_time,\n i.first_name || ' ' || i.last_name AS 'Instructor Name', c.course_units\n FROM courses c\n JOIN course_sections cs\n ON c.course_id = cs.course_id\n JOIN instructors i\n ON cs.instructor_id = i.instructor_id\n WHERE c.course_id = ? 
AND cs.course_section_id = ?\"\"\",\n (course_id, course_section_id),\n )\n return cursor.fetchone()", "def course(self, value: int):\n self._course = value", "def get_course(self, id):\n id = str(id)\n for i in range(len(self.courses)):\n if self.courses[i].id == id:\n return self.courses[i]", "def get_text(self, course): # pylint: disable=unused-argument\r\n raise NotImplementedError", "def verify_course(self, course, course_id='edX/toy/2012_Fall'):\n assert course_id == str(course.id)" ]
[ "0.6775902", "0.6629837", "0.66172206", "0.6547874", "0.64357376", "0.63588977", "0.63275343", "0.63266003", "0.63218915", "0.6313343", "0.62600297", "0.62568486", "0.6254541", "0.6222173", "0.621786", "0.6193602", "0.6189944", "0.61762655", "0.6173807", "0.6138709", "0.61324036", "0.61178404", "0.60994005", "0.60716754", "0.60334545", "0.600035", "0.599421", "0.59931153", "0.5983787", "0.59756887" ]
0.7031105
0
returns a hash of all the configurations without the objects... just tuples of strings and ints
def getConfigHash(self): strHash = {} #keyed by appName, value = a list of configNames for appName, app in self._appConfigs.iteritems(): strHash[appName] = app.getAllConfigNames() return strHash
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __hash__(self) -> int:\n result = []\n for i in self._all_types:\n try:\n hash(i)\n result.append(i)\n except TypeError:\n pass\n return hash(tuple(result))", "def get_state_hash(self):\n return tuple([tuple([x for x in row if x not in [' ', '#']]) for row in self.map[1:-1]])", "def hashes(self):\n\t\treturn (self.weakHash, self.strongHash)", "def internal_hash(self): \n return hash(tuple(sorted(self.hashtriples())))", "def IsomorphicHash(self) -> int:\n # The hash is based on the nodes and edges, not their attributes.\n return hash((tuple(self.nodes), tuple(self.edges)))", "def hash(self):\r\n hash_list = []\r\n for image in self.images:\r\n hash_list.append(os.path.relpath(image.path))\r\n hash_list.append(image._image_data)\r\n\r\n for key, value in self.config.iteritems():\r\n hash_list.append(key)\r\n hash_list.append(value)\r\n\r\n if sys.version < '3':\r\n return hashlib.sha1(''.join(map(str, hash_list))).hexdigest()[:10]\r\n return hashlib.sha1(''.join(map(str, hash_list)).encode('utf-8')).hexdigest()[:10]", "def _Hash(self):\n out = [self.key.string_id()]\n properties = self._PropList()\n for prop in properties:\n out.append(unicode(getattr(self, prop, '')))\n to_hash = ''.join(out)\n return hashlib.md5(to_hash.encode('utf-8')).hexdigest()", "def __hash__(self):\r\n return hash(tuple(sorted(self.__dict__.items())))", "def __hash__(self):\n return hash(tuple(sorted(self.__dict__.items())))", "def __hash__(self):\n return hash(tuple(sorted(self.__dict__.items())))", "def __hash__(self):\n return hash(tuple(sorted(self.__dict__.items())))", "def __hash__(self):\n return hash(tuple(sorted(self.__dict__.items())))", "def __hash__(self):\n return hash(('genes', tuple(self.genes), self.environment))", "def get_hash(self):\n if self.contributes:\n return hash_from_values(self.iter_values())", "def __hash__(self):\n # This ignores app and namespace, which is fine since hash()\n # doesn't need to return a unique value -- it only needs to ensure\n # that the hashes of equal keys are equal, not the other way\n # around.\n return hash(tuple(self.pairs()))", "def __hash__(self) -> int:\n # The hash is based on the graph topology and node and edge attributes.\n return hash(\n (\n tuple(self.nodes),\n tuple(self.edges),\n tuple([str(self.nodes[n]) for n in self.nodes]),\n tuple([str(self.edges[i, j]) for i, j in self.edges]),\n )\n )", "def __hash__(self):\n return hash(frozenset(self.piles) + frozenset(self.mergeables))", "def create_config_hash(config):\n value_str = \"\"\n for section in config.sections:\n for key in section.keys():\n value_str += str(config[section][key])\n value_hash = hashlib.md5(value_str.encode('utf-8')).hexdigest()\n\n return value_hash", "def __hash__(self):\n return hash((self.namespace, self.option_name, self.value))", "def _hexdigests(self):\n\n return tuple(self._hashers[algo].hexdigest() for algo in self.algos)", "def __hash__(self):\n hash(self.components)", "def __hash__(self):\n hashable = tuple(self.pandas_object.values.tobytes())\n if isinstance(self.pandas_object, pd.DataFrame):\n hashable += tuple(self.pandas_object.columns)\n else:\n hashable += tuple(self.pandas_object.name)\n return hash(hashable)", "def __hash__(self):\n hash_value = 0\n \n # required\n hash_value ^= self.required << 14\n \n # title\n hash_value ^= hash(self.title)\n \n # type\n hash_value ^= hash(self.type)\n \n # values\n values = self.values\n if (values is not None):\n hash_value ^= len(values)\n \n for value in values:\n hash_value ^= hash(value)\n \n return hash_value", "def 
__hashvalue__(self):\n return (tuple((column, self[column])\n for column in filter(lambda x: x != \"__psvcolumnstracker__\", sorted(self.keys()))))", "def __hash__(self):\n pair = (self.size(), tuple(tuple(e) for e in self._cover_relations))\n return hash(pair)", "def __hash__(self):\n hash_value = 0\n \n # approximate_online_count\n hash_value ^= self.approximate_online_count\n \n # approximate_user_count\n hash_value ^= self.approximate_user_count << 12\n \n # description\n description = self.description\n if (description is not None):\n hash_value ^= hash(description)\n \n # discovery_splash\n hash_value ^= hash(self.discovery_splash)\n \n # emojis\n emojis = self.emojis\n hash_value ^= len(emojis) << 1\n for emoji in emojis.values():\n hash_value ^= hash(emoji)\n \n # features\n features = self.features\n hash_value ^= len(features) << 5\n for feature in features:\n hash_value ^= hash(feature)\n \n # icon\n hash_value ^= hash(self.icon)\n \n # id\n hash_value ^= self.id\n \n # invite_splash\n hash_value ^= hash(self.invite_splash)\n \n # stickers\n stickers = self.stickers\n hash_value ^= len(stickers) << 9\n for sticker in stickers.values():\n hash_value ^= hash(sticker)\n \n # name\n name = self.name\n if (description is None) or (description != name):\n hash_value ^= hash(name)\n \n return hash_value", "def __hash__(self):\n hash_content = []\n hash_content.extend(self.analyzer_options)\n hash_content.append(str(self.analyzer_type))\n hash_content.append(self.target[self.lang])\n hash_content.append(self.source)\n return hash(''.join(hash_content))", "def hash_functions(self):\n pass", "def HashAlgorithm(self) -> _n_7_t_0:", "def __hash__(self):\n base = 1\n h = 0\n for l in self.data:\n for i in l:\n if i:\n h += base\n base *= 2\n return hash(h)" ]
[ "0.68357277", "0.67516655", "0.6746455", "0.6700139", "0.6639515", "0.6615102", "0.65691537", "0.6569077", "0.65256405", "0.65256405", "0.65256405", "0.65256405", "0.6488089", "0.6475467", "0.6468475", "0.6413573", "0.6387785", "0.636889", "0.63641346", "0.6332809", "0.63141006", "0.6301038", "0.62844026", "0.6252676", "0.62473536", "0.6237656", "0.6230673", "0.6170963", "0.61599547", "0.61591524" ]
0.70877445
0
Inits dictionaries for general summary of data with all the modalities of the vocabulary (cover of each modality set to 0)
def initDictionnary(self): partitions = self.vocabulary.getPartitions() for partition in partitions: for mod in partition.modalities: self.summaryDict[partition.getAttName() + " : " + mod] = 0.0 self.summaryFilteredDict[partition.getAttName() + " : " + mod] = 0.0
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _init_dict(self):\n dict_ord = self.MIN_VALID\n\n for da in self.train_das:\n for dai in da:\n if dai.name not in self.dict_slot:\n self.dict_slot[dai.name] = dict_ord\n dict_ord += 1\n if dai.value not in self.dict_value:\n self.dict_value[dai.value] = dict_ord\n dict_ord += 1\n\n for tree in self.train_trees:\n for t_lemma, formeme in tree.nodes:\n if t_lemma not in self.dict_t_lemma:\n self.dict_t_lemma[t_lemma] = dict_ord\n dict_ord += 1\n if formeme not in self.dict_formeme:\n self.dict_formeme[formeme] = dict_ord\n dict_ord += 1\n\n self.dict_size = dict_ord", "def __init__(self, voc, df, listOfTerms, threshold):\n self.vocabulary = voc\n self.dataFile = df\n \"\"\" Each dictionnary has a key of the following pattern [partition : modality] \"\"\"\n self.summaryDict = collections.OrderedDict() # dictionnary for general summary of data\n self.summaryFilteredDict = collections.OrderedDict() # dictionnary for general summary of filtered data\n self.correlationDict = collections.OrderedDict() # dictionnary for correlations between modalities and filter condition\n self.threshold = threshold # threshold value for filter\n self.listOfTerms = None # filtering terms\n self.initListOfTerms(listOfTerms)", "def build_dataset(words):\n count = []\n # count.extend(collections.Counter(words).most_common(n_words - 1))\n count.extend(collections.Counter(words).most_common())\n dictionary = dict()\n for word, _ in count:\n dictionary[word] = len(dictionary)\n data = list()\n # unk_count = 0\n for word in words:\n index = dictionary.get(word, 0)\n # if index == 0: # dictionary['UNK']\n # unk_count += 1\n data.append(index)\n # count[0][1] = unk_count\n reversed_dictionary = dict(zip(dictionary.values(), dictionary.keys()))\n data = [data[::2],data[1::2]]\n new_data = list()\n for i in range(len(data[0])):\n new_data.append([data[0][i],data[1][i]])\n data = new_data\n vocabulary_size = len(dictionary)\n print(\"\\n\\ndictionary size = \")\n print(len(dictionary))\n return data, count, dictionary, reversed_dictionary, vocabulary_size", "def data_dict0():\n\n # 0- Sample from detectron2 -> 5 different sections.\n info_val0 = [{\"date_created\": \"2020-03-15 04:59:45.442988\",\n \"description\": \"Automatically generated COCO json file for Detectron2.\"}]\n images0 = [{\"id\": \"image\", \"width\": 100,\n \"height\": 100, \"file_name\": \"image.png\"}]\n annotations0 = [{\"id\": 1, \"image_id\": \"image\", \"bbox\": [70.0, 30.0, 30.0, 40.0],\n \"area\": 1200.0, \"iscrowd\": 0, \"category_id\": 0}]\n categories0 = [{\"id\": 0, \"name\": \"first\"}]\n licence0 = 'null'\n\n return [{\"info\": info_val0,\n \"images\": images0,\n \"annotations\": annotations0,\n \"categories\": categories0,\n \"licenses\": licence0}]", "def __init__(self):\r\n #\r\n # Create dictionaries for each characteristic\r\n #\r\n self.words = {} # For counting words\r\n self.wordlengths = {} # For counting word lengths\r\n self.stems = {} # For counting stems\r\n self.sentencelengths = {} # For counting sentence lengths\r\n #\r\n # Create another of your own\r\n #\r\n self.gerund = {} # For counting words with ing \r\n self.text = ''", "def featurize(vector,features):\n dictionary = collections.defaultdict(lambda:0)\n for feature in iter(set(features)):\n dictionary[feature] = [vector[key][feature] if feature in vector[key] else 0 for key in vector] #populates vectors with zeroes where there's no value in an industry for an n-gram.\n return dictionary", "def construct_dict(self):\n i = 0\n self.word2idx = dict()\n fi = 
open(self.config.word_vec_fi_glove, 'r')\n\n for line in fi:\n self.word2idx[line.split(\" \")[0]] = i\n i += 1\n\n self.vocab_size = i\n self.write_dict()\n fi.close()", "def createIndivitual(self) -> Dict[str, Any]:\n ind = {\n \"genome\": {\n key: numpy.random.randint(0, len(value), size=self.ref_count[key]) for (\n key, value) in self.grammar.items()\n },\n \"fitness\": None,\n \"fenotype\": None,\n }\n return ind", "def assumptions0(self):\n return {}", "def solution(self):\n return {\n \"count\": 0.,\n \"mean\": 0.,\n \"stdev\": 0.,\n \"5%\": 0.,\n \"25%\": 0.,\n \"median\": 0.,\n \"75%\": 0.,\n \"95%\": 0.,\n }", "def solution(self):\n return {\n \"count\": 0.,\n \"mean\": 0.,\n \"stdev\": 0.,\n \"5%\": 0.,\n \"25%\": 0.,\n \"median\": 0.,\n \"75%\": 0.,\n \"95%\": 0.,\n }", "def generate_counts():\n\n counts_dict = {}\n folder_path = os.listdir(args.f)\n for subfolder in folder_path:\n subfolder_path = os.path.join(args.f, subfolder)\n for filename in os.listdir(subfolder_path):\n doc_path = os.path.join(subfolder_path, filename)\n with open(doc_path, 'r') as file:\n read_file = file.read()\n normalised_text = re.sub(r\"[^\\s\\w]\", \" \", read_file.lower())\n counts_dict.update({doc_path: collections.Counter(normalised_text.split())})\n #print(counts_dict.get('file/crude/article560.txt'))\n\n vocab = generate_vocab()\n for value in counts_dict.values():\n for k in vocab.keys():\n if k not in value.items():\n value.update({k: 0})\n\n #print(counts_dict.get('file/crude/article560.txt'))\n return counts_dict", "def __init__(self):\n self.ngramCounts = collections.defaultdict(zero_fn);\n self.continuationProb = collections.defaultdict(set_fn);\n self.total = 0;", "def dict_initialise(metadata, analysistype):\n for sample in metadata:\n sample[analysistype].dnaseq = dict()\n sample[analysistype].protseq = dict()\n sample[analysistype].ntindex = dict()\n sample[analysistype].aaindex = dict()\n sample[analysistype].ntalign = dict()\n sample[analysistype].aaalign = dict()\n sample[analysistype].aaidentity = dict()\n return metadata", "def _initialise_sufficient_statistics(self):\n stats = super()._initialise_sufficient_statistics()\n\n stats['B'] = {\n 'numer': [\n np.zeros((self.n_states, self.n_features[i]))\n for i in range(self.n_emissions)\n ],\n 'denom': [\n np.zeros((self.n_states, self.n_features[i]))\n for i in range(self.n_emissions)\n ],\n }\n\n return stats", "def initialize(self):\n self.n_words = len(self.vocab)\n self.n_docs = len(self.documents)\n\n # Initialize the three count matrices.\n # The (i,j) entry of self.nmz is the number of words in document i assigned to topic j.\n self.nmz = np.zeros((self.n_docs, self.n_topics))\n # The (i,j) entry of self.nzw is the number of times term j is assigned to topic i.\n self.nzw = np.zeros((self.n_topics, self.n_words))\n # The (i)-th entry is the number of times topic i is assigned in the corpus.\n self.nz = np.zeros(self.n_topics)\n\n # Initialize the topic assignment dictionary.\n self.topics = {} # key-value pairs of form (m,i):z\n\n for m in range(self.n_docs):\n for i in self.documents[m]:\n # Get random topic assignment, i.e. 
z is a random integer in the range of topics\n z = np.random.randint(self.n_topics)\n # Increment count matrices\n self.nmz[m,z] += 1\n self.nzw[z,self.documents[m][i]] += 1\n self.nz[z] += 1\n # Store topic assignment\n self.topics[(m,i)] = z", "def load_initial_dictionaries():\n print globals.POS_COUNT_LIMIT, globals.NEG_COUNT_LIMIT, globals.NEU_COUNT_LIMIT\n pos_dict = {}\n neg_dict = {}\n neu_dict = {}\n un_label_dict = {}\n with open(\"../dataset/semeval.csv\", 'r') as main_dataset:\n main = csv.reader(main_dataset)\n pos_count = 1\n neg_count = 1\n neu_count = 1\n un_label_count = 1\n count = 1\n for line in main:\n if count % 3 == 0:\n if line[1] == \"positive\" and pos_count <= globals.POS_COUNT_LIMIT:\n pos_dict.update({str(pos_count): str(line[2])})\n pos_count += 1\n if line[1] == \"negative\" and neg_count <= globals.NEG_COUNT_LIMIT:\n neg_dict.update({str(neg_count): str(line[2])})\n neg_count += 1\n if line[1] == \"neutral\" and neu_count <= globals.NEU_COUNT_LIMIT:\n neu_dict.update({str(neu_count): str(line[2])})\n neu_count += 1\n if count % 3 == 1:\n un_label_dict.update({str(un_label_count): str(line[2])})\n un_label_count += 1\n count += 1\n\n ds.POS_DICT = pos_dict\n ds.NEG_DICT = neg_dict\n ds.NEU_DICT = neu_dict\n ds.UNLABELED_DICT = un_label_dict\n return", "def __init__ (self):\n self.lengths = {}\n self.lower_counts = {}\n self.upper_counts = {}\n self.digit_counts = {}\n self.symbol_counts = {}\n self.class_counts = {}\n self.word_counts = {}", "def _init_vocab(self):\n self._word2idx = {}\n self._idx2word = {}\n self.freqs = {}\n self.vocab_size = 0\n\n self._add_word(self.pad_word)\n self._add_word(self.start_word)\n self._add_word(self.end_word)\n self._add_word(self.unk_word)\n\n self.start_word_idx = self.stoi(self.start_word)\n self.end_word_idx = self.stoi(self.end_word)\n self.unk_word_idx = self.stoi(self.unk_word)\n self.pad_word_idx = self.stoi(self.pad_word)\n\n self._special_tokens = {\n 'bos_token': self.start_word,\n 'cls_token': self.start_word,\n 'eos_token': self.end_word,\n 'sep_token': self.end_word,\n 'pad_token': self.pad_word,\n 'unk_token': self.unk_word,\n }\n\n self._special_ids = {\n 'bos_token_id': self.start_word_idx,\n 'cls_token_id': self.start_word_idx,\n 'eos_token_id': self.end_word_idx,\n 'sep_token_id': self.end_word_idx,\n 'pad_token_id': self.pad_word_idx,\n 'unk_token_id': self.unk_word_idx,\n }\n\n self.cls_token_id = self.bos_token_id = self.start_word_idx\n self.eos_token_id = self.sep_token_id = self.end_word_idx\n self.pad_token_id = self.pad_word_idx\n self.unk_token_id = self.unk_word_idx\n\n self.cls_token = self.bos_token = self.start_word\n self.eos_token = self.sep_token = self.end_word\n self.pad_token = self.pad_word\n self.unk_token = self.unk_word", "def createAllDictionaries(self):\r\n self.makeSentenceLengths()\r\n self.makeWords()\r\n self.makeStems()\r\n self.makeGerund()\r\n self.makeWordLengths()", "def build_vocab(self, corpus):\n if self.vocabulary_counts != None:\n logger.debug(\"building vocabulary from provided frequency map\")\n vocab = self.vocabulary_counts\n else:\n logger.debug(\"default vocabulary building\")\n super(Skipgram, self).build_vocab(corpus)\n return\n\n # assign a unique index to each word\n self.vocab, self.index2word = {}, []\n\n for word, count in vocab.iteritems():\n v = Vocab()\n v.count = count\n if v.count >= self.min_count:\n v.index = len(self.vocab)\n self.index2word.append(word)\n self.vocab[word] = v\n\n logger.debug(\"total %i word types after removing those with 
count<%s\" % (len(self.vocab), self.min_count))\n\n if self.hs:\n # add info about each word's Huffman encoding\n self.create_binary_tree()\n if self.negative:\n # build the table for drawing random words (for negative sampling)\n self.make_table()\n # precalculate downsampling thresholds\n self.precalc_sampling()\n self.reset_weights()", "def create_dicts():\n load_data_for_dict('data/atis/train/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/valid/seq.in', 'data/atis/voc/vocabulary.json')\n load_data_for_dict('data/atis/test/seq.in', 'data/atis/voc/vocabulary.json') \n load_data_for_dict('data/atis/train/seq.out', 'data/atis/voc/slot_vocabulary.json')", "def build_vocab(self, min_count=3):\n word2count = defaultdict(int)\n for sentence in self.tokenized_corpus:\n for word in sentence:\n word2count[word] += 1\n\n word2dict = {}\n word2dict['PAD'] = {'id': 0}\n word2dict['UNK'] = {'id': 1}\n for word in word2count:\n if word2count[word] >= min_count:\n word2dict[word] = {'id': len(word2dict), 'count': word2count[word]}\n self.vocab = word2dict", "def __init__(self, corpus, epsilon=7):\n # TODO your code here'\n self.v = 0\n self.total=0\n self.epsilon=epsilon\n self.vocab = defaultdict(lambda:defaultdict(lambda:0))\n self.word_counts= defaultdict(lambda:0)\n self.train(corpus)", "def create_data_dict(model, chardict):\n \n # Less than 25 isn't part of original paper, but word2vec has some outrageous entries\n def include_word(word, chardict):\n \"\"\"\n Function to determine if word can be included and perform any parsing\n \"\"\"\n if (all(char in chardict.keys() for char in word)) & (len(word)<=25):\n # Some word2vec entries are all capitals and generally are acronyms.\n # This is unlikely to be learnable\n if not word.isupper():\n return True\n\n return False\n\n\n # Create list of words which will be used in training/testing our model\n all_words = dict({})\n\n # For every word in word2vec model establish if it is \"allowed\"; if it is\n # add the word to our all_words dict, with the embedding as the value\n for idx, key in enumerate(model.wv.vocab):\n if include_word(key, chardict):\n all_words[key] = model.wv[key]\n else:\n pass\n \n return all_words", "def build_vocab(sentences):\n # Build vocabulary\n word_counts = Counter(itertools.chain(*sentences)) # 实际没用到\n # Mapping from index to word\n vocabulary_inv = [x[0] for x in word_counts.most_common()]\n vocabulary_inv = list(sorted(vocabulary_inv))\n # 加入 <UNK>\n vocabulary_inv.insert(0, '</s>')\n # Mapping from word to index\n vocabulary = {x: i for i, x in enumerate(vocabulary_inv)}\n return [vocabulary, vocabulary_inv]", "def __init__(self, corpus):\n self.unigramCounts = collections.defaultdict(lambda: 0)\n self.bigramCounts = collections.defaultdict(lambda: 0)\n self.trigramCounts = collections.defaultdict(lambda: 0)\n self.followingWords = collections.defaultdict(lambda: set())\n self.precedingWords = collections.defaultdict(lambda: set())\n self.total = 0\n self.discount = 0.75\n self.train(corpus)", "def init_metrics(losses):\n metrics = {'train': {}, 'valid': {}}\n for mode, d in metrics.items():\n metrics[mode] = dict([(k, 0) for k in losses])\n\n return metrics", "def __init__(self, docs, n):\n self.n = n\n self.dict = {}\n self.vocab = set()\n self.sum_index = \"*sum*\"\n regex = re.compile(\"\\s+\")\n count = 0\n for doc in docs:\n terms = re.split(regex, doc)\n for term in terms:\n if term not in self.vocab:\n self.vocab.add(term)\n for i in range(0, len(terms) - n + 1):\n end = i+n-1\n t = 
tuple(terms[i:end])\n if t not in self.dict:\n self.dict[t] = {}\n self.dict[t][self.sum_index] = 0\n self.dict[t][self.sum_index] += 1\n end_term = terms[end]\n if end_term not in self.dict[t]:\n self.dict[t][end_term] = 1\n else:\n self.dict[t][end_term] += 1\n self.D = len(self.vocab)", "def __init__(self, corpus):\n self.unigramCounts = collections.defaultdict(lambda: 0)\n self.bigramCounts = collections.defaultdict(lambda: 0)\n self.total = 0\n self.train(corpus)" ]
[ "0.61584175", "0.6107097", "0.61064154", "0.6094799", "0.59786487", "0.59393686", "0.58659035", "0.58530694", "0.5824693", "0.5796534", "0.5796534", "0.57718873", "0.5747239", "0.5724415", "0.572306", "0.57186645", "0.5710093", "0.57018965", "0.5688566", "0.5671068", "0.56425345", "0.56382096", "0.5638121", "0.5636288", "0.560719", "0.5604189", "0.5599307", "0.5592964", "0.5555289", "0.5540435" ]
0.77364576
0
Initializes listOfTerms dictionnary, used to filter data
def initListOfTerms(self, listOfTerms): if listOfTerms is not None: self.listOfTerms = dict() self.filter = True for element in listOfTerms: partition = element.split(':')[0] modalities = element.split(':')[1] self.listOfTerms[partition] = modalities.split(";") print("Filtering flight's list with " + str(self.listOfTerms) + " and threshold : " + str(self.threshold)) else: self.filter = False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def initialize_terms_and_postings():\n global dictionary, postings\n for id in document_filenames:\n document = getDocumentContent(document_filenames[id])\n if(document_filenames[id].rfind(\".pdf\") == len(document_filenames[id]) - 4):\n terms = tokenize(document.encode('utf-8'))\n if(document_filenames[id].rfind(\".txt\") == len(document_filenames[id]) - 4):\n terms = tokenize(document)\n if(document_filenames[id].rfind(\".docx\") == len(document_filenames[id]) - 5):\n terms = tokenize(document)\n if(document_filenames[id].rfind(\".pptx\") == len(document_filenames[id]) - 5):\n terms = tokenize(document)\n unique_terms = set(terms)\n dictionary = dictionary.union(unique_terms)\n for term in unique_terms:\n postings[term][id] = terms.count(term) # the value is the\n # frequency of the\n # term in the\n # document", "def __init__(self, voc, df, listOfTerms, threshold):\n self.vocabulary = voc\n self.dataFile = df\n \"\"\" Each dictionnary has a key of the following pattern [partition : modality] \"\"\"\n self.summaryDict = collections.OrderedDict() # dictionnary for general summary of data\n self.summaryFilteredDict = collections.OrderedDict() # dictionnary for general summary of filtered data\n self.correlationDict = collections.OrderedDict() # dictionnary for correlations between modalities and filter condition\n self.threshold = threshold # threshold value for filter\n self.listOfTerms = None # filtering terms\n self.initListOfTerms(listOfTerms)", "def initialize_document_frequencies():\n global document_frequency\n for term in dictionary:\n document_frequency[term] = len(postings[term])", "def __init__(self, term_list, links=[]):\n\t\t# do type check\n\t\tif not isinstance(term_list, list):\n\t\t\traise TypeError('term_list must be of type list')\n\t\tif not isinstance(links, list):\n\t\t\traise TypeError('links must be of type list')\n\t\tself.term_dict = {x: term_list.count(x) for x in term_list}\n\t\tself.links = copy.deepcopy(links)", "def __init__(self):\n self.word_dict = collections.defaultdict(list)", "def __init__(self, *terms, **kwargs):\n self.missing = kwargs.pop('_key_missing_', False)\n if terms and kwargs:\n raise ValueError(\"You must specify terms or kwargs, not both\")\n self.terms = []\n for t in terms:\n self.add_term(t)\n self.add_term(kwargs)", "def load_terms_dict():\n \tfnm = \"../datasets/bbc/bbc.terms\"\n \tterm_dict = {}\n \twith open(fnm, \"r\") as f:\n \t\tfor wordid, line in enumerate(f.readlines()):\n \t\t\tword = line.strip()\n \t\t\tterm_dict[wordid] = word\n \treturn term_dict", "def __init__(self, words):\n self.d = {}\n for i, w in enumerate(words):\n self.d[w] = self.d.get(w, []) + [i]", "def __init__(self, docs, n):\n self.n = n\n self.dict = {}\n self.vocab = set()\n self.sum_index = \"*sum*\"\n regex = re.compile(\"\\s+\")\n count = 0\n for doc in docs:\n terms = re.split(regex, doc)\n for term in terms:\n if term not in self.vocab:\n self.vocab.add(term)\n for i in range(0, len(terms) - n + 1):\n end = i+n-1\n t = tuple(terms[i:end])\n if t not in self.dict:\n self.dict[t] = {}\n self.dict[t][self.sum_index] = 0\n self.dict[t][self.sum_index] += 1\n end_term = terms[end]\n if end_term not in self.dict[t]:\n self.dict[t][end_term] = 1\n else:\n self.dict[t][end_term] += 1\n self.D = len(self.vocab)", "def __init__(self, corpus):\n self.unigramCounts = collections.defaultdict(lambda: 0)\n self.bigramCounts = collections.defaultdict(lambda: 0)\n self.trigramCounts = collections.defaultdict(lambda: 0)\n self.followingWords = 
collections.defaultdict(lambda: set())\n self.precedingWords = collections.defaultdict(lambda: set())\n self.total = 0\n self.discount = 0.75\n self.train(corpus)", "def __init__(self, terms, *interfaces):\n self.by_value = {}\n self.by_token = {}\n self._terms = []\n for term in terms:\n if term.value in self.by_value:\n raise ValueError(\n 'term values must be unique: %s' % repr(term.value))\n if term.token in self.by_token:\n raise ValueError(\n 'term tokens must be unique: %s' % repr(term.token))\n self.by_value[term.value] = term\n self.by_token[term.token] = term\n self._terms.append(term)\n if interfaces:\n directlyProvides(self, *interfaces)", "def _relevant_docs_from_posting(self, all_terms, posting):\n if len(all_terms) == 0: #if empty\n return {}\n\n all_terms_in_same_file = []\n relevant_docs = {}\n\n for term in all_terms.keys():\n relevant_docs[term] = posting[term]['tf']\n\n return relevant_docs", "def init_from_lists(self,terms,vectors):\n self.terms = terms\n self.vectors = vectors\n self.real_vectors = [RealVectorFactory.generate_vector(args) for args in vectors]\n self.dict = dict(zip(self.terms, self.real_vectors))", "def __init__(self, termname, keys, ordinal=False):\n \n self.keys = list(set(keys))\n self.keys.sort()\n self._name = termname\n self.termname = termname\n self.ordinal = ordinal\n\n if self.ordinal:\n name = self.name\n else:\n name = ['(%s==%s)' % (self.termname, str(key)) for key in self.keys]\n\n term.__init__(self, name, termname=self.termname, func=self.get_columns)", "def preparation(self):\n self.word_freq = defaultdict(int)\n\n for sentence in self.corpus:\n for word in sentence:\n self.word_freq[word] += 1\n\n # self.words decide the index of all the words\n self.words = list(self.word_freq.keys())\n self.T = len(self.words)\n\n # word_index will give index for a given word and vice versa for index_word\n self.word_index = dict([[word, i] for i, word in enumerate(self.words)])\n self.index_word = dict([[i, word] for i, word in enumerate(self.words)])", "def __init__(self, wordlist=None, path=None):\n super().__init__() # Initialize this as a new dict\n if path:\n some_words = self.get_words(path)\n for word in some_words:\n if word:\n self[word] = self.get(word, 0) + 1\n if wordlist:\n for word in wordlist:\n self[word] = self.get(word, 0) + 1\n # after creating key-value pairs create instance variable that contains the sum of all values\n self.sum = sum([self.get(key, 0) for key in self]) # sum of weights\n # set the amount of words in the list to the instance variable token\n # Count of distinct word types in this histogram\n self.types = len(self)\n self.tokens = sum(self.values())", "def __init__(self):\r\n self.dct = defaultdict(list)", "def __init__(self, concepts):\n # Define an end key to denote the end of a lookup\n self._end = '_end'\n # Define the main dictionary to store the concepts\n self.store_dict = CaseInsensitiveStringDict()\n # Store each concept in the list of concepts\n for concept in concepts:\n # current_dict is the current dictionary when iteration of words in one concept\n current_dict = self.store_dict\n # Iterate over all the words (tokens) in the concept\n for token in tokenize(concept):\n # Add the current word in the current concept to the current dictionary\n # by adding a new dictionary if the words doesn't exist already.\n # Update the current dictionary with the new one that is returned when\n # indexing with token.\n current_dict = current_dict.setdefault(token, default=CaseInsensitiveStringDict())\n # Add the 
end key to denote that the concept ends at the current dictionary.\n current_dict[self._end] = concept", "def set_keyword_map(self):\n \n ret = defaultdict(list)\n for idx, doc in enumerate(self.docs):\n for token in doc:\n if token in self.dictionary.token2id:\n ret[token].append(idx)\n \n self.keyword_map = ret\n return ret", "def filter_terms_by_cnt(self, min_count):\n filtered_terms = [term for term in self.term2id if self.term_frequent[term] >= min_count]\n # rebuild the term x id map\n self.term2id = {}\n self.id2term = {}\n for term in self.initial_terms:\n self.add(term, count=0)\n for term in filtered_terms:\n self.add(term, count=0)", "def create_freq_dict(sents, lang):\n ix = 0\n freq_dict_all = []\n stop_words = set(stopwords.words(lang))\n\n for sent in sents:\n ix += 1\n freq_dict = {}\n words = word_tokenize(sent)\n\n for word in words:\n word = word.lower()\n if word not in stop_words:\n if word in freq_dict:\n freq_dict[word] += 1\n else:\n freq_dict[word] = 1\n\n temp = {\n 'doc_id': ix,\n 'freq_dict': freq_dict\n }\n\n freq_dict_all.append(temp)\n\n return freq_dict_all", "def terms(self, filters={}):\n return self.__get_list_client(Term)(filters=filters)", "def __init__(self, corpus):\n self.unigramCounts = collections.defaultdict(lambda: 0)\n self.totalCount = 0\n self.zeroCount = 0\n self.train(corpus)", "def index_terms(self):\n [[self.set_postings(term, id) for term in NLProcessor.process(doc)] for id, doc in\n self.doc_store.dict.iteritems()]", "def add_terms_data(self, terms: Dict[datetime, List[dict]]):\n raise NotImplementedError()", "def __init__(self):\n self.kids = [{}]\n self.root = 0\n self.vocabular = set([])", "def _get_terms(self):\n return self.__terms", "def _get_terms(self):\n return self.__terms", "def _get_terms(self):\n return self.__terms", "def _get_terms(self):\n return self.__terms" ]
[ "0.6852482", "0.647374", "0.63965356", "0.638963", "0.637462", "0.6351122", "0.6201758", "0.6140415", "0.61140746", "0.6099874", "0.6098104", "0.6070301", "0.59825236", "0.5968131", "0.5958227", "0.5927311", "0.5926743", "0.5925386", "0.59241664", "0.5897759", "0.58848786", "0.5877723", "0.5859231", "0.5854529", "0.5850118", "0.5848797", "0.58459", "0.58459", "0.58459", "0.58459" ]
0.7638269
0
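A minimal standalone sketch of the `partition:modalities` parsing performed by initListOfTerms in the entry above; the parse_terms name and the sample input are hypothetical illustrations, not part of the dataset.

# Hypothetical sketch: each filter term is "partition:mod1;mod2",
# collected into {partition: [modality, ...]} exactly as initListOfTerms does.
def parse_terms(list_of_terms):
    parsed = {}
    for element in list_of_terms or []:
        partition, _, modalities = element.partition(":")
        parsed[partition] = modalities.split(";")
    return parsed

# Example with invented values -> {'airline': ['AA', 'UA'], 'status': ['late', 'on_time']}
print(parse_terms(["airline:AA;UA", "status:late;on_time"]))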
returns the cover in the dictionnary of the specified modality
def getCoverFromModalityInDictionnary(self, dictionnary, key): return dictionnary[key] / 100
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_image_dict(self):\n sprite_sheet = setup.GFX['treasurechest']\n image_dict = {'closed': self.get_image(0, 0, 32, 32, sprite_sheet),\n 'opened': self.get_image(32, 0, 32, 32, sprite_sheet)}\n\n return image_dict", "def asDict(self) -> dict:\n return {\n \"predominant_occlusion\": self.predominantOcclusion.value,\n \"estimations\": {\n \"chin\": self.chin,\n \"mouth\": self.mouth,\n \"clear\": self.clear,\n \"correct\": self.correct,\n \"partially\": self.partially,\n \"full\": self.full,\n },\n }", "def bless_basic(unit):\n return {DAMAGE: unit.maximum_damage}", "def getBalancedAssetComposition(self):\n composition = dict()\n composition['bull'] = 50\n composition['bear'] = 100 - composition['bull']\n return composition", "def bless_advanced(unit):\n return {DAMAGE: unit.maximum_damage + 1}", "def calc_h2_working_cap(isotmt_dict): # pylint: disable=too-many-locals\n\n out_dict = {}\n out_dict['is_porous'] = isotmt_dict['is_porous']\n\n if out_dict['is_porous']:\n press2index = {}\n temp2index = {}\n for press in 1, 5, 100:\n press2index[press] = isotmt_dict['isotherm'][0]['pressure'].index(press)\n for temp in 77, 198, 298:\n temp2index[temp] = isotmt_dict['temperature'].index(temp)\n\n case2pt = {'a': [[100, 198], [5, 298]], 'b': [[100, 77], [5, 77]], 'c': [[100, 77], [1, 77]]}\n\n unitconv = {\n 'wt%': # convert mol/kg to wt%\n get_molec_uc_to_mg_g(isotmt_dict) / isotmt_dict['conversion_factor_molec_uc_to_mol_kg'] / 10,\n 'g/L': # convert mol/kg to g/L\n get_molec_uc_to_mg_g(isotmt_dict) / isotmt_dict['conversion_factor_molec_uc_to_mol_kg'] *\n isotmt_dict['Density']\n }\n\n for case, presstemp in case2pt.items():\n for unit, conv in unitconv.items():\n load_average = isotmt_dict['isotherm'][temp2index[presstemp[0][1]]]['loading_absolute_average'][\n press2index[presstemp[0][0]]]\n disc_average = isotmt_dict['isotherm'][temp2index[presstemp[1][1]]]['loading_absolute_average'][\n press2index[presstemp[1][0]]]\n load_dev = isotmt_dict['isotherm'][temp2index[presstemp[0][1]]]['loading_absolute_dev'][press2index[\n presstemp[0][0]]]\n disc_dev = isotmt_dict['isotherm'][temp2index[presstemp[1][1]]]['loading_absolute_dev'][press2index[\n presstemp[1][0]]]\n out_dict.update({\n 'case-{}_{}_unit'.format(case, unit): unit,\n 'case-{}_{}_average'.format(case, unit): (load_average - disc_average) * conv,\n 'case-{}_{}_dev'.format(case, unit): sqrt(load_dev**2 + disc_dev**2) * conv\n })\n\n return Dict(dict=out_dict)", "def get_murim_covered(quality_cut):\n\n murim_data_dir = 'data/murim/'\n murim_suffix_normal = 'EX_BLD1_1ln.snpfilter_anno_shortall_v1.62.txt'\n murim_suffix_cancer = 'EX_CANC_1ln.snpfilter_anno_shortall_v1.62.txt'\n murim_normal = cmp_murim_mutations_yusan.load_murims_calls(os.path.join(murim_data_dir,\n 'yusanN',\n murim_suffix_normal), \n quality_cut)\n murim_cancer = cmp_murim_mutations_yusan.load_murims_calls(os.path.join(murim_data_dir,\n 'yusanT',\n murim_suffix_cancer), \n quality_cut)\n return set(murim_cancer.keys()) & set(murim_normal.keys())", "def _get_critter_db() -> Dict[CritterType, List[CritterImage]]:\n with open(os.path.join('critters', 'names.json')) as fp:\n critter_data = json.load(fp)\n\n critter_db = collections.defaultdict(list)\n for critter_name, icon_name, critter_type_str in critter_data:\n critter_type = CritterType.from_str(critter_type_str)\n critter = CritterImage(critter_name, critter_type, icon_name)\n critter_db[critter_type].append(critter)\n return critter_db", "def find_ability(abilities: list, character_class: str, attack_type: str) 
-> Dict:\n # Find the ability to use\n ability_to_use = {\"effects\": [], \"enhancements\": []}\n for ability in abilities:\n if (ability[\"class\"] == character_class) and (ability[\"type\"] == attack_type):\n ability_to_use = ability\n break\n\n return ability_to_use", "def getOverlapComparisonImagesDict(self) :\n overlap_shift_comparisons = {}\n for o in self.overlaps :\n overlap_shift_comparisons[o.getShiftComparisonDetailTuple()]=o.getShiftComparisonImages()\n return overlap_shift_comparisons", "def codonComposition(self):#works\n return {codon: self.countDicNuc.get(codon) for codon in self.rnaCodonTable.keys()}", "def acquisitions(self):\r\n\r\n acquisitions_dict = {}\r\n for key in self.files:\r\n if key != 'OR_KA08_2_2': \r\n print(self.files[key])\r\n matlab_file = scipy.io.loadmat(self.files[key])\r\n if len(self.files[key])>41:\r\n vibration_data=matlab_file[self.files[key][19:38]]['Y'][0][0][0][6][2]\r\n else:\r\n vibration_data=matlab_file[self.files[key][19:37]]['Y'][0][0][0][6][2]\r\n\r\n acquisitions_dict[key] = vibration_data[0]\r\n\r\n acquisitions_data = {}\r\n acquisitions_data['conditions'] = self.conditions\r\n acquisitions_data['dirdest'] = self.dirdest\r\n acquisitions_data['acquisitions'] = acquisitions_dict\r\n\r\n return acquisitions_data", "def calc_ch4_working_cap(isot_dict):\n\n out_dict = {}\n out_dict['is_porous'] = isot_dict['is_porous']\n if out_dict['is_porous']:\n\n ip5 = isot_dict['isotherm']['pressure'].index(5.8)\n ip65 = isot_dict['isotherm']['pressure'].index(65.0)\n\n # conversion factors form mol/kg to cm3STP/cm3 and wt%\n conv1 = isot_dict['conversion_factor_molec_uc_to_cm3stp_cm3'] / isot_dict['conversion_factor_molec_uc_to_mol_kg'] # pylint: disable=line-too-long\n conv2 = get_molec_uc_to_mg_g(isot_dict) / isot_dict['conversion_factor_molec_uc_to_mol_kg'] / 10\n\n wc_65bar_average = isot_dict['isotherm']['loading_absolute_average'][ip65] - isot_dict['isotherm'][\n 'loading_absolute_average'][ip5]\n wc_65bar_dev = sqrt(isot_dict['isotherm']['loading_absolute_dev'][ip5]**2 +\n isot_dict['isotherm']['loading_absolute_dev'][ip65]**2)\n wc_65bar_fract = wc_65bar_average / isot_dict['isotherm']['loading_absolute_average'][ip65]\n\n out_dict.update({\n 'enthalpy_of_adsorption_5p8bar_average': isot_dict['isotherm']['enthalpy_of_adsorption_average'][ip5],\n 'enthalpy_of_adsorption_5p8bar_dev': isot_dict['isotherm']['enthalpy_of_adsorption_dev'][ip5],\n 'enthalpy_of_adsorption_5p8bar_unit': isot_dict['isotherm']['enthalpy_of_adsorption_unit'],\n 'enthalpy_of_adsorption_65bar_average': isot_dict['isotherm']['enthalpy_of_adsorption_average'][ip65],\n 'enthalpy_of_adsorption_65bar_dev': isot_dict['isotherm']['enthalpy_of_adsorption_dev'][ip65],\n 'enthalpy_of_adsorption_65bar_unit': isot_dict['isotherm']['enthalpy_of_adsorption_unit'],\n 'wc_65bar_cm3stp/cm3_average': wc_65bar_average * conv1,\n 'wc_65bar_cm3stp/cm3_dev': wc_65bar_dev * conv1,\n 'wc_65bar_cm3stp/cm3_unit': 'cm3 STP/cm3',\n 'wc_65bar_wt%_average': wc_65bar_average * conv2,\n 'wc_65bar_wt%_dev': wc_65bar_dev * conv2,\n 'wc_65bar_wt%_unit': 'g/g/100',\n 'wc_65bar_mol/kg_average': wc_65bar_average,\n 'wc_65bar_mol/kg_dev': wc_65bar_dev,\n 'wc_65bar_mol/kg_unit': 'mol/kg',\n 'wc_65bar_fraction': wc_65bar_fract,\n 'wc_65bar_fraction_unit': '-',\n })\n return Dict(dict=out_dict)", "def get_cover_photo(self):\r\n return self.__cover_photo", "def get_cover_photo(self):\r\n return self.__cover_photo", "def get_dict(modfile):\n import codecs\n\n odict = dict()\n of = codecs.open(modfile, 'r', 
encoding='utf-8')\n for line in of:\n # Dictionary lines should be like:\n # /path/filename.suffix: mo_mod1 mo_mod2\n ll = line.rstrip().split(':')\n fname = ll[0]\n mods = ll[1].strip().split(' ')\n for m in mods:\n odict[m] = fname\n of.close()\n\n return odict", "def _get_spec_dict(specifications: Union[list, dict]):\n # Early return if they are passing custom specifications\n if isinstance(specifications, dict):\n return specifications\n\n spec_dict = DEFAULT_DECK_SPEC\n\n if \"ace_high\" in specifications:\n for suit in spec_dict.get(\"A\"):\n spec_dict[\"A\"][suit] = 14\n\n if \"face_cards_are_ten\" in specifications:\n for face_card in [\"J\", \"Q\", \"K\"]:\n for suit in spec_dict.get(face_card):\n spec_dict[face_card][suit] = 10\n\n return spec_dict", "def calc_o2_working_cap(isot_dict):\n\n out_dict = {}\n out_dict['is_porous'] = isot_dict['is_porous']\n if out_dict['is_porous']:\n\n ip5 = isot_dict['isotherm']['pressure'].index(5.0)\n ip140 = isot_dict['isotherm']['pressure'].index(140.0)\n\n # conversion factors form mol/kg to cm3STP/cm3 and wt%\n conv1 = isot_dict['conversion_factor_molec_uc_to_cm3stp_cm3'] / isot_dict['conversion_factor_molec_uc_to_mol_kg'] # pylint: disable=line-too-long\n conv2 = get_molec_uc_to_mg_g(isot_dict) / isot_dict['conversion_factor_molec_uc_to_mol_kg'] / 10\n\n wc_140bar_average = isot_dict['isotherm']['loading_absolute_average'][ip140] - isot_dict['isotherm'][\n 'loading_absolute_average'][ip5]\n wc_140bar_dev = sqrt(isot_dict['isotherm']['loading_absolute_dev'][ip5]**2 +\n isot_dict['isotherm']['loading_absolute_dev'][ip140]**2)\n wc_140bar_fract = wc_140bar_average / isot_dict['isotherm']['loading_absolute_average'][ip140]\n\n out_dict.update({\n 'enthalpy_of_adsorption_5bar_average': isot_dict['isotherm']['enthalpy_of_adsorption_average'][ip5],\n 'enthalpy_of_adsorption_5bar_dev': isot_dict['isotherm']['enthalpy_of_adsorption_dev'][ip5],\n 'enthalpy_of_adsorption_5bar_unit': isot_dict['isotherm']['enthalpy_of_adsorption_unit'],\n 'enthalpy_of_adsorption_140bar_average': isot_dict['isotherm']['enthalpy_of_adsorption_average'][ip140],\n 'enthalpy_of_adsorption_140bar_dev': isot_dict['isotherm']['enthalpy_of_adsorption_dev'][ip140],\n 'enthalpy_of_adsorption_140bar_unit': isot_dict['isotherm']['enthalpy_of_adsorption_unit'],\n 'wc_140bar_cm3stp/cm3_average': wc_140bar_average * conv1,\n 'wc_140bar_cm3stp/cm3_dev': wc_140bar_dev * conv1,\n 'wc_140bar_cm3stp/cm3_unit': 'cm3 STP/cm3',\n 'wc_140bar_wt%_average': wc_140bar_average * conv2,\n 'wc_140bar_wt%_dev': wc_140bar_dev * conv2,\n 'wc_140bar_wt%_unit': 'g/g/100',\n 'wc_140bar_mol/kg_average': wc_140bar_average,\n 'wc_140bar_mol/kg_dev': wc_140bar_dev,\n 'wc_140bar_mol/kg_unit': 'mol/kg',\n 'wc_140bar_fraction': wc_140bar_fract,\n 'wc_140bar_fraction_unit': '-',\n })\n return Dict(dict=out_dict)", "def cover(self):\n raise BookInfoNotImplementedError('cover', self.__class__.__name__)", "def condition_to_mg():\n return {\"HA\": 302, \"CHF\": 303, \"COPD\": 304, \"MS\": 305, \"PD\": 306, \"PFF\": 307}", "def return_dispense_media():\n media = {\"50_ug/ml_Kanamycin\": \"lb_miller_50ug_ml_kan\",\n \"100_ug/ml_Ampicillin\": \"lb_miller_100ug_ml_amp\",\n \"100_ug/mL_Spectinomycin\": \"lb_miller_100ug_ml_specto\",\n \"30_ug/ml_Kanamycin\": \"lb_miller_30ug_ml_kan\",\n \"15_ug/ml_Tetracycline\": \"lb_miller_15ug_ml_tet\",\n \"50_ug/ml_Kanamycin_25_ug/ml_Chloramphenicol\":\n \"lb_miller_50ug_ml_kan_25ug_ml_cm\",\n \"25_ug/ml_Chloramphenicol\": \"lb_miller_25ug_ml_cm\",\n \"LB_miller\": 
\"lb_miller_noAB\",\n \"TB_100_ug/ml_Ampicillin\": \"tb_100ug_ml_amp\",\n \"TB_50_ug/ml_Kanamycin\": \"tb_50ug_ml_kan\"}\n return (media)", "def create_animation_dict(self):\n image_dict = self.spritesheet_dict\n\n left_list = [image_dict['facing left 1'], image_dict['facing left 2']]\n right_list = [image_dict['facing right 1'], image_dict['facing right 2']]\n up_list = [image_dict['facing up 1'], image_dict['facing up 2']]\n down_list = [image_dict['facing down 1'], image_dict['facing down 2']]\n\n return {\n 'left': left_list,\n 'right': right_list,\n 'up': up_list,\n 'down': down_list\n }", "def _GetImageInfo(self,path):\n hd = Header(path, scan=True)\n hdr = hd.hdr\n self.hdr = hdr\n if hdr is None:\n# Either a ref.dat file or it isn't an imaging file.\n if 'ref' in path and 'dat' in path:\n self.refdats[os.path.realpath(path)] = True\n info = {'type':'refdat'}\n return info\n else:\n return None\n elif hdr['filetype'] == 'dicom' and not path.endswith('.yaml'):\n# Write a yaml file to the raw data directory if possible.\n dirname, outfile = self._yaml_filename(path)\n yaml_name = '%s/%s' % (dirname, outfile)\n if not os.path.exists(yaml_name):\n# Create yaml file using dirname,\n# e.g., ../anatomicals/S2_EFGRE3D/s2_efgre3d.yaml\n try:\n hd.write_hdr_to_yaml('%s/%s' % (dirname,outfile))\n except IOError:\n# This is a nonessential function, so ignore exceptions\n# such as access violations.\n pass\n elif hdr['filetype'] == 'dicom' or hdr['filetype'] == 'ge_ifile':\n if not os.path.isdir(path):\n path = os.path.dirname(path)\n shdr = hdr['subhdr']\n nhdr = hdr['native_header']\n self.shdr = shdr\n if 'dti' in shdr.get('PulseSequenceName','').lower() \\\n or 'dti' in nhdr.get('PulseSequenceFile',''):\n psdname = 'dti'\n else:\n psdname = os.path.basename((shdr.get('PulseSequenceName','').strip()).lower())\n info = {'psdname':psdname, \\\n 'acqtime':shdr['AcqTime'], \\\n 'series':int(shdr['SeriesNumber']), \\\n 'plane':hdr['plane'].strip(), \\\n 'type':self.imgtype.get(psdname,None), \\\n 'plane':hdr['plane'], \\\n 'acqtime':shdr['SeriesTime'], \\\n# 'fmapdir':None, \\\n 'refdat':None, \\\n 'imgfile':None, \\\n 'base':None, \\\n 'tdim':int(hdr['tdim']), \\\n 'echo_spacing':None, \\\n 'filetype':'brik', \\\n 'suffix':self.suffix.get(hdr['filetype'], 'brik'), \\\n 'data_filetype':hdr['filetype']}\n if info['type'] == 'localizer':\n# Don't process the localizer.\n return info\n if isinstance(info['acqtime'], int):\n info['acquisition_time'] = time.ctime(info['acqtime'])\n if nhdr.get('ImageFormat',('unknown'))[0] == 'DERIVED' and info['type'] == 'epi':\n# Sometimes screenshots are defined as epis.\n info['type'] = None\n\n# Call the method appropriate to the type of scan in this series.\n stat = apply( self.GetInfoMethods.get(info['type'], self._NoInfo), \\\n [info, path])\n if stat:\n info = {'type':'break'}\n return info\n info['suffix'] = self.suffix.get(info['filetype'], 'brik')\n return info", "def get_damage():\n\n return character['Damage']", "def _identify_media(self):\n\n mediapaths = {k: v['medium'] for k, v in self.labels.items() if v.get('medium') is not None}\n\n media_dict = {}\n for label, path in mediapaths.items():\n if path.lower() == 'air':\n media_dict[label] = Air()\n else:\n media_dict[label] = from_yaml(path)\n return media_dict", "def tile_dict(path):\n dic = {}\n for image in os.listdir(path):\n if image.split('.')[-1] == 'png':\n try:\n im = Image.open(image)\n except:\n print \"image file %s cannot open\" % image\n continue\n if im.mode != 'RGB':\n im = 
im.convert('RGB')\n dic[image] = average_image(im)\n return dic", "def get_perfect_information(self):\n state = {}\n state[\"chips\"] = [self.game.players[i].in_chips for i in range(self.player_num)]\n state[\"public_card\"] = (\n self.game.public_card.get_index() if self.game.public_card else None\n )\n state[\"hand_cards\"] = [\n self.game.players[i].hand.get_index() for i in range(self.player_num)\n ]\n state[\"current_round\"] = self.game.round_counter\n state[\"current_player\"] = self.game.game_pointer\n state[\"legal_actions\"] = self.game.get_legal_actions()\n return state", "def __getitem__(self, idx: int) -> Dict:\n files = self.files[idx]\n build_mask = np.array(Image.open(files[\"build_mask\"]))\n demo_mask = np.array(Image.open(files[\"demolish_mask\"]))\n build_mask = np.clip(build_mask.mean(axis=-1), 0, 1).astype(\"uint8\")\n demo_mask = np.clip(demo_mask.mean(axis=-1), 0, 1).astype(\"uint8\")\n image1 = np.array(Image.open(files[\"image1\"]))\n image2 = np.array(Image.open(files[\"image2\"]))\n image1, image2, build_mask, demo_mask = self.transform([image1, image2, build_mask, demo_mask])\n x = torch.stack([image1, image2], dim=0)\n return dict(x=x, build_mask=build_mask, demolish_mask=demo_mask)", "def change_book_cover(self, identifier_type, identifier, mirrors=None):\n self.require_librarian(flask.request.library)\n\n data_source = DataSource.lookup(self._db, DataSource.LIBRARY_STAFF)\n\n work = self.load_work(flask.request.library,\n identifier_type, identifier)\n if isinstance(work, ProblemDetail):\n return work\n\n rights_uri = flask.request.form.get(\"rights_status\")\n rights_explanation = flask.request.form.get(\"rights_explanation\")\n\n if not rights_uri:\n return INVALID_IMAGE.detailed(_(\"You must specify the image's license.\"))\n\n collection = self._get_collection_from_pools(\n identifier_type, identifier)\n if isinstance(collection, ProblemDetail):\n return collection\n\n # Look for an appropriate mirror to store this cover image. 
Since the\n # mirror should be used for covers, we don't need a mirror for books.\n mirrors = mirrors or dict(\n covers_mirror=MirrorUploader.for_collection(\n collection, ExternalIntegrationLink.COVERS),\n books_mirror=None\n )\n if not mirrors.get(ExternalIntegrationLink.COVERS):\n return INVALID_CONFIGURATION_OPTION.detailed(_(\"Could not find a storage integration for uploading the cover.\"))\n\n image = self.generate_cover_image(work, identifier_type, identifier)\n if isinstance(image, ProblemDetail):\n return image\n\n original, derivation_settings, cover_href, cover_rights_explanation = self._original_cover_info(\n image, work, data_source, rights_uri, rights_explanation)\n\n buffer = BytesIO()\n image.save(buffer, format=\"PNG\")\n content = buffer.getvalue()\n\n if not cover_href:\n cover_href = Hyperlink.generic_uri(\n data_source, work.presentation_edition.primary_identifier, Hyperlink.IMAGE, content=content)\n\n cover_data = LinkData(\n Hyperlink.IMAGE, href=cover_href,\n media_type=Representation.PNG_MEDIA_TYPE,\n content=content, rights_uri=rights_uri,\n rights_explanation=cover_rights_explanation,\n original=original, transformation_settings=derivation_settings,\n )\n\n presentation_policy = PresentationCalculationPolicy(\n choose_edition=False,\n set_edition_metadata=False,\n classify=False,\n choose_summary=False,\n calculate_quality=False,\n choose_cover=True,\n regenerate_opds_entries=True,\n regenerate_marc_record=True,\n update_search_index=False,\n )\n\n replacement_policy = ReplacementPolicy(\n links=True,\n # link_content is false because we already have the content.\n # We don't want the metadata layer to try to fetch it again.\n link_content=False,\n mirrors=mirrors,\n presentation_calculation_policy=presentation_policy,\n )\n\n metadata = Metadata(data_source, links=[cover_data])\n metadata.apply(work.presentation_edition,\n collection,\n replace=replacement_policy)\n\n # metadata.apply only updates the edition, so we also need\n # to update the work.\n work.calculate_presentation(policy=presentation_policy)\n\n return Response(_(\"Success\"), 200)", "def _goal_info_dict(self) -> dict:\n assert self._goal_info_cache\n return self._goal_info_cache[2]" ]
[ "0.5663321", "0.52026194", "0.5200007", "0.51598495", "0.5099573", "0.50299585", "0.5023927", "0.49796367", "0.4975332", "0.49580812", "0.4891326", "0.4875619", "0.48679543", "0.48481658", "0.48481658", "0.48460934", "0.4842605", "0.4824412", "0.47903132", "0.47611475", "0.47600284", "0.47580773", "0.47187564", "0.47176674", "0.4715737", "0.47060645", "0.47007748", "0.46683398", "0.4661859", "0.46458635" ]
0.6159811
0
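For context on the entry above: getCoverFromModalityInDictionnary assumes the summary dictionaries store covers as percentages and returns a fraction. A hypothetical call, with keys and values invented for illustration:

# Hypothetical data: values stored as percentages, cover returned as a fraction.
summary = {"status:late": 12.5, "status:on_time": 87.5}
cover = summary["status:late"] / 100   # mirrors getCoverFromModalityInDictionnary
assert cover == 0.125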
Creates a new version on a versionable object when the object is saved. A new version is created if the type is automatic versionable and has changed or if the user has entered a change note.
def create_version_on_save(context, event): # according to Products.CMFEditions' update_version_on_edit script # only version the modified object, not its container on modification if IContainerModifiedEvent.providedBy(event): return # XXX dirty hack for stagingbehavior, which triggers a event with # a aq_based context when deleting the working copy try: pr = context.portal_repository except AttributeError: return if not pr.isVersionable(context): # cancel, the object is not versionable return create_version = False if getattr(context, "REQUEST", None): changeNote = get_change_note(context.REQUEST, None) else: changeNote = None if changeNote: # user has entered a change note. create a new version even if nothing # has changed. create_version = True elif pr.supportsPolicy(context, "at_edit_autoversion"): # automatic versioning is enabled for this portal type if not base_hasattr(context, "version_id"): # we do not have a initial version create_version = True else: try: create_version = not pr.isUpToDate(context, context.version_id) except ArchivistUnregisteredError: # The object is not actually registered, but a version is # set, perhaps it was imported, or versioning info was # inappropriately destroyed create_version = True # create new version if needed if create_version: pr.save(obj=context, comment=changeNote)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_initial_version_after_adding(context, event):\n\n pr = getToolByName(context, \"portal_repository\", None)\n if pr is None:\n # This can happen, e.g., when adding a Plone Site with versioning\n # and portal_repository is not yet created\n return\n\n if not pr.isVersionable(context):\n # object is not versionable\n return\n\n if not pr.supportsPolicy(context, \"at_edit_autoversion\"):\n # automatic versioning disabled for this portal type, so we don't\n # need to create an initial version\n return\n\n # get the change not\n default_changeNote = _(\"initial_version_changeNote\", default=\"Initial version\")\n if getattr(context, \"REQUEST\", None):\n changeNote = get_change_note(context.REQUEST, default_changeNote)\n else:\n changeNote = None\n\n changed = False\n if not base_hasattr(context, \"version_id\"):\n # no initial version, let's create one..\n changed = True\n\n else:\n try:\n changed = not pr.isUpToDate(context, context.version_id)\n except ArchivistUnregisteredError:\n # The object is not actually registered, but a version is\n # set, perhaps it was imported, or versioning info was\n # inappropriately destroyed\n changed = True\n\n if not changed:\n return\n\n try:\n context.portal_repository.save(obj=context, comment=changeNote)\n except FileTooLargeToVersionError:\n pass # the on edit save will emit a warning", "def CreateVersion(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def create( self, message, manual = False ):\n\n version = self.domain_model()\n context = self.__parent__\n trusted = removeSecurityProxy(context)\n \n # set values on version from context\n self._copyFields(trusted, version)\n \n # content domain ids are typically not in the interfaces\n # manually inspect and look for one, by hand to save on the new version\n mapper = orm.object_mapper(trusted)\n version.content_id = mapper.primary_key_from_instance(trusted)[0]\n version.status = None\n version.manual = manual\n \n # we rely on change handler to attach the change object to the version\n event.notify(\n interfaces.VersionCreated(context, self, version, message))\n \n session = Session()\n session.add(version)\n \n version.context = context \n event.notify(ObjectCreatedEvent(version))\n\n return version", "def do_create_version(**kwargs):\n version_params = {\n \"name\": kwargs['dag_run'].conf.get('model_version'),\n \"description\": 'Version 1',\n \"runtimeVersion\": kwargs['dag_run'].conf.get('tf_version'),\n \"deploymentUri\": 'gs://{}/{}'.format(COMPOSER_BUCKET_NAME, PREFIX_FINAL_MODEL)\n }\n\n ti = kwargs['ti']\n\n mle = MLEngineHook()\n\n model_name = kwargs['dag_run'].conf.get('model_name')\n model_versions = ti.xcom_pull(key='model_versions', task_ids='list_versions')\n\n version_path = 'projects/{}/models/{}/versions/{}'.format(PROJECT,\n model_name,\n version_params['name'])\n\n if version_path in [v['name'] for v in model_versions]:\n logging.info(\"Delete previously version of the model to overwrite.\")\n mle.delete_version(PROJECT, model_name, version_params['name'])\n\n mle.create_version(PROJECT, model_name, version_params)", "def increment_version_on_insert(obj):\n history_model = obj.previous_version()\n\n if history_model is not None:\n obj.version = history_model.version + 1", "def create_version(instance,\n operation,\n versional_comment=None):\n if not versional_comment:\n versional_comment = \"Instance %sd\" % operation\n\n return {\n #'_id': str(ObjectId()),\n #'_id': ObjectId(),\n 'document_id': instance['_id'],\n 
'document_model': instance['_model'],\n 'document': instance,\n 'comment': versional_comment,\n 'operation': operation}", "def save_model(self, request, obj, form, change):\n if request.user.has_perm(\"easypublisher.can_approve_for_publication\"):\n obj.save()\n else:\n reversion.revision.add_meta(EasyPublisherMetaData, status='draft', language=request.LANGUAGE_CODE)\n reversion.revision.comment = \"Draft\"\n\n if not change and hasattr(obj, 'published'):\n obj.published = False\n obj.save()\n obj.published = True\n \n reversion.revision.post_save_receiver(obj, 0)", "def ongeza(self, type_):\n switch = {\n 'm': semver.bump_major,\n 'n': semver.bump_minor,\n 'p': semver.bump_patch,\n 'major': semver.bump_major,\n 'minor': semver.bump_minor,\n 'patch': semver.bump_patch}\n\n new_version = switch.get(type_)(self.version)\n\n if new_version in set(self.versions):\n self.logger.error('version `%s` already present', new_version)\n new_version = None\n\n return new_version", "def CreateVersion(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def new_version(self, latest_version_id: uplink.Path(name=\"id\")):\n pass", "def save(self, **kwargs):\n super(ProjectCurrentSerializer, self).save(**kwargs)\n \n if hasattr(self, 'uploaded_pot_file'):\n previous_version = self.object.get_current_version()\n current_version = create_new_version(self.object, previous_version.version+1, self.uploaded_pot_file)\n update_catalogs(self.object, previous_version, current_version)\n self.object.save()\n \n return self.object", "def create(self, validated_data):\n if validated_data['version'] > 1: # Viewset's get_serializer() will always add 'version'\n with transaction.atomic():\n current = RecordSchema.objects.get(record_type=validated_data['record_type'],\n next_version=None)\n new = RecordSchema.objects.create(**validated_data)\n current.next_version = new\n current.save()\n elif validated_data['version'] == 1: # New record_type\n new = RecordSchema.objects.create(**validated_data)\n else:\n raise serializers.ValidationError('Schema version could not be determined')\n return new", "def make_instance(self, include_optional):\n # model = synclient.models.version_info.VersionInfo() # noqa: E501\n if include_optional :\n return VersionInfo(\n content_md5 = '0', \n content_size = '0', \n id = '0', \n modified_by = '0', \n modified_by_principal_id = '0', \n modified_on = '0', \n version_comment = '0', \n version_label = '0', \n version_number = 56\n )\n else :\n return VersionInfo(\n )", "def save_as(self, version, run_pre_publishers=True):\n raise NotImplementedError(\"save_as is not implemented\")", "def saveNewVersion(self, *args, **kwargs):\n self.owner = self.song.owner\n self.date_added = self.song.date_added\n self.comment_node = self.song.comment_node\n\n self.project.title = self.song.title\n if not self.id:\n self._save(*args, **kwargs)\n\n self.project.latest_version = self\n self.project.save()\n self._save(*args, **kwargs)\n self.makeLogEntry()", "def test_add_creates_a_new_version(self):\n assert Version.objects.count() == 0\n\n response = self.api_client.post(\n reverse('api-v4:company:collection'),\n data={\n 'name': 'Acme',\n 'trading_names': ['Trading name'],\n 'business_type': {'id': BusinessTypeConstant.company.value.id},\n 'sector': {'id': random_obj_for_model(Sector).id},\n 'address': {\n 'line_1': '75 Stramford Road',\n 'town': 'London',\n 'country': {\n 'id': 
Country.united_kingdom.value.id,\n },\n },\n 'uk_region': {'id': UKRegion.england.value.id},\n },\n )\n\n assert response.status_code == status.HTTP_201_CREATED\n response_data = response.json()\n assert response_data['name'] == 'Acme'\n assert response_data['trading_names'] == ['Trading name']\n\n company = Company.objects.get(pk=response_data['id'])\n\n # check version created\n assert Version.objects.get_for_object(company).count() == 1\n version = Version.objects.get_for_object(company).first()\n assert version.revision.user == self.user\n assert version.field_dict['name'] == 'Acme'\n assert version.field_dict['trading_names'] == ['Trading name']\n assert not any(set(version.field_dict) & set(EXCLUDED_BASE_MODEL_FIELDS))", "def test_update_creates_a_new_version(self):\n company = CompanyFactory(name='Foo ltd.')\n\n assert Version.objects.get_for_object(company).count() == 0\n\n response = self.api_client.patch(\n reverse('api-v4:company:item', kwargs={'pk': company.pk}),\n data={'name': 'Acme'},\n )\n\n assert response.status_code == status.HTTP_200_OK\n assert response.json()['name'] == 'Acme'\n\n # check version created\n assert Version.objects.get_for_object(company).count() == 1\n version = Version.objects.get_for_object(company).first()\n assert version.revision.user == self.user\n assert version.field_dict['name'] == 'Acme'", "def obj_make_compatible(self, primitive, target_version):\n target_version = utils.convert_version_to_tuple(target_version)", "def command_new_version(self):\n repoinit.new_version(*self.args())", "def version(self, newVersion=None):\n pass", "def getVersion(self):\n self.getDocumentedObject().getVersion()", "def __init__(__self__, *,\n auto_upgrade_minor_version: pulumi.Input[bool],\n force_update_tag: pulumi.Input[str],\n location: pulumi.Input[str],\n publisher: pulumi.Input[str],\n resource_group: pulumi.Input[str],\n type_handler_version: pulumi.Input[str],\n type_name: pulumi.Input[str],\n vm_name: pulumi.Input[str],\n protected_settings: Optional[pulumi.Input[str]] = None,\n settings: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"auto_upgrade_minor_version\", auto_upgrade_minor_version)\n pulumi.set(__self__, \"force_update_tag\", force_update_tag)\n pulumi.set(__self__, \"location\", location)\n pulumi.set(__self__, \"publisher\", publisher)\n pulumi.set(__self__, \"resource_group\", resource_group)\n pulumi.set(__self__, \"type_handler_version\", type_handler_version)\n pulumi.set(__self__, \"type_name\", type_name)\n pulumi.set(__self__, \"vm_name\", vm_name)\n if protected_settings is not None:\n pulumi.set(__self__, \"protected_settings\", protected_settings)\n if settings is not None:\n pulumi.set(__self__, \"settings\", settings)", "def __set_version_id(self):\r\n VersionId = self.client.factory.create('VersionId')\r\n VersionId.ServiceId = self._version_info['service_id']\r\n VersionId.Major = self._version_info['major']\r\n VersionId.Intermediate = self._version_info['intermediate']\r\n VersionId.Minor = self._version_info['minor']\r\n self.logger.debug(VersionId)\r\n self.VersionId = VersionId", "def version_create(self, node, hash, size, type, source, muser, uuid,\n checksum, cluster=0,\n update_statistics_ancestors_depth=None):\n\n q = (\"insert into versions (node, hash, size, type, source, mtime, \"\n \"muser, uuid, checksum, cluster) \"\n \"values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\")\n mtime = time()\n props = (node, hash, size, type, source, mtime, muser,\n uuid, checksum, cluster)\n serial = self.execute(q, 
props).lastrowid\n self.statistics_update_ancestors(node, 1, size, mtime, cluster,\n update_statistics_ancestors_depth)\n\n self.nodes_set_latest_version(node, serial)\n\n return serial, mtime", "def add(self, bento_name, bento_version):", "def genVersion(*args, **kwargs):\n return generateVersionFileData(Version(*args, **kwargs))", "def create_item_version(item):\n if not item.version_fields:\n return\n _hash = hashlib.sha1()\n for attrname in item.version_fields:\n _hash.update(repr(item.get(attrname)))\n return _hash.digest()", "def create_item_version(item):\n if not item.version_fields:\n return\n _hash = hashlib.sha1()\n for attrname in item.version_fields:\n _hash.update(repr(item.get(attrname)))\n return _hash.digest()", "def test_archive_creates_a_new_version(self):\n company = CompanyFactory()\n assert Version.objects.get_for_object(company).count() == 0\n\n url = reverse('api-v4:company:archive', kwargs={'pk': company.id})\n response = self.api_client.post(url, data={'reason': 'foo'})\n\n assert response.status_code == status.HTTP_200_OK\n response_data = response.json()\n assert response_data['archived']\n assert response_data['archived_reason'] == 'foo'\n\n # check version created\n assert Version.objects.get_for_object(company).count() == 1\n version = Version.objects.get_for_object(company).first()\n assert version.revision.user == self.user\n assert version.field_dict['archived']\n assert version.field_dict['archived_reason'] == 'foo'", "def CreateVersion(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()" ]
[ "0.69639736", "0.66863483", "0.6405179", "0.6266524", "0.61676246", "0.60798085", "0.60540813", "0.59558815", "0.5853697", "0.56787676", "0.56625044", "0.56550807", "0.5645847", "0.56357175", "0.55998117", "0.5571658", "0.55309147", "0.55187213", "0.5510219", "0.5465921", "0.54569423", "0.54540837", "0.5446475", "0.54456663", "0.5445192", "0.5401167", "0.53964967", "0.53964967", "0.53905934", "0.53693604" ]
0.7737758
0
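The versioning handler in the entry above reduces to a small decision rule; the sketch below restates that rule in plain Python for illustration (it is not the Plone/CMFEditions API, and the function name and flags are invented).

# Sketch of the decision made by create_version_on_save: version when the user
# entered a change note, or when auto-versioning is enabled and the object is
# new or no longer up to date with its last saved version.
def should_create_version(change_note, auto_versioning, has_version, is_up_to_date):
    if change_note:
        return True
    if auto_versioning:
        return (not has_version) or (not is_up_to_date)
    return False

assert should_create_version("fixed typo", False, True, True) is True
assert should_create_version(None, True, False, True) is True
assert should_create_version(None, True, True, True) is False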
Creates a initial version on a object which is added to a container and may be just created. The initial version is created if the content type is versionable, automatic versioning is enabled for this type and there is no initial version. If a changeNote was entered it's used as comment.
def create_initial_version_after_adding(context, event): pr = getToolByName(context, "portal_repository", None) if pr is None: # This can happen, e.g., when adding a Plone Site with versioning # and portal_repository is not yet created return if not pr.isVersionable(context): # object is not versionable return if not pr.supportsPolicy(context, "at_edit_autoversion"): # automatic versioning disabled for this portal type, so we don't # need to create an initial version return # get the change not default_changeNote = _("initial_version_changeNote", default="Initial version") if getattr(context, "REQUEST", None): changeNote = get_change_note(context.REQUEST, default_changeNote) else: changeNote = None changed = False if not base_hasattr(context, "version_id"): # no initial version, let's create one.. changed = True else: try: changed = not pr.isUpToDate(context, context.version_id) except ArchivistUnregisteredError: # The object is not actually registered, but a version is # set, perhaps it was imported, or versioning info was # inappropriately destroyed changed = True if not changed: return try: context.portal_repository.save(obj=context, comment=changeNote) except FileTooLargeToVersionError: pass # the on edit save will emit a warning
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_version_on_save(context, event):\n # according to Products.CMFEditions' update_version_on_edit script\n\n # only version the modified object, not its container on modification\n if IContainerModifiedEvent.providedBy(event):\n return\n\n # XXX dirty hack for stagingbehavior, which triggers a event with\n # a aq_based context when deleting the working copy\n try:\n pr = context.portal_repository\n except AttributeError:\n return\n\n if not pr.isVersionable(context):\n # cancel, the object is not versionable\n return\n\n create_version = False\n\n if getattr(context, \"REQUEST\", None):\n changeNote = get_change_note(context.REQUEST, None)\n else:\n changeNote = None\n\n if changeNote:\n # user has entered a change note. create a new version even if nothing\n # has changed.\n create_version = True\n\n elif pr.supportsPolicy(context, \"at_edit_autoversion\"):\n # automatic versioning is enabled for this portal type\n\n if not base_hasattr(context, \"version_id\"):\n # we do not have a initial version\n create_version = True\n else:\n try:\n create_version = not pr.isUpToDate(context, context.version_id)\n except ArchivistUnregisteredError:\n # The object is not actually registered, but a version is\n # set, perhaps it was imported, or versioning info was\n # inappropriately destroyed\n create_version = True\n\n # create new version if needed\n if create_version:\n pr.save(obj=context, comment=changeNote)", "def create( self, message, manual = False ):\n\n version = self.domain_model()\n context = self.__parent__\n trusted = removeSecurityProxy(context)\n \n # set values on version from context\n self._copyFields(trusted, version)\n \n # content domain ids are typically not in the interfaces\n # manually inspect and look for one, by hand to save on the new version\n mapper = orm.object_mapper(trusted)\n version.content_id = mapper.primary_key_from_instance(trusted)[0]\n version.status = None\n version.manual = manual\n \n # we rely on change handler to attach the change object to the version\n event.notify(\n interfaces.VersionCreated(context, self, version, message))\n \n session = Session()\n session.add(version)\n \n version.context = context \n event.notify(ObjectCreatedEvent(version))\n\n return version", "def CreateVersion(self, request, context):\n context.code(beta_interfaces.StatusCode.UNIMPLEMENTED)", "def create_version(instance,\n operation,\n versional_comment=None):\n if not versional_comment:\n versional_comment = \"Instance %sd\" % operation\n\n return {\n #'_id': str(ObjectId()),\n #'_id': ObjectId(),\n 'document_id': instance['_id'],\n 'document_model': instance['_model'],\n 'document': instance,\n 'comment': versional_comment,\n 'operation': operation}", "def _version_structure(self, structure, user_id):\r\n new_structure = copy.deepcopy(structure)\r\n new_structure['_id'] = ObjectId()\r\n new_structure['previous_version'] = structure['_id']\r\n new_structure['edited_by'] = user_id\r\n new_structure['edited_on'] = datetime.datetime.now(UTC)\r\n new_structure['schema_version'] = self.SCHEMA_VERSION\r\n return new_structure", "def skip_initial_version_creation(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"skip_initial_version_creation\")", "def skip_initial_version_creation(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"skip_initial_version_creation\")", "def make_instance(self, include_optional):\n # model = synclient.models.version_info.VersionInfo() # noqa: E501\n if include_optional :\n return VersionInfo(\n 
content_md5 = '0', \n content_size = '0', \n id = '0', \n modified_by = '0', \n modified_by_principal_id = '0', \n modified_on = '0', \n version_comment = '0', \n version_label = '0', \n version_number = 56\n )\n else :\n return VersionInfo(\n )", "def increment_version_on_insert(obj):\n history_model = obj.previous_version()\n\n if history_model is not None:\n obj.version = history_model.version + 1", "def CreateVersion(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def __set_version_id(self):\r\n VersionId = self.client.factory.create('VersionId')\r\n VersionId.ServiceId = self._version_info['service_id']\r\n VersionId.Major = self._version_info['major']\r\n VersionId.Intermediate = self._version_info['intermediate']\r\n VersionId.Minor = self._version_info['minor']\r\n self.logger.debug(VersionId)\r\n self.VersionId = VersionId", "def CreateVersion(self, request, timeout, metadata=None, with_call=False, protocol_options=None):\n raise NotImplementedError()", "def __init__(self, name, version, git_tag='NA'):\n self._version = {'name' : name, 'version' : version, 'git_tag' : git_tag}", "def createNewVersion(self, file, title=None, description =None, versionNote = None):\n newVersionUrl = self.metaData.getLink(\"create-version\")\n assert newVersionUrl is not None\n\n uploadFile = MultiPartFormRequest(file)\n\n header = self._baseHeader.copy()\n header['Content-type'] = \"application/vnd.huddle.data+json\"\n body = json.dumps({\"title\" : title, \"description\" : description, \"versionNote\" : versionNote, \"extension\" : uploadFile.extension})\n\n #create a lock\n lockResponse = self.__createLock()\n\n response = self._adapter.postRequest(newVersionUrl, header, body)\n\n r = HyperLinkResource(response['Body'])\n #now that we have a good response get the urls for the next part\n uploadUrl = r.getLink(\"upload\")\n selfUrl = r.selfLink\n assert uploadUrl is not None\n\n #create the muti_form_data for the body\n boundary = uploadFile.create_boundary_string()\n header['Content-Type'] = \"mutipart/form-data; boundary=\" + boundary\n body = uploadFile.encode_mutipart_form_data(boundary)\n\n try:\n response = self._adapter.putRequest(uploadUrl, header, body)\n except HuddleConflictError:\n print(\"we couldn't upload the document as it has been locked by someone else\")\n finally:\n self.__deleteLock(HyperLinkResource(lockResponse['Body']).getLink(\"delete\"))\n\n return Document(self._client, selfUrl)", "def do_create_version(**kwargs):\n version_params = {\n \"name\": kwargs['dag_run'].conf.get('model_version'),\n \"description\": 'Version 1',\n \"runtimeVersion\": kwargs['dag_run'].conf.get('tf_version'),\n \"deploymentUri\": 'gs://{}/{}'.format(COMPOSER_BUCKET_NAME, PREFIX_FINAL_MODEL)\n }\n\n ti = kwargs['ti']\n\n mle = MLEngineHook()\n\n model_name = kwargs['dag_run'].conf.get('model_name')\n model_versions = ti.xcom_pull(key='model_versions', task_ids='list_versions')\n\n version_path = 'projects/{}/models/{}/versions/{}'.format(PROJECT,\n model_name,\n version_params['name'])\n\n if version_path in [v['name'] for v in model_versions]:\n logging.info(\"Delete previously version of the model to overwrite.\")\n mle.delete_version(PROJECT, model_name, version_params['name'])\n\n mle.create_version(PROJECT, model_name, version_params)", "def version_create(self, node, hash, size, type, source, muser, uuid,\n checksum, cluster=0,\n 
update_statistics_ancestors_depth=None):\n\n q = (\"insert into versions (node, hash, size, type, source, mtime, \"\n \"muser, uuid, checksum, cluster) \"\n \"values (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)\")\n mtime = time()\n props = (node, hash, size, type, source, mtime, muser,\n uuid, checksum, cluster)\n serial = self.execute(q, props).lastrowid\n self.statistics_update_ancestors(node, 1, size, mtime, cluster,\n update_statistics_ancestors_depth)\n\n self.nodes_set_latest_version(node, serial)\n\n return serial, mtime", "def add(self, bento_name, bento_version):", "def command_new_version(self):\n repoinit.new_version(*self.args())", "def _mkObject(self):\n return ImmutableObject(\n store=self.store,\n hash=u'somehash',\n contentDigest=u'quux',\n content=self.store.newFilePath('foo'),\n contentType=u'application/octet-stream')", "def PreCreate(self, pre):\n pass", "def PreCreate(self, pre):\n pass", "def PreCreate(self, pre):\n pass", "def PreCreate(self, pre):\n pass", "def PreCreate(self, pre):\n pass", "def __init__(__self__, *,\n auto_upgrade_minor_version: pulumi.Input[bool],\n force_update_tag: pulumi.Input[str],\n location: pulumi.Input[str],\n publisher: pulumi.Input[str],\n resource_group: pulumi.Input[str],\n type_handler_version: pulumi.Input[str],\n type_name: pulumi.Input[str],\n vm_name: pulumi.Input[str],\n protected_settings: Optional[pulumi.Input[str]] = None,\n settings: Optional[pulumi.Input[str]] = None):\n pulumi.set(__self__, \"auto_upgrade_minor_version\", auto_upgrade_minor_version)\n pulumi.set(__self__, \"force_update_tag\", force_update_tag)\n pulumi.set(__self__, \"location\", location)\n pulumi.set(__self__, \"publisher\", publisher)\n pulumi.set(__self__, \"resource_group\", resource_group)\n pulumi.set(__self__, \"type_handler_version\", type_handler_version)\n pulumi.set(__self__, \"type_name\", type_name)\n pulumi.set(__self__, \"vm_name\", vm_name)\n if protected_settings is not None:\n pulumi.set(__self__, \"protected_settings\", protected_settings)\n if settings is not None:\n pulumi.set(__self__, \"settings\", settings)", "def create_item_version(item):\n if not item.version_fields:\n return\n _hash = hashlib.sha1()\n for attrname in item.version_fields:\n _hash.update(repr(item.get(attrname)))\n return _hash.digest()", "def create_item_version(item):\n if not item.version_fields:\n return\n _hash = hashlib.sha1()\n for attrname in item.version_fields:\n _hash.update(repr(item.get(attrname)))\n return _hash.digest()", "def preview_create(self, obj, include_link=False):\n return self._create(obj, preview=True, include_link=include_link)", "def new_version(self, latest_version_id: uplink.Path(name=\"id\")):\n pass", "def PreCreate(self, pre):\r\n pass" ]
[ "0.6330616", "0.58630115", "0.5736563", "0.5694096", "0.55998176", "0.5510448", "0.54821205", "0.527617", "0.52696854", "0.5260113", "0.51173586", "0.5091388", "0.50750107", "0.504838", "0.50350356", "0.501439", "0.49975744", "0.49866953", "0.4931183", "0.4930095", "0.4930095", "0.4930095", "0.4930095", "0.4930095", "0.4879633", "0.4878572", "0.4878572", "0.4877541", "0.4871347", "0.48624086" ]
0.7284928
0
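
The record above pairs a version-creation query with negatives that bump a version counter on insert (e.g. `obj.version = history_model.version + 1`). A minimal, self-contained sketch of that increment-on-insert pattern follows; the `Record` and `VersionStore` names are hypothetical stand-ins invented for illustration, not taken from the dataset.

```python
from dataclasses import dataclass
from typing import Dict, Optional

@dataclass
class Record:
    name: str
    version: int = 1

class VersionStore:
    """Keeps the latest Record per name and bumps the version on each insert."""

    def __init__(self) -> None:
        self._latest: Dict[str, Record] = {}

    def insert(self, name: str) -> Record:
        previous: Optional[Record] = self._latest.get(name)
        # First insert starts at version 1; later inserts get previous.version + 1,
        # mirroring the increment_version_on_insert() snippet in the negatives above.
        version = previous.version + 1 if previous is not None else 1
        record = Record(name=name, version=version)
        self._latest[name] = record
        return record

store = VersionStore()
assert store.insert("doc").version == 1
assert store.insert("doc").version == 2
```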
Function to undefine a given virtual network
def net_undefine(network, server, virt="Xen"):
    cmd = "virsh -c %s net-undefine %s 2>/dev/null" % (virt2uri(virt), network)
    ret, out = run_remote(server, cmd)
    return ret
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remove_network(self, name_of_vm):\n try:\n # vmachine = self.get_vm_by_name(name_of_vm)\n vmachine = self.get_dc_object([vim.VirtualMachine], name_of_vm)\n network = None\n devices = vmachine.config.hardware.device\n networks = []\n for device in devices:\n if isinstance(device, vim.vm.device.VirtualEthernetCard):\n networks.append(device)\n status = 'error'\n if not networks:\n log.info(\"INFO: No network adapters connected to the VM to remove\")\n status = 'success'\n else:\n for network in networks:\n name = network.deviceInfo.label\n nic_spec = vim.vm.device.VirtualDeviceSpec()\n nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove\n nic_spec.device = network\n remove_nic = vim.vm.ConfigSpec()\n remove_nic.deviceChange = [nic_spec]\n task = WaitForTask(vmachine.ReconfigVM_Task(spec=remove_nic))\n\n if task == 'success':\n log.info(\"removed '{}' network adapter : {}\".format(name, name_of_vm))\n else:\n log.info(\"Could not '{}' Remove Network adapter: {}\".format(name, name_of_vm))\n status = 'success'\n return status\n except Exception as error:\n log.info(\"Error in 'remove_nic' keyword... {} \\n {}\".format(error, error.message))", "def remove_gateway(self, network_ref):\n raise NotImplementedError()", "def unhide_all(network=None, base_url=DEFAULT_BASE_URL):\n net_suid = networks.get_network_suid(network, base_url=base_url)\n res = None\n\n node_names = networks.get_all_nodes(net_suid, base_url=base_url)\n if len(node_names) > 0:\n res = set_node_property_bypass(node_names, new_values='true', visual_property='NODE_VISIBLE', network=network,\n base_url=base_url)\n\n edge_names = networks.get_all_edges(net_suid, base_url=base_url)\n if len(edge_names) > 0:\n res = set_edge_property_bypass(edge_names, new_values='true', visual_property='EDGE_VISIBLE', network=network,\n base_url=base_url)\n\n return res\n # TODO: res is ambiguous ... 
unclear what it really should be", "def delete_network(self, network):\r\n return self.delete(self.network_path % (network))", "def remove_network_adapter(self, network_obj):\n\n nic_spec = vim.vm.device.VirtualDeviceSpec()\n nic_spec.operation = vim.vm.device.VirtualDeviceSpec.Operation.remove\n nic_spec.device = network_obj\n config_spec = vim.vm.ConfigSpec()\n config_spec.deviceChange = [nic_spec]\n return self.vm_obj.ReconfigVM_Task(spec=config_spec)", "def unplug_vifs(self, instance, network_info):\n raise NotImplementedError()", "def clear_network(self):\n\n return self.alter_network(remove=list(self.network.edges))", "def unhide_nodes(node_names, network=None, base_url=DEFAULT_BASE_URL):\n res = clear_node_property_bypass(node_names, 'NODE_VISIBLE', network=network, base_url=base_url)\n return res", "def delete_network(options, vsm_obj):\n print(\"Disconnecting edge interface attached to this network\")\n edge_id = get_edge(vsm_obj)\n edge = Edge(vsm_obj, '4.0')\n edge.id = edge_id\n vnics = Vnics(edge)\n vnics_schema = vnics.query()\n network = get_network_id(options, get_network_name_on_vc(options))\n for vnic in vnics_schema.vnics:\n if network and vnic.portgroupId == network:\n print(\"Found a matching vnic %s %s\" % (options.name, vnic.index))\n vnic.isConnected = \"False\"\n vnic.portgroupId = None\n vnic.name = \"vnic%s\" % vnic.index\n vnics_schema = VnicsSchema()\n vnics_schema.vnics = [vnic]\n result = vnics.create(vnics_schema)\n if (result[0].response.status != 204):\n print \"update vnic error: %s %s\" \\\n % (result[0].response.status, result[0].response.reason)\n return False\n else:\n break\n else:\n print (\"No matching vnic found\")\n\n vdn_scope = get_transport_zone(options)\n virtual_wire = VirtualWire(vdn_scope)\n vwire = virtual_wire.read_by_name(get_network_name(options))\n name = get_network_name(options)\n if vwire != \"FAILURE\":\n print(\"Found a matching network %s\" % (options.name))\n virtual_wire.id = vwire.objectId\n result = virtual_wire.delete()\n if (result.response.status != 200):\n print (\"Delete vwire error: %s\" % result.response.reason)\n return False\n else:\n print (\"No matching network found\")\n print(\"Network %s deleted\" % (options.name))\n\n return True", "def _delete_network_vm(args):\n libvirtConn = libvirt.openReadOnly(None)\n if libvirtConn is None:\n print('Cannot contact hypervisor', file=sys.stderr)\n return 1\n net = None\n try:\n net = libvirtConn.networkLookupByName(args.network_name)\n except libvirt.libvirtError:\n print('Cannot find network named [%s]' % args.network_name, file=sys.stderr)\n return 1\n print('Network found:\\n')\n print(xml.dom.minidom.parseString(net.XMLDesc()).toprettyxml(indent=\" \", newl=''))\n print('')\n\n if not args.yes:\n if not input('Really destroy this network ?').strip().lower() in ('y', 'yes'):\n return 1\n return oci_utils.kvm.virt.delete_virtual_network(network_name=args.network_name)", "def unhide_edges(edge_names, network=None, base_url=DEFAULT_BASE_URL):\n res = clear_edge_property_bypass(edge_names, 'EDGE_VISIBLE', network=network, base_url=base_url)\n return res", "def remove_pvrdma(self, network_obj):\n\n self.remove_network_adapter(network_obj)", "def network_delete(auth=None, **kwargs):\n cloud = get_operator_cloud(auth)\n kwargs = _clean_kwargs(**kwargs)\n return cloud.delete_network(**kwargs)", "def unconfigure_global_stackwise_virtual(device):\n # Single command 'no stackwise-virtual' will remove configuration\n command = 'no stackwise-virtual'\n try:\n output = 
device.configure(command)\n except SubCommandFailure:\n raise SubCommandFailure('Failed to remove global stackwise-virtual')\n return output", "def unplug_port_from_network(self, device_id, device_owner, hostname,\n port_id, network_id, tenant_id, sg, vnic_type,\n switch_bindings=None, segments=None):", "def test_delete_network(self):\n pass", "def unplug_vifs(self, instance, network_info):\n try:\n for viface in network_info:\n self.vif_driver.unplug(instance, viface)\n self.stop_firewall(instance, network_info)\n except Exception as ex:\n with excutils.save_and_reraise_exception():\n LOG.error(_LE('Failed to remove container network'\n ' for %(instance)s: %(ex)s'),\n {'instance': instance.name, 'ex': ex},\n instance=instance)", "def unassign_sdn_networks(self, **kwargs):\n\n return self.api_request(self._get_method_fullname(\"unassign_sdn_networks\"), kwargs)", "def stop_network(self):\n self.net.stop()\n cleanup()", "def deleteNodeNetworkConfig(self,node):\n data = self.connect('delete',\"nodes/%s/network\" % (node),None)\n return data", "def fusion_api_delete_ethernet_network(self, name=None, uri=None, param='', api=None, headers=None):\n return self.ethernet_network.delete(name=name, uri=uri, param=param, api=api, headers=headers)", "def fusion_api_delete_network_set(self, name=None, uri=None, api=None, headers=None):\n return self.network_set.delete(name, uri, api, headers)", "def port_nic_remove(switch, port):\n client.port.detach_nic(switch, port)", "def stop(self):\n logging.debug(\"Network.stop entered:\" + str(self.id))\n # print self.cloudnet\n # res = cn.delete(self.cloudnet)\n notify(\"Stopping network %s\" % self.name)\n # if not self.cloudnet:\n # \n # self.cloudnet = cn.find(id=\"52a24319-f58d-4795-a3bd-c22d87bb65ae\")\n if self.cloudnet:\n res = self.cloudnet.delete()\n else:\n res = True\n return res", "def delete_overlay_network(self, name=NETWORK_NAME):\n try:\n # An overlay network is usually created in host belonging to a swarm\n self.leave_swarm()\n network = self.docker_client.networks.get(name)\n network.remove()\n except docker.errors.NotFound as nf:\n print(\"Network \"+name+\" not found\")\n except docker.errors.APIError as de:\n print(\"Error deleting overlay network\")\n print de\n exit(1)\n return", "def empty_network(network_id=NETWORK_ID):\n return make_net_model({\"id\": network_id,\n \"subnets\": [],\n \"ports\": [],\n \"tenant_id\": \"calico\",\n \"mtu\": neutron_constants.DEFAULT_NETWORK_MTU})", "def unfreeze(net):\n for p in net.parameters():\n p.requires_grad_(True)\n return net", "def _internal_network_removed(self, ri, port, ex_gw_port):\n itfc_deleted = False\n driver = self.driver_manager.get_driver(ri.id)\n vrf_name = driver._get_vrf_name(ri)\n network_name = ex_gw_port['hosting_info'].get('network_name')\n if self._router_ids_by_vrf_and_ext_net.get(\n vrf_name, {}).get(network_name) and (\n ri.router['id'] in\n self._router_ids_by_vrf_and_ext_net[vrf_name][network_name]):\n # If this is the last port for this neutron router,\n # then remove this router from the list\n if len(ri.internal_ports) == 1 and port in ri.internal_ports:\n self._router_ids_by_vrf_and_ext_net[\n vrf_name][network_name].remove(ri.router['id'])\n\n # Check if any other routers in this VRF have this network,\n # and if not, set the flag to remove the interface\n if not self._router_ids_by_vrf_and_ext_net[vrf_name].get(\n network_name):\n LOG.debug(\"++ REMOVING NETWORK %s\" % network_name)\n itfc_deleted = True\n del self._router_ids_by_vrf_and_ext_net[\n 
vrf_name][network_name]\n if not self._router_ids_by_vrf_and_ext_net.get(vrf_name):\n del self._router_ids_by_vrf_and_ext_net[vrf_name]\n\n driver.internal_network_removed(ri, port,\n itfc_deleted=itfc_deleted)\n if ri.snat_enabled and ex_gw_port:\n driver.disable_internal_network_NAT(ri, port, ex_gw_port,\n itfc_deleted=itfc_deleted)", "def delete_network(name, host, network_type):\n logging.info(\"Deleting %s '%s' from host '%s'\", network_type, name, host.name)\n\n try:\n if network_type.lower() == \"vswitch\":\n host.configManager.networkSystem.RemoveVirtualSwitch(name)\n elif network_type.lower() == \"portgroup\":\n host.configManager.networkSystem.RemovePortGroup(name)\n except vim.fault.NotFound:\n logging.error(\"Tried to remove %s '%s' that does not exist from host '%s'\",\n network_type, name, host.name)\n except vim.fault.ResourceInUse:\n logging.error(\"%s '%s' can't be removed because there are vNICs associated with it\",\n network_type, name)", "def delete_network_profile(arn=None):\n pass" ]
[ "0.63707083", "0.63062227", "0.62453514", "0.6191281", "0.61359066", "0.60712343", "0.6025632", "0.6006481", "0.5992399", "0.5980078", "0.59744585", "0.5953473", "0.5941246", "0.5902225", "0.5895596", "0.5824276", "0.5809243", "0.58091104", "0.58025676", "0.5797904", "0.5774278", "0.5741097", "0.57234645", "0.5665824", "0.56654096", "0.5660492", "0.56413877", "0.5641263", "0.56314385", "0.5621258" ]
0.7966817
0
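
The `net_undefine` document above shells out to `virsh -c <uri> net-undefine <network>` through helpers (`virt2uri`, `run_remote`) that the dump does not show. Below is a hedged, local-only sketch that runs the same command with `subprocess`; the `qemu:///system` connection URI is only an illustrative default, not something stated in the record.

```python
import subprocess

def net_undefine_local(network: str, uri: str = "qemu:///system") -> int:
    """Undefine a libvirt network on the local host and return the exit code."""
    cmd = ["virsh", "-c", uri, "net-undefine", network]
    completed = subprocess.run(cmd, capture_output=True, text=True)
    if completed.returncode != 0:
        # Surface stderr instead of silently discarding it like 2>/dev/null does.
        print(completed.stderr.strip())
    return completed.returncode
```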
Sets root directory for GUI based on config file
def set_root(self):
    config_dir = os.path.expanduser("~/.local/shs")
    config_file = os.path.join(config_dir, "shs_gui.cfg")
    # check the file and create one if it's not there
    if not os.path.isfile(config_file):
        os.makedirs(config_dir)
        open(config_file, 'w').close()
    config = ConfigParser.ConfigParser()
    config.read(config_file)
    # if config exists and has needed option
    if config.has_option("general", "root_dir"):
        return config.get("general", "root_dir")
    # make config
    if not config.has_section("general"):
        config.add_section("general")
    dlg = wx.DirDialog(self, "Select root directory")
    if dlg.ShowModal() == wx.ID_OK:
        root_dir = dlg.GetPath()
        config.set("general", "root_dir", root_dir)
    else:
        sys.exit(1)
    with open(config_file, 'w') as f:
        config.write(f)
    return root_dir
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_rootdir(configdict, config_file):\n if 'rootdir' not in configdict or not configdict['rootdir']:\n configdict['rootdir'] = os.path.dirname(config_file)", "def set_root(self, root):\n self.root_path = root", "def set_cont_dir(self):\n cont_dir = select_dir(os.getcwd())\n if cont_dir is not None:\n self.cont_dir_button.setStyleSheet(\"Text-align:left\")\n self.cont_dir_button.setText(cont_dir)\n else:\n self.cont_dir_button.setText('')", "def set_widget(self, widget, data=None):\n\t\tif ('/' not in Configure.ConfigFile) : nomeFile = \"./\"+Configure.ConfigFile\n\t\telse : nomeFile = Configure.ConfigFile\n\t\twidget.set_text(nomeFile)", "def add_master_path(self, widget):\r\n\r\n default_dir = get_default_dir()\r\n\r\n # Save path\r\n path = QFileDialog.getExistingDirectory(self, 'Select a directory to save master study data files')\r\n\r\n # Check path was chosen\r\n if path:\r\n widget.setText(path.replace(\"/\",\"\\\\\"))", "def set_dev_folder(self):\n self.lblDevFolder.setText(ConfigHandler.cfg.dev_dir)", "def Dir(self, directory='cfg/'):\n try:\n self.__dir = os.path.join(os.getcwd(), directory.lstrip('/\\\\'))\n except Exception:\n KITConfig.configDir = os.path.join(os.getcwd(), directory.lstrip('/\\\\'))", "def set_directory(self, directory):\n\t\tself.edit.set_text(directory)", "def on_SetDefaultWorkDir_clicked(self):\n # TODO: not implemented yet\n # raise NotImplementedError\n dir = QFileDialog.getExistingDirectory(self, \"选取文件夹\", \"./\") # 起始路径\n if dir != \"\":\n\n print(f\"We will boot Node in {dir}\")\n try:\n os.chdir(dir) # 切换目录\n except:\n print(f\"We can't change Node path to {dir}\")\n return\n\n self.NodeRootDir = dir\n self.DefaultWorkDirLabel.setText(f\"默认工作路径为:{self.NodeRootDir}\")\n\n cmd = f\"cd {self.NodeRootDir}\"\n\n self.OnlyDisplay(cmd)\n else:\n print(f\"You cancel the folder selection, we will work in default dir:{self.NodeRootDir}\")", "def set_working_dir(self):\n self.working_dir = select_dir(os.getcwd())\n if self.working_dir is not None:\n self.set_work_dir_button.setStyleSheet(\"Text-align:left\")\n self.set_work_dir_button.setText(self.working_dir)\n else:\n self.set_work_dir_button.setText('')\n msg_window('please select valid working directory')\n return", "def proj_set_directory(self, isTyped, widgetIndex):\r\n if isTyped == True:\r\n newPath = self.widgetList[widgetIndex].get()\r\n else:\r\n newPath = tkFileDialog.askdirectory(**self.dir_opt)\r\n kT.debug_log('New path: ' + newPath)\r\n if newPath != '':\r\n self.widgetList[widgetIndex].delete(0, END)\r\n self.widgetList[widgetIndex].insert(0, newPath)\r\n return", "def set_relative_root(self, root):\r\n self.root = root", "def set_relative_root(self, root):\r\n self.root = root", "def do_root(self, line):\n self.root_directory = line\n if self.source_file:\n self.source_file = self.root_directory + \"/\" + self.source_file\n print(f\"Root directory to read & write files is: {line}\")", "def set_root(root_):\n global root\n\n root = os.path.expanduser(root_)\n assert os.path.isabs(root)\n root = os.path.normpath(root)", "def set_config_path(self, new_config_path):\n oldpath = self.get_config_path()\n cdir, cfile = os.path.split(new_config_path)\n \n if not cdir.startswith('/'):\n cdit='/'+cdir\n if not cfile:\n cfile = 'site.yaml'\n\n self.dropbox_base_dir = cdir\n self.dropbox_site_yaml = cfile\n newpath = self.get_config_path()\n if newpath !=oldpath:\n return oldpath", "def set_relative_root(self, root):\n self.root = root", "def select_dir(self):\n prev_val = self.var_path.get()\n if 
self.conf_dir == \"dir_app\" or self.conf_dir == \"urls\":\n dir_ = fd.askopenfilename(parent=self.parentframe,\n initialdir=Path.home()) or prev_val\n else:\n dir_ = fd.askdirectory(parent=self.parentframe,\n initialdir=Path.home()) or prev_val\n\n self.var_path.set(value=dir_)\n if dir_ != prev_val:\n conf[self.conf_dir] = dir_\n self.handle_modified()", "def setDataRoot(path):\n global dataRoot\n dataRoot = os.path.realpath(path)", "def saveConfigFileDlg( self ):\n fileName = QtGui.QFileDialog.getSaveFileName( self, \"Save Full Config As...\", self.rsrc.lastFolder, \"Config files (*.cfg)\" )\n if ( fileName ):\n self.saveConfigFile( fileName )\n path, fName = os.path.split( str( fileName ) )\n self.rsrc.lastFolder = path", "def make_config(config):\n config.set(\"dxr\", \"source_folder\", os.path.expanduser(\"~/dxr\"))", "def setParentIndir(self,dir):\n\n self.parentindir = dir\n self.rundir = os.path.join(self.parentindir,self.runname)", "def __set_workdir(self):\r\n fname = self.get_current_filename()\r\n if fname is not None:\r\n directory = osp.dirname(osp.abspath(fname))\r\n self.emit(SIGNAL(\"open_dir(QString)\"), directory)", "def displaydirectory(self):\n self.imfilenameLineEdit.setText(DATA_DIRECTORY)", "def get_config(self):\n root_folder = os.path.dirname(os.path.dirname(__file__)).replace('\\\\', '/')\n root_folder = root_folder.replace('/core', '/config')\n # print root_folder, '<----------------------------------------'\n proj_config = os.path.join(root_folder, self.project.lower()).replace('\\\\', '/')\n # print proj_config, '============================================='\n if not os.path.isfile(proj_config):\n proj_config = os.path.join(root_folder, 'default').replace('\\\\', '/')\n # print proj_config, '<========================================'\n return proj_config", "def set_buildroot(path):\r\n BuildRoot().path = path", "def getPath(self):\n path = os.path.dirname(os.path.realpath(__file__)) #Finds the path of the application\n path =(os.path.dirname(os.path.realpath(__file__))+ '\\\\Enigma Settings') #Adds to the directory to create a folder\n \n return path #Returns the folders directory", "def _setLibraryRoot(self):\n\t\tself._libHome = os.path.abspath(rootDir)", "def path(self, root_dir):\r\n path = os.path.realpath(root_dir)\r\n if not os.path.exists(path):\r\n raise ValueError('Build root does not exist: %s' % root_dir)\r\n self._root_dir = path", "def set_dir(text_field, pref_name, start_dir_callback, update_pref_callback, *args):\n start_dir = start_dir_callback(pref_name)\n\n # Prompt user with file dialog box.\n # If they don't provide any input, exit the function.\n directory = pm.fileDialog2(fileMode=2, dialogStyle=2,\n startingDirectory=start_dir)\n if not directory:\n return\n\n # Assign user input to the Program Directory Text field in the Mimic UI.\n pm.textField(text_field, edit=True, text=directory[0])\n if update_pref_callback:\n update_pref_callback(pref_name, directory[0])" ]
[ "0.6675416", "0.6579016", "0.6508349", "0.64902014", "0.6410637", "0.6409476", "0.63489723", "0.6332154", "0.6329878", "0.6146685", "0.6117435", "0.6109983", "0.6109983", "0.60945934", "0.60858583", "0.60583323", "0.6055506", "0.60272396", "0.599262", "0.5988058", "0.5968688", "0.5925505", "0.591871", "0.59063596", "0.5879304", "0.58753616", "0.58600193", "0.5857996", "0.5856941", "0.5851115" ]
0.81983185
0
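
The `set_root` document above mixes a ConfigParser read-or-create pattern with a `wx.DirDialog` prompt. The sketch below keeps only the configuration part, using Python 3's `configparser` and a caller-supplied fallback directory instead of a GUI dialog; the function name and the fallback argument are assumptions made for illustration.

```python
import configparser
import os

def get_root_dir(fallback_root: str,
                 config_file: str = os.path.expanduser("~/.local/shs/shs_gui.cfg")) -> str:
    """Return the stored root_dir, or persist and return fallback_root."""
    os.makedirs(os.path.dirname(config_file), exist_ok=True)
    config = configparser.ConfigParser()
    config.read(config_file)  # silently ignores a missing file
    if config.has_option("general", "root_dir"):
        return config.get("general", "root_dir")
    if not config.has_section("general"):
        config.add_section("general")
    config.set("general", "root_dir", fallback_root)
    with open(config_file, "w") as f:
        config.write(f)
    return fallback_root
```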
Enqueue a task on a remote filesystem
def enqueue_remote(calc_dir, host, user):
    from sshutils import getSSHClient, getQueue, copyFile, removeFile, runCommand
    ssh = getSSHClient(host, user)
    # find which queue system is implemented on cluster (qstat - PBS, sinfo - SLURM)
    q = getQueue(ssh)
    if q is None:
        mbox.JobSubmit(None, ())
        return None
    # queue putter on a local machine
    local_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', q))
    putter = q + '.sh'
    sftp = copyFile(ssh, putter, local_dir, calc_dir)
    remote_file = os.path.join(calc_dir, putter)
    stdout, stderr = runCommand(ssh, 'bash ' + remote_file + ' -d=' + calc_dir)
    mbox.JobSubmit(q, ('\n'.join(stdout.readlines()), '\n'.join(stderr.readlines())))
    removeFile(sftp, remote_file)
    ssh.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def enqueue(self, fn):\n self.queue.put(fn)", "def enqueue(self, url, path, check_val):\n logger.debug(\"Enqueuing new task (total: {0})\".format(\n self._dwq.qsize() + 1))\n self._dwq.put((url, path, check_val, 1))", "def enqueue(self, xyz):\n command = 'enqueue ' + str(xyz)\n self.run_command(command)", "def test_queue_enqueue_command(runner, tmpworkdir, queue, target_factory): # pylint: disable=unused-argument\n\n atarget = target_factory.build(queue=queue)\n apath = Path('ips.txt')\n apath.write_text(f'{atarget.target}\\n \\n ', encoding='utf-8')\n\n result = runner.invoke(command, ['queue-enqueue', 'notexist', atarget.target])\n assert result.exit_code == 1\n\n result = runner.invoke(command, ['queue-enqueue', queue.name, atarget.target])\n assert result.exit_code == 0\n assert Queue.query.get(queue.id).targets[0].target == atarget.target\n\n result = runner.invoke(command, ['queue-enqueue', queue.name, '--file', apath])\n assert result.exit_code == 0\n assert len(Queue.query.get(queue.id).targets) == 2", "def _enqueue_task(self):\n\t\t# TODO(bslatkin): Remove these retries when they're not needed in userland.\n\t\tRETRIES = 3\n\t\ttarget_queue = os.environ.get('X_APPENGINE_QUEUENAME', constants.FEED_QUEUE)\n\t\tfor i in xrange(RETRIES):\n\t\t\ttry:\n\t\t\t\ttaskqueue.Task(\n\t\t\t\t\t\turl='/work/pull_feeds',\n\t\t\t\t\t\teta=self.eta,\n\t\t\t\t\t\tparams={'topic': self.topic}\n\t\t\t\t\t\t).add(target_queue)\n\t\t\texcept (taskqueue.Error, apiproxy_errors.Error):\n\t\t\t\tlogging.exception('Could not insert task to fetch topic = %s',\n\t\t\t\t\t\t\t\t\t\t\t\t\tself.topic)\n\t\t\t\tif i == (RETRIES - 1):\n\t\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn", "def PushWorkload(vm, workload_file, remote_path):\n if os.path.basename(remote_path):\n vm.RemoteCommand('sudo rm -f ' + remote_path)\n vm.PushFile(workload_file, remote_path)", "def enqueue(self, server_id, url, title, duration, user):\n srv = self.get_server_dict(server_id)\n srv['queue'].append( (url, title, duration, user) )", "def enqueue(self, name):\n pass", "def put(self, task):\n self.queue.put(task, task.priority)", "def enqueue(self, command):\n\n lock = Locker(str(self.qlockfile))\n if lock.lockfile():\n q = []\n if self.queuefile.exists():\n line = self.queuefile.read_text()\n q = line.split(',')\n if command not in q:\n q.append(command)\n line = \",\".join(q)\n self.queuefile.write_text(line)\n lock.unlockfile()", "def enqueue_task(signature):\n try:\n if signature not in g._celery_tasks:\n g._celery_tasks.append(signature)\n except RuntimeError:\n signature()", "def _submit_to_queue(self, script_file):", "def put(self, task):\n self.put_id += 1\n self.task_queue.put(task)", "def octopus_task(self, msg, args):\r\n self.tasks.send_task_by_id(msg, args)", "def runQueueEnqueue(self):\n raise NotImplementedError", "def add_task(self, func, *args, **kwargs):\n self.queue.put((func, args, kwargs))", "def submit(self):\n self._pre_submit()\n\n payload = self._to_json()\n resp = self._connection._post(get_url('tasks'), json=payload)\n\n if resp.status_code == 404:\n raise MissingDiskException(resp.json()['message'])\n elif resp.status_code == 403:\n if resp.json()['message'].startswith('Maximum number of disks reached'):\n raise MaxDiskException(resp.json()['message'])\n else:\n raise MaxTaskException(resp.json()['message'])\n elif resp.status_code == 402:\n raise NotEnoughCreditsException(resp.json()['message'])\n raise_on_error(resp)\n self._uuid = resp.json()['uuid']\n\n self._post_submit()", "def Enqueue(self, 
command):\n\n self.queue.put(command)", "def enqueue(self,\n name,\n action=None,\n method=None,\n wait_url=None,\n wait_url_method=None,\n workflow_sid=None,\n **kwargs):\n return self.append(Enqueue(\n name,\n action=action,\n method=method,\n wait_url=wait_url,\n wait_url_method=wait_url_method,\n workflow_sid=workflow_sid,\n **kwargs\n ))", "def spawn(self, taskdef: TaskDefinition) -> RemoteTask:\n raise NotImplementedError()", "def push(args):\n if args.type == 'ssh':\n cache = set(args.remote_cache).union(set(args.cache))\n for path in sorted(cache):\n if os.path.exists(os.path.join(args.base, path)) and not remote_exists(args.sftp, os.path.join(args.remote_base, path)):\n print('push: {}'.format(path))\n ensure_remote(args.sftp, os.path.dirname(os.path.join(args.remote_base, path)))\n args.sftp.put(\n os.path.join(args.base, path),\n os.path.join(args.remote_base, path)\n )\n args.remote_cache.append(path)\n args.remote_update = True\n elif args.type == 's3':\n raise NotImplementedError('s3:// remote type not yet supported!')\n elif args.type == 'gs':\n raise NotImplementedError('gs:// remote type not yet supported!')\n return", "def put(self, task):\n self.put_idx += 1\n self.task_queue.put(task)", "def put_task(self, task):\n # Check if current task is valid\n if not task.connect(self): # Check if task can be used\n return # Drop current task\n self.queue.put(task) # Add current task in schedule queue", "def queue(self, *args, **kwargs):\n queue_args = self._pop_tq_add_args(kwargs)\n app = queue_args.pop('app', None) or flask.current_app\n\n with app.test_request_context():\n # flask.url_for uses the request context if it is present\n # as we're most likely in a request context, use a\n # test_request_context() instead.\n url = self.url()\n\n payload = pickle.dumps((args, kwargs))\n\n taskqueue.add(\n url=url,\n queue_name=self.queue_name,\n payload=payload,\n **queue_args\n )", "def enqueue(self, cmd) -> None:\n self.general_queue.append(cmd)", "def enqueue(self):\n\t\t# TODO(bslatkin): Remove these retries when they're not needed in userland.\n\t\tRETRIES = 3\n\t\ttarget_queue = os.environ.get('X_APPENGINE_QUEUENAME', constants.EVENT_QUEUE)\n\t\tfor i in xrange(RETRIES):\n\t\t\ttry:\n\t\t\t\ttaskqueue.Task(\n\t\t\t\t\t\turl='/work/push_events',\n\t\t\t\t\t\teta=self.last_modified,\n\t\t\t\t\t\tparams={'event_key': self.key()}\n\t\t\t\t\t\t).add(target_queue)\n\t\t\texcept (taskqueue.Error, apiproxy_errors.Error):\n\t\t\t\tlogging.exception('Could not insert task to deliver '\n\t\t\t\t\t\t\t\t\t\t\t\t\t'events for topic = %s', self.topic)\n\t\t\t\tif i == (RETRIES - 1):\n\t\t\t\t\traise\n\t\t\telse:\n\t\t\t\treturn", "def run(self, host=None):\n host = self.getFogHost(host)\n num = str(self.getHostNumber(host))\n url = self.baseURL+'host/'+num+'/task'\n try:\n req = requests.post(\n url,\n headers=self.header,\n json={\"taskTypeID\": 1}\n )\n if req.status_code == 200:\n # self.logger.info(\"%s\", \"Scheduled image task for host\")\n pass\n except Exception:\n # self.logger.warning(\"%s\", \"Failed to schedule host imaging\")\n # self.logger.warning(\"%s\", \"Trying to delete existing image task\")\n self.delTask(num)\n req = requests.post(\n url,\n headers=self.header,\n json={\"taskTypeID\": 1}\n )\n if req.status_code == 200:\n # self.logger.info(\"%s\", \"Scheduled image task for host\")\n pass\n sys.exit(0)", "def enqueue(self, func):\n self.queue.put(func)", "def addTask(self, task, priority=0):\n self.queue.put((priority, task))", "def enqueue(self, message, qat, nbf):\n 
dst = self.abspath('%s.tmp' % str(message.id))\n with open(dst, 'wb') as f:\n f.write(nbf.to_bytes(8, 'big'))\n f.write(message.encode())\n f.flush()\n os.fsync(f.fileno())\n\n os.rename(dst, self.abspath('%s.amqp' % str(message.id)))" ]
[ "0.62454194", "0.6152354", "0.6060588", "0.6032706", "0.591759", "0.5790725", "0.57750595", "0.574475", "0.5740375", "0.5739555", "0.57374674", "0.5684687", "0.55867654", "0.5584851", "0.5565949", "0.5563195", "0.5535499", "0.5512124", "0.54877776", "0.5481692", "0.5475199", "0.5467762", "0.5460017", "0.54396003", "0.5415857", "0.5401705", "0.53999573", "0.53590757", "0.53369665", "0.53297234" ]
0.68370694
0
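
The `enqueue_remote` document above copies a queue-submission script into the remote calculation directory, runs it, and removes it, via an `sshutils` module that is not shown. A hedged paramiko-based sketch of the same copy-run-clean-up flow follows; the function name and the `putter_local` argument are illustrative, and the queue-system detection step is omitted.

```python
import os
import paramiko

def enqueue_remote_sketch(calc_dir: str, host: str, user: str, putter_local: str) -> str:
    """Copy a submit script to calc_dir on host, run it, and return its output."""
    client = paramiko.SSHClient()
    client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    client.connect(host, username=user)
    try:
        remote_script = os.path.join(calc_dir, os.path.basename(putter_local))
        sftp = client.open_sftp()
        sftp.put(putter_local, remote_script)              # copy the queue "putter" over
        _, stdout, stderr = client.exec_command(
            "bash %s -d=%s" % (remote_script, calc_dir))   # same flag style as the record
        output = stdout.read().decode() + stderr.read().decode()
        sftp.remove(remote_script)                         # clean up, as the record does
        sftp.close()
        return output
    finally:
        client.close()
```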
Get instance of select.
def get_select_instance(self) -> Select:
    element = self.wait_until_loaded()
    return Select(element)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_selenium_select(self):\n\n if self.exists():\n\n element = self.element()\n\n if element.tag_name == u'select':\n return SeleniumSelect(element)", "def select(cls, *flst):\n cls.runtime.set_select(flst)\n return SelectQuery(cls.runtime)", "def select(self, *attributes):\n return SelectQuery(self, attributes)", "def open_connection(self):\n return SelectConnection(\n parameters=self._connection_parameters,\n on_open_callback=self.on_connection_open,\n on_close_callback=self.on_connection_closed,\n on_open_error_callback=self.on_connection_closed,\n )", "def select(self):\n return", "def from_selection(cls):\n guid = compas_rhino.select_point()\n return cls.from_guid(guid)", "def select(self):\n pass", "def select(self):\n pass", "def _select(self):\r\n readable = [self.socket.handle.fileno(), self._read.fileno()]\r\n writable = []\r\n for i, connection in self.clients.items():\r\n if connection.is_readable():\r\n readable.append(connection.fileno())\r\n if connection.is_writeable():\r\n writable.append(connection.fileno())\r\n if connection.is_closed():\r\n del self.clients[i]\r\n return select.select(readable, writable, readable)", "def select(self):\r\n pass", "def inst(cls):\n if cls.instance is None:\n raise OptionsError(\"No options have been set\")\n return cls.instance", "def get(cls, *args, **kwargs):\n return SelectQuery(cls).filter(*args, **kwargs).one()", "def _run_select(self):\n return self._connection.select(\n self.to_sql(),\n self.get_bindings(),\n not self._use_write_connection\n )", "def _select_query(self):\r\n if self._where:\r\n self._validate_select_where()\r\n return SelectStatement(\r\n self.column_family_name,\r\n fields=self._select_fields(),\r\n where=self._where,\r\n order_by=self._order,\r\n limit=self._limit,\r\n allow_filtering=self._allow_filtering\r\n )", "def select(querystring: str,\n db: tsdb.Database,\n record_class: Optional[Type[_Record]] = None) -> Selection:\n queryobj = _parse_select(querystring)\n return _select(\n queryobj['projection'],\n queryobj['relations'],\n queryobj['condition'],\n db,\n record_class=record_class)", "def selectOpt(self, sql): # select\n # apply connection rescource\n dbp_opt = dbPool()\n results = dbp_opt.opSelect(sql)\n # release connection rescource\n dbp_opt.dispose()\n return results", "def immediate(self, selector: str) -> Selector:\n return Selector(selector, websocket=self._websocket)", "def _select_implementation(self):\n return", "def _select_implementation(self):\n return", "def find(self,\n selector: typing.Dict[str, typing.Any] = None,\n limit: int = None) -> \"Collection\":\n if not selector:\n return self\n\n collection: Collection = Collection(self._adapter.find(\n selectors=selector,\n limit=limit\n ))\n\n return collection", "def select(self, *dims):\n return select(self, *dims)", "def createSelector(self,type='select',speed=2.0):\n self.selector = self.loadObject(type, scale=2, parent=render, transparency=True, pos=Point2(0,0), glow=1)\n self.selector.hide()\n ival = self.selector.hprInterval((speed), Vec3(0, 0, 360))\n ival.loop()", "def set_select(self, val):\n self.select = val\n return self", "def select(cond, t, f):\n return _make.Select(convert(cond), convert(t), convert(f))", "def _getAsSelection(self):\n return self._asSelection", "def get(self):\n self.set_action(\"select\")\n result = self.connection.query(self.to_qmark(), self._bindings)\n relations = self.eager_load_model(result)\n return self.owner.new_collection(result).map_into(\n self.owner, \"hydrate\", relations=relations\n 
)", "def __getattr__(self, name):\n return self.connection(name)", "def from_selection(cls):\n guid = compas_rhino.select_mesh()\n return cls.from_guid(guid)", "def select(self, query='', next_token=None, consistent_read=False, max_items=None):\r\n return SelectResultSet(self, query, max_items=max_items, next_token=next_token,\r\n consistent_read=consistent_read)", "def getSelector(self, node):\n self.checkModelOpen()\n calcEngine = CalcEngine.factory(self.client_session)\n return calcEngine.getSelector(node)" ]
[ "0.70057505", "0.6398658", "0.6194344", "0.6185794", "0.6062771", "0.6035689", "0.6024519", "0.6024519", "0.59566975", "0.5862003", "0.5850936", "0.58454293", "0.5813995", "0.57389367", "0.56362617", "0.55677134", "0.5539791", "0.5501766", "0.5501766", "0.5473336", "0.545063", "0.539176", "0.5343442", "0.5336102", "0.53326213", "0.5322069", "0.53191733", "0.52981675", "0.52944136", "0.52898407" ]
0.8125689
0
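
The document above wraps a loaded `<select>` element in Selenium's `Select` helper. A short usage sketch follows; `page_object` stands for any object exposing the `get_select_instance()` method shown in the record, and the rest uses only the standard `Select` API.

```python
from selenium.webdriver.support.ui import Select

def choose_option(page_object, visible_text: str) -> str:
    """Pick an <option> by its label via the wrapper above and return the selection."""
    select = page_object.get_select_instance()
    select.select_by_visible_text(visible_text)   # standard Select API call
    return select.first_selected_option.text      # confirm what is now selected
```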
Convert obj into Status.
def as_status(cls, obj):
    if obj is None: return None
    return obj if isinstance(obj, cls) else cls.from_string(obj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_status_obj(self):\n\n status = Status(self._config.dirout, name=self._config.name,\n hardware=self._config.hardware)\n return status", "def get_object_status(obj):\n return get_object_parameter(obj, 'status')", "def save_object(self, data):\n return Status(**data)", "def convert_dicts_in_status_to_obj(status: Status) -> Status:\n keys_to_update = [\"urls\", \"user\", \"user_mentions\", \"quoted_status\"]\n for key in keys_to_update:\n if key == \"urls\":\n status.urls = [Url(**url) for url in status.__getattribute__(key)]\n elif key == \"user\":\n status.user = User(**status.__getattribute__(key))\n elif key == \"user_mentions\":\n status.user_mentions = [\n User(**user) for user in status.__getattribute__(key)\n ]\n elif key == \"quoted_status\":\n status.quoted_status = (\n convert_dicts_in_status_to_obj(\n status=Status(**status.__getattribute__(key))\n )\n if status.__getattribute__(key)\n else None\n )\n return status", "def status(self, status: dict):\n pass", "def _get_status(self, context, object_list=None):\n status = self.request.GET.get(\"status\", \"\").upper()\n\n if object_list is not None:\n return self._get_object_list(\n object_list, status != \"\" and JobStatus.is_member(status), status=status\n )\n\n options = list(map(lambda s: (s.name, s.value), JobStatus))\n\n return {\n **context,\n \"status_options\": sorted(options, key=lambda x: x[0]),\n \"status\": status,\n }", "def set_status(self, status, ts=None):\n return ObjectStatus.set_status(self, status, ts=ts)", "def _get_status(self):\n return self.__status", "def create_resultado(self, data):\n return Status(**data)", "def _create_status(self):\n if self.headers['Accept'] != CONTENT_TYPE_STATUS:\n raise NotAcceptable()\n\n body = self.server.status()\n self._write_response(\n 200, body,\n content_type='application/se.novafaen.smrt.status.v1+json'\n )\n self.server.successful_response()", "def json_converter(obj):\n if isinstance(obj, ErrorReport):\n rdict = obj.__dict__\n return rdict\n try:\n return obj.to_json()\n except AttributeError:\n return obj.__dict__", "def _parse_json_to_status_update(self, json_message):\n if len(json_message['failing']) > 0:\n return BuildStatusUpdate(BuildStatus.Failing)\n elif len(json_message['acknowledged']) > 0:\n return BuildStatusUpdate(BuildStatus.Acknowledged)\n else:\n return BuildStatusUpdate(BuildStatus.Passing)", "def GetStatus(self):\r\n return self.status", "def get_status(self) -> RobovacStatus:\n message = self._build_get_device_status_user_data_message()\n robovac_response = self._send_packet(message, True)\n received_status_bytes = robovac_response.c.usr_data\n received_status_ints = [x for x in received_status_bytes]\n\n return RobovacStatus(\n 1 if received_status_ints[6] & 4 > 0 else 0,\n 1 if received_status_ints[6] & 2 > 0 else 0,\n received_status_ints[1] & 255,\n received_status_ints[8] & 255,\n received_status_ints[11] & 255,\n received_status_ints[10] & 255,\n received_status_ints[12] & 255,\n received_status_ints[13] & 255\n )", "def from_dict(cls, dikt) -> 'Status':\n return deserialize_model(dikt, cls)", "def update_status(self, obj_type, obj_id, root_lb_id,\n provisioning_status, operating_status,\n agent_info, obj=None):\n\n msg = {'info': {'service_type': lb_const.SERVICE_TYPE,\n 'context': agent_info['context']},\n 'notification': [{'resource': agent_info['resource'],\n 'data':{'obj_type': obj_type,\n 'obj_id': obj_id,\n 'notification_type': 'update_status',\n 'root_lb_id': root_lb_id,\n 'provisioning_status':\n provisioning_status,\n 
'operating_status':\n operating_status,\n obj_type: obj}}]\n }\n LOG.info(\"Sending Notification 'Update Status' \"\n \"for resource: %(resource)s with Provisioning status:\"\n \"%(p_status)s and Operating status:%(o_status)s\",\n {'resource': agent_info['resource'],\n 'p_status': provisioning_status,\n 'o_status': operating_status})\n self.notify._notification(msg)", "def status(self, id):", "def _updateStatus(self, result):\n\n if result.status is not None:\n # status was explicitly set\n self.target.localStatus = result.status\n if self.target.present and self.target.created is None:\n self.target.created = self.configSpec.operation not in [\n \"check\",\n \"discover\",\n ]\n elif not result.success:\n # if any task failed and (maybe) modified, target.status will be set to error or unknown\n if result.modified:\n self.target.localStatus = (\n Status.error if self.required else Status.degraded\n )\n elif result.modified is None:\n self.target.localStatus = Status.unknown\n # otherwise doesn't modify target status", "def update_server_status(userStatusObj):\n oldStatusStr = get_from_db(key='status')\n if oldStatusStr:\n oldStatusObj = json.loads(oldStatusStr)\n mergeObj = {**oldStatusObj, **userStatusObj}\n set_to_db(key='status', str_value=json.dumps(mergeObj))", "def status(self) -> VacuumStatus:\n return VacuumStatus(self.send(\"get_status\")[0])", "def status(self):\n if \"status\" in self._prop_dict:\n if isinstance(self._prop_dict[\"status\"], OneDriveObjectBase):\n return self._prop_dict[\"status\"]\n else :\n self._prop_dict[\"status\"] = AutomaticRepliesStatus(self._prop_dict[\"status\"])\n return self._prop_dict[\"status\"]\n\n return None", "def status(self) -> Status:\n return self._status", "def status(self):\n return status_dict[self._get_property_(self.STATUS).upper()]", "def getStatus(self):\n\n self.sendCommand(cmdBytes = b'\\xfe')\n statusList = self.readReply(inputEndpoint=self.epStatus,\n unpackingFormat=self.statusPackingFormat,\n timeout=1000)\n status = self.Status(*statusList)\n self.lastStatus = status\n return status", "def get_status_by_id(cls, request, id):\n return request.dbsession.query(cls).get(id).status", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def createStatusObject(self):\n if self.config_filepath is None:\n return False\n\n self.status = GARunStatus(self.config_filepath)\n return True", "async def createStatus(self, *args, **kwargs):\n\n return await self._makeApiCall(self.funcinfo[\"createStatus\"], *args, **kwargs)" ]
[ "0.69012076", "0.6826973", "0.68099135", "0.5904176", "0.5852035", "0.56639117", "0.56094617", "0.55938005", "0.5563878", "0.548514", "0.5456211", "0.54496115", "0.54476863", "0.5410725", "0.5410167", "0.5387186", "0.53415185", "0.5339267", "0.53245735", "0.531709", "0.53131574", "0.5311648", "0.5294444", "0.52784", "0.5275167", "0.52726495", "0.52726495", "0.52726495", "0.5267729", "0.52645916" ]
0.8367584
0
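
The `as_status` document above coerces `None`, `Status` instances, and strings into a `Status`. The sketch below makes that pattern runnable on its own with a small hypothetical `IntEnum`; the real class in the dataset maps codes to names through a `_STATUS2STR` table rather than enum-name lookup.

```python
from enum import IntEnum

class Status(IntEnum):
    Init = 1
    Running = 2
    Error = 3

    @classmethod
    def from_string(cls, s):
        return cls[s]  # lookup by member name; raises KeyError for unknown names

    @classmethod
    def as_status(cls, obj):
        """Return None, pass a Status through, or parse a string."""
        if obj is None:
            return None
        return obj if isinstance(obj, cls) else cls.from_string(obj)

assert Status.as_status(None) is None
assert Status.as_status(Status.Error) is Status.Error
assert Status.as_status("Running") is Status.Running
```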
Return a `Status` instance from its string representation.
def from_string(cls, s):
    for num, text in cls._STATUS2STR.items():
        if text == s:
            return cls(num)
    else:
        raise ValueError("Wrong string %s" % s)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_string(cls, name: str) -> Enum:", "def from_str(cls, string):", "def from_dict(cls, dikt) -> 'Status':\n return deserialize_model(dikt, cls)", "def as_status(cls, obj):\n if obj is None: return None\n return obj if isinstance(obj, cls) else cls.from_string(obj)", "def from_dict(cls, dic: Dict[str, Any]) -> \"CircuitStatus\":\n invalid = ValueError(f\"Dictionary invalid format for CircuitStatus: {dic}\")\n if \"message\" not in dic or \"status\" not in dic:\n raise invalid\n try:\n status = next(s for s in StatusEnum if dic[\"status\"] == s.name)\n except StopIteration as e:\n raise invalid from e\n return cls(status, dic[\"message\"])", "def from_string(cls, string):\n normalised = cls.normalise_string(string)\n return cls.from_normalised_string(normalised)", "def from_str(cls, s):\n raise NotImplementedError", "def from_bytes(cls, bytes):\n construct = _constructs.CertificateStatus.parse(bytes)\n\n return cls(\n status_type=construct.status_type,\n response=construct.response,\n )", "def from_stdout(cls, value: str) -> \"GitStatus\":\n pattern = re.compile(\n r\"\"\"[\\n\\r]?\n (\n #\n \\W+\n branch.oid\\W+\n (?P<branch_oid>\n [a-f0-9]{40}\n )\n )?\n (\n #\n \\W+\n branch.head\n [\\W]+\n (?P<branch_head>\n .*\n )\n\n )?\n (\n #\n \\W+\n branch.upstream\n [\\W]+\n (?P<branch_upstream>\n .*\n )\n )?\n (\n #\n \\W+\n branch.ab\n [\\W]+\n (?P<branch_ab>\n \\+(?P<branch_ahead>\\d+)\n \\W{1}\n \\-(?P<branch_behind>\\d+)\n )\n )?\n \"\"\",\n re.VERBOSE | re.MULTILINE,\n )\n matches = pattern.search(value)\n\n if matches is None:\n raise Exception(\"Could not find match\")\n return cls(**matches.groupdict())", "def load(cls, json_str):\n \n game_state = json.loads(json_str)\n return cls(game_state)", "def _set_status(self, v, load=False):\n if hasattr(v, \"_utype\"):\n v = v._utype(v)\n try:\n t = YANGDynClass(v,base=unicode, is_leaf=True, yang_name=\"status\", rest_name=\"status\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)\n except (TypeError, ValueError):\n raise ValueError({\n 'error-string': \"\"\"status must be of a type compatible with string\"\"\",\n 'defined-type': \"string\",\n 'generated-type': \"\"\"YANGDynClass(base=unicode, is_leaf=True, yang_name=\"status\", rest_name=\"status\", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, namespace='urn:brocade.com:mgmt:brocade-bgp-operational', defining_module='brocade-bgp-operational', yang_type='string', is_config=False)\"\"\",\n })\n\n self.__status = t\n if hasattr(self, '_set'):\n self._set()", "def BindingStatus_fromString(*args):\n return _libsbml.BindingStatus_fromString(*args)", "def test_str(self):\r\n statobj = inputtypes.Status('test')\r\n self.assertEqual(str(statobj), 'test')\r\n self.assertEqual(unicode(statobj), u'test')", "def fromString(cls, string):\n raise NotImplementedError(\n 'fromString is not implemented on %r' % (cls.__name__,))", "def _decode_sensor_status(self, status: str) -> str:\n k = int(status)\n return self.SENSOR_STATUSES[k]", "def _parse_json_to_status_update(self, json_message):\n if len(json_message['failing']) > 0:\n return BuildStatusUpdate(BuildStatus.Failing)\n elif len(json_message['acknowledged']) > 0:\n return BuildStatusUpdate(BuildStatus.Acknowledged)\n else:\n return BuildStatusUpdate(BuildStatus.Passing)", "def _parse_status(self, status):\n if status 
in (STATUS_FINISHED, 'FINISHED'):\n return STATUS_FINISHED\n elif status in (STATUS_ERROR, 'ERROR'):\n return STATUS_ERROR\n elif status in (STATUS_CANCELED, 'CANCELED'):\n return STATUS_CANCELED\n return STATUS_STARTED", "def from_str (s):\n try: \n return from_csv(s)\n except Exception: \n pass\n \n try: \n return from_hex(s)\n except Exception: \n pass\n\n try:\n return from_name(s)\n except Exception: \n pass\n\n raise ColourFormatError(\"'%s' is not a recognized colour string\"%s)", "def _status_to_state(status):\n if status == 'failed':\n return Finding.State.ACTIVE\n elif status == 'passed' or status == 'skipped':\n return Finding.State.INACTIVE\n else:\n return Finding.State.STATE_UNSPECIFIED", "def valid_status(cls, status_str):\n status_str = status_str.upper()\n if status_str not in [\"OPEN\", \"CLOSED\"]:\n raise ValueError(f\"invalid ticket status '{status_str}'\")\n return status_str", "def from_string(\n cls: Type[_CromwellWorkflowLabel], workflow_label: str\n ) -> _CromwellWorkflowLabel:\n count_equals = workflow_label.count(\"=\")\n count_escaped_equals = workflow_label.count(\"\\\\=\")\n\n if count_equals - count_escaped_equals == 0:\n return cls(cls.CAPER_STR_LABEL, workflow_label)\n\n if count_equals - count_escaped_equals != 1:\n raise ValueError(\n \"Found more than one unescaped `=` in key=value pair, must only '\"\n \"specify one so parsing is not ambiguous\"\n )\n\n for i, char in enumerate(workflow_label):\n if char == \"=\":\n if workflow_label[i - 1] != \"\\\\\":\n key, value = workflow_label[0:i], workflow_label[i + 1 :]\n return cls(key, value)\n\n # Can skip coverage here, we know the loop above always executes on a string\n # with one non-escaped equals sign in it\n raise ValueError(\"Could not detect key-value pair\") # pragma: no cover", "def translate_from_rpc(rpcStatusText):\n return StatusText(\n \n StatusText.StatusType.translate_from_rpc(rpcStatusText.type),\n \n \n rpcStatusText.text\n )", "def from_json_str(cls, json_str):\n return cls.from_json(simplejson.loads(json_str))", "def value_from_str(self, s):\n raise ValueError()", "def from_type_string(cls, type_str):\n type_info = cls.is_my_type(type_str)\n if type_info:\n return cls(type_info)", "def status_message(message):\n return StatusMessage(message)", "def from_json(cls, string):\n dct = json.loads(string)\n return cls.from_dict(dct)", "def getStatusString(self, statClass=None):\n return self.convertStatus(self.getStatus(statClass=statClass))", "def from_string(string):\n # in order to complete this lab we are going to use the python lib json in which we have the function json.loads\n # which will automatically load a json from a string\n return json.loads(string)", "def __new__(\n cls: type[_StrEnumT], value: str, *args: Any, **kwargs: Any\n ) -> _StrEnumT:\n if not isinstance(value, str):\n raise TypeError(f\"{value!r} is not a string\")\n return super().__new__(cls, value, *args, **kwargs)" ]
[ "0.6532725", "0.6098119", "0.6089204", "0.6071723", "0.6062613", "0.6039817", "0.60321283", "0.5979478", "0.58530563", "0.5814466", "0.57564574", "0.5747435", "0.57120544", "0.563867", "0.56150186", "0.55646116", "0.55443376", "0.5541977", "0.5534263", "0.5529887", "0.5467418", "0.54669523", "0.5459639", "0.5457072", "0.5444933", "0.54296553", "0.54268676", "0.5411365", "0.54090124", "0.5406818" ]
0.7688771
0
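
The `from_string` document above does a reverse lookup in a code-to-name table and uses a `for`/`else` so the `ValueError` is raised only after every entry has been checked. The sketch below reproduces that behavior with a small hypothetical `_STATUS2STR` table so it runs on its own.

```python
class Status(int):
    """Integer status with a code -> name table, in the spirit of the record above."""
    _STATUS2STR = {1: "Initialized", 2: "Running", 3: "Error"}

    @classmethod
    def from_string(cls, s):
        for num, text in cls._STATUS2STR.items():
            if text == s:
                return cls(num)
        # Reached only when no name matched, same outcome as the for/else above.
        raise ValueError("Wrong string %s" % s)

    def __str__(self):
        return self._STATUS2STR[int(self)]

assert str(Status.from_string("Running")) == "Running"
```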
List of strings with all possible status values.
def all_status_strings(cls):
    return [info[1] for info in cls._STATUS_INFO]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getStatusValues(self):\n return []", "def get_all_status():\n return \"\"", "def valid_statuses(self):\n return [\n \"dish_maintenance\",\n \"dish_ok\",\n \"RF_maintenance\",\n \"RF_ok\",\n \"digital_maintenance\",\n \"digital_ok\",\n \"calibration_maintenance\",\n \"calibration_ok\",\n \"calibration_triage\",\n ]", "def availableValues(self):\n return [x.name for x in self._field.enum_type.values]", "def available_statuses(self):\n return self.pipeline.get(self.status, ())", "def available_statuses(self):\n return self.pipeline.get(self.status, ())", "def _get_status(self):\n\t\tstatus_list = []\n\t\tfor hand in self.player_hand:\n\t\t\tif hand.value > 21:\n\t\t\t\tstatus_list.append('lost')\n\t\t\telif hand.value == 21 \\\n\t\t\t\t\tand len(hand.cards) == 2 \\\n\t\t\t\t\tand not(self.dealer_hand[0].value == 21 and len(self.dealer_hand[0].cards) == 2):\n\t\t\t\tstatus_list.append('blackjack')\n\t\t\telif self.dealer_hand[0].value > 21:\n\t\t\t\tstatus_list.append('won')\n\t\t\telif hand.value > self.dealer_hand[0].value:\n\t\t\t\tstatus_list.append('won')\n\t\t\telif hand.value == self.dealer_hand[0].value:\n\t\t\t\tstatus_list.append('push')\n\t\t\telse:\n\t\t\t\tstatus_list.append('lost')\n\t\treturn status_list", "def old_statuses(self):\n return [\"passed_checks\", \"needs_checking\", \"known_bad\", \"not_connected\"]", "def all_statuses(cls):\n return list(cls.pipeline.keys())", "def all_statuses(cls):\n return list(cls.pipeline.keys())", "def get_status():\n return ('off', 'off')", "def _list_of_availability_strings():\n names = [availability.name for availability in Availability]\n return names", "def status_enum(self):\n return self.valid_statuses()", "def valid() -> List[str]:\n return [\n AssignmentState.CREATED,\n AssignmentState.LAUNCHED,\n AssignmentState.ASSIGNED,\n AssignmentState.COMPLETED,\n AssignmentState.ACCEPTED,\n AssignmentState.MIXED,\n AssignmentState.REJECTED,\n AssignmentState.SOFT_REJECTED,\n AssignmentState.EXPIRED,\n ]", "def status() -> Dict[str, Any]:", "def get_checked_status_list(self):\r\n checked_status_list = []\r\n for item_index in xrange(self.count()):\r\n item = self.item(item_index)\r\n if not item is None:\r\n checked_status_list.append(item.checkState())\r\n return checked_status_list", "def get_current_status_for_debug(self) -> List[str]:\n msgs = []\n if self.added_items:\n msgs.append(f\"{self.class_name} {self.cfg.name} added items:\")\n for item in self.added_items:\n msgs.append(f\"\\t{item}\")\n else:\n msgs.append(f\"No added items in {self.class_name}\")\n\n if self.ongoing:\n msgs.append(f\"{self.class_name} {self.cfg.name} pending items:\")\n for item in self.ongoing:\n msgs.append(f\"\\t{item}\")\n else:\n msgs.append(f\"No pending items in {self.class_name}\")\n\n return msgs", "def values(cls) -> t.List[t.Union[str, NameTitle]]:\n return list(cls.__labels__.values())", "def get_status(pos, neg, names):\n status = {}\n for i in names:\n #print str(i) +'\\n'+ str(pos) +'\\n'+ str(neg)+'\\n'+'\\n'\n if i in pos:\n status[i] = \"1\"\n elif i in neg:\n status[i] = \"0\"\n else:\n status[i] = \"NA\"\n return status", "def status():\n statuses = get_all_statuses()\n return json.dumps(statuses, indent=4)", "def health_check_codes(self) -> Sequence[str]:\n return pulumi.get(self, \"health_check_codes\")", "def read_all_status_characters(self):\n return self.STATUS_CHARACTERS", "def all_values(cls) -> List[str]:\n return list(member.value for member in cls.__members__.values())", "def get_status(self):\n return [l1.get_visible() for 
(l1, l2) in self.lines]", "def list_available_enum(enum_type: Type[Enum]) -> List[str]:\n return [f\"{i}: {v}\" for (i, v) in enumerate(enum_type)] # type: ignore[var-annotated]", "def values(self) -> Sequence[str]:\n return pulumi.get(self, \"values\")", "def values(self) -> Sequence[str]:\n return pulumi.get(self, \"values\")", "def values(self) -> Sequence[str]:\n return pulumi.get(self, \"values\")", "def values(self) -> Sequence[str]:\n return pulumi.get(self, \"values\")", "def values(self) -> Sequence[str]:\n return pulumi.get(self, \"values\")" ]
[ "0.80372065", "0.74304366", "0.7289085", "0.697477", "0.6933288", "0.6933288", "0.6818656", "0.6688988", "0.6659315", "0.6659315", "0.6566149", "0.65324384", "0.6446381", "0.6437433", "0.6356403", "0.63014066", "0.6281931", "0.62613416", "0.62507194", "0.6203024", "0.61833817", "0.6171317", "0.6151899", "0.61277723", "0.6081895", "0.60705376", "0.60705376", "0.60705376", "0.60705376", "0.60705376" ]
0.8151018
0
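
`all_status_strings` above reads column 1 of a `_STATUS_INFO` table. The table itself is not shown in the dump, so the sketch below uses hypothetical rows only to make the list comprehension runnable.

```python
class Status:
    # Each row: (numeric code, human-readable name, description). Rows are hypothetical.
    _STATUS_INFO = [
        (1, "Initialized", "the object has been created"),
        (2, "Running", "the object is running"),
        (3, "Error", "the object raised an error"),
    ]

    @classmethod
    def all_status_strings(cls):
        """Collect column 1 of every row, exactly as the record above does."""
        return [info[1] for info in cls._STATUS_INFO]

assert Status.all_status_strings() == ["Initialized", "Running", "Error"]
```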
True if status is critical.
def is_critical(self):
    return str(self) in ("AbiCritical", "QCritical", "Unconverged", "Error")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def life_critical():\n return True", "async def is_water_level_critical(self):\n entity_id = self._hass.data[DOMAIN][ATTR_WATER_LEVEL_CRITICAL_ENTITY_ID]\n return entity_id and self._hass.states.get(entity_id).state == STATE_ON", "def critical(self) -> Optional[pulumi.Input['InfraAlertConditionCriticalArgs']]:\n return pulumi.get(self, \"critical\")", "def critical(self) -> Optional[pulumi.Input['InfraAlertConditionCriticalArgs']]:\n return pulumi.get(self, \"critical\")", "def critical(self) -> pulumi.Output[Optional['outputs.InfraAlertConditionCritical']]:\n return pulumi.get(self, \"critical\")", "async def critical(self, check, *, note=None):\n return await self.mark(check, \"critical\", note=note)", "def critical(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"critical\")", "def critical(self, msg, *args):\n if self.lvl<=logging.CRITICAL: return self._log(msg, *args)", "def critical(self, msg):\n\n self(msg, CRITICAL)", "def critical(self, msg, *args, **kwargs):\n pass", "def critical(self, *args, **kwargs):\n self.msg(logging.CRITICAL, *args, **kwargs)", "def handle_critical(self, api, command):\n return self.handle_log(api, command, level=logging.CRITICAL)", "def critical(self, *args, **kwargs):", "def critical(\n self,\n msg,\n color=None,\n light=None\n ) -> None:\n self.write(msg, level=logging.CRITICAL, color=color, light=light)", "def is_maintenance_active(self):\n pass", "def critical(msg):\n log_msg(CRITICAL, msg)", "def critical(self, msg):\r\n self.logger.critical(msg)", "def critical(self, msg):\n self.__logger.critical(msg)", "def critical(self, message: str):\n self.log(Level.CRITICAL, message)", "def critical(self, msg: str):\n self._logger.critical(msg)", "def critical(self, *lines):\n if self.__debug_level >= DEBUG_LEVELS['critical']:\n self.print_lines(self.colored(('on_red', 'bold', 'white'), lines))", "def assess_status(self):\n if not self.configuration_complete():\n hookenv.status_set('blocked',\n 'Kerberos configuration incomplete')\n elif os_utils.is_unit_upgrading_set():\n hookenv.status_set('blocked',\n 'Ready for do-release-upgrade and reboot. 
'\n 'Set complete when finished.')\n else:\n hookenv.status_set('active',\n 'Unit is ready')", "def status(self):\r\n return STATUS.FINE", "def is_contagious(self):\n if self.health >= 0 and self.health <= 49:\n return True\n elif self.health >= 50 and self.health <= 100:\n return False", "def check_status(self):\n return self.status", "def check_status(self):\n return self.status", "def isStatus(self):\n return self.type == \"MPI_Status\"", "def get_status(self):\n return super(Cabling, self).get_status()", "def is_on(self):\n if self.is_update_locked():\n return self.graceful_state\n if self._state['action'] == 1 and self._state['state'] == 2:\n return True\n return False", "def check_status(self):\n if self.voltage>self.criticalValue and not self.statusHigh:#just went high\n self.statusHigh = True\n self.channelMessage = self.channelMessageHigh\n if ss is not None:\n if self.highSoundFile is not None:#specific high soundfile\n ss.playFile(os.path.join(\"sounds\",self.highSoundFile),1, 60.0)\n elif self.highIsGood:\n winsound.MessageBeep(winsound.MB_ICONASTERISK)#high is good and we just went high so nice sound\n else:\n winsound.MessageBeep(winsound.MB_ICONHAND)#high is bad and we just went high so bad sound\n \n elif self.voltage<self.criticalValue and self.statusHigh:#just went low\n self.statusHigh = False\n self.channelMessage = self.channelMessageLow\n if ss is not None:\n if self.lowSoundFile is not None:#specific high soundfile\n ss.playFile(os.path.join(\"sounds\",self.lowSoundFile),1, 60.0)\n if not self.highIsGood:\n winsound.MessageBeep(winsound.MB_ICONASTERISK)#high is bad and we just went low so good sound\n else:\n winsound.MessageBeep(winsound.MB_ICONHAND)#high is good and we just went low so bad sound" ]
[ "0.6534846", "0.64763504", "0.6444272", "0.6444272", "0.64000493", "0.6359411", "0.6297349", "0.62813634", "0.61272347", "0.60135937", "0.5963335", "0.595614", "0.59326184", "0.5897819", "0.58703893", "0.58697253", "0.5852167", "0.575893", "0.5719633", "0.5684549", "0.5680519", "0.5660482", "0.56022483", "0.55711156", "0.55690265", "0.55690265", "0.5555901", "0.553526", "0.5521802", "0.5512051" ]
0.74858505
0
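
`is_critical` above keys off the string form of the status. The sketch below shows the typical way such a check gates error handling; the status names are taken from the record, while the `handle()` helper is purely illustrative.

```python
CRITICAL_NAMES = ("AbiCritical", "QCritical", "Unconverged", "Error")

def is_critical(status_name: str) -> bool:
    """String-keyed check mirroring the method above."""
    return status_name in CRITICAL_NAMES

def handle(status_name: str) -> str:
    # Critical statuses usually trigger a restart or an abort; others continue.
    return "restart-or-abort" if is_critical(status_name) else "continue"

assert handle("QCritical") == "restart-or-abort"
assert handle("Completed") == "continue"
```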
Initialize an instance of `NodeResults` from a `Node` subclass.
def from_node(cls, node):
    kwargs = dict(
        node_id=node.node_id,
        node_finalized=node.finalized,
        node_history=list(node.history),
        node_name=node.name,
        node_class=node.__class__.__name__,
        node_status=str(node.status),
    )
    return node.Results(node, **kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, tree, result, url):\n self.tree = tree\n self.result = result\n self.url = url", "def __init__(self) -> None:\n\t\t# Call super\n\t\tsuper(RootNode, self).__init__()\n\t\tself.nodes: List[Node] = []\n\t\tself.subfiles: Set[str] = set()", "def __init__(self, start_index=None, number_of_results=None, types=None, results=None, order=None): # noqa: E501 # noqa: E501\n\n self._start_index = None\n self._number_of_results = None\n self._types = None\n self._results = None\n self._order = None\n self.discriminator = None\n\n self.start_index = start_index\n self.number_of_results = number_of_results\n if types is not None:\n self.types = types\n if results is not None:\n self.results = results\n if order is not None:\n self.order = order", "def __init__(self, elem_type):\n if not isinstance(elem_type, ResultType):\n raise TypeError('%s is not a type' % str(elem_type))\n self._elem_type = elem_type", "def __init__(self, node_id=None, status=None, start=None, end=None, error=None, error_code=None, progress=None, stats=None, out=None): # noqa: E501 # noqa: E501\n self._node_id = None\n self._status = None\n self._start = None\n self._end = None\n self._error = None\n self._error_code = None\n self._progress = None\n self._stats = None\n self._out = None\n self.discriminator = None\n if node_id is not None:\n self.node_id = node_id\n if status is not None:\n self.status = status\n if start is not None:\n self.start = start\n if end is not None:\n self.end = end\n if error is not None:\n self.error = error\n if error_code is not None:\n self.error_code = error_code\n if progress is not None:\n self.progress = progress\n if stats is not None:\n self.stats = stats\n if out is not None:\n self.out = out", "def __init__(self, node):\n super().__init__(node)\n if not issubclass(node.process_class, Psi4Calculation):\n raise exceptions.ParsingError('Can only parse Psi4Calculation')", "def __init__(self):\r\n\r\n super(Node, self).__init__()\r\n self.inputs = []\r\n self.outputs = []\r\n self._active_outputs = []\r\n self.description = None\r\n\r\n # Experimental: dictionary to be used to retype output fields\r\n # Currently used only in CSV source node.\r\n self._retype_dictionary = {}", "def __init__(self, node_text=\"\", node_type=0, node_parent=None):\n self.node_text = node_text\n self.node_type = node_type\n self.node_parent = node_parent\n self.node_left = None\n self.node_right = None", "def test_match_with_node_class(self):\n class OneNode(Node):\n \"\"\"Node example\"\"\"\n\n query = Query().match(OneNode)\n expected = '\\n'.join((\n 'MATCH (_a:OneNode)',\n 'RETURN _a',\n ))\n self.assertEqual(str(query), expected)\n\n class TwoNode(Node):\n \"\"\"Node example\"\"\"\n class Neo:\n \"\"\"Neo with labels\"\"\"\n labels = ('Two', 'Node')\n\n query = Query().match(TwoNode, 'q')\n expected = '\\n'.join((\n 'MATCH (q:Node:Two)',\n 'RETURN q',\n ))\n self.assertEqual(str(query), expected)", "def __init__(self, node_def, op, message, error_code):\n ...", "def from_xml_node(cls, xml_node):\n raise NotImplementedError(\"from_xml_node must be implemented by derived classes.\")", "def __init__(self,xmlnode_or_cond,error_type=None,copy=1,parent=None):\n if type(xmlnode_or_cond) is str:\n xmlnode_or_cond=unicode(xmlnode_or_cond,\"utf-8\")\n if type(xmlnode_or_cond) is unicode:\n if not stanza_errors.has_key(xmlnode_or_cond):\n raise ValueError, \"Bad error condition\"\n\n ErrorNode.__init__(self,xmlnode_or_cond,STANZA_ERROR_NS,copy=copy,parent=parent)\n\n if type(xmlnode_or_cond) is 
unicode:\n if error_type is None:\n error_type=stanza_errors[xmlnode_or_cond][1]\n self.xmlnode.setProp(\"type\",to_utf8(error_type))", "def __init__(self, node: Node[T]) -> None:\n self.current = node", "def __init__(self, total_hits=None, results=None, local_vars_configuration=None): # noqa: E501 # noqa: E501\n if local_vars_configuration is None:\n local_vars_configuration = Configuration()\n self.local_vars_configuration = local_vars_configuration\n\n self._total_hits = None\n self._results = None\n self.discriminator = None\n\n if total_hits is not None:\n self.total_hits = total_hits\n if results is not None:\n self.results = results", "def __init__(self, node_class=Node, edge_class=Edge):\n self.node_class = node_class\n self.edge_class = edge_class", "def __init__(self, nodes):\n\n self._nodes = nodes", "def __init__(self, node_cls, path, predicate=None):\n self._node_cls = node_cls\n self._path = path\n self._predicate = predicate", "def __init__(self, node: Dict):\n self._node = node", "def __init__(self, node):\n from aiida.common import exceptions\n super(BigDFTParser, self).__init__(node)\n if not issubclass(node.process_class, BigDFTCalculation):\n raise exceptions.ParsingError(\"Can only parse BigDFTCalculation\")", "def __init__(self, nodes=None):\r\n self.nodes = nodes", "def __call__(cls, *args: Union['Node', Mapping[str, 'Node']], **kwargs: Any):\n args = cls._check_and_transform_args(args)\n cls._check_kwargs(kwargs)\n return OpNode(\n op_type=cls,\n args=args,\n output_data_type=cls._return_data_type,\n kwargs=kwargs)", "def __init__(self, tree_node=None):\n self.root = tree_node", "def create_node_instance(self, node_type=None):\n if node_type in self.aliases:\n node_type = self.aliases[node_type]\n\n _NodeClass = self.__nodes.get(node_type)\n if _NodeClass:\n return _NodeClass()", "def __init__(self, action=None, node=None, prevNode=None, **kwdargs):\n self.action = action\n for (key, default) in self._node_props.items():\n if key in node:\n setattr(self, key, node[key])\n else:\n setattr(self, key, default)\n\n self._children = []\n if self.dir and 'nodes' in node:\n # We keep the data in raw format, converting them only when needed\n self._children = node['nodes']\n\n if prevNode:\n self._prev_node = EtcdResult(None, node=prevNode)\n # See issue 38: when returning a write() op etcd has a bogus result.\n if self._prev_node.dir and not self.dir:\n self.dir = True", "def __init__(self, node: Dict):\n super().__init__(node)", "def __init__(self, node, result_path, result_file_name=\"stream.txt\"):\n super(GenericStreamFormatter, self).__init__(node, result_path)\n self._result_file_name = result_file_name", "def __init__(self, items):\n if isinstance(items, abc.Generator) or isinstance(items, abc.Iterator):\n items = list(items)\n\n if items == [] or all(isinstance(item, Node) for item in items):\n list.__init__(self, items)\n else:\n raise TypeError(\"Items must be nodes.\")", "def __init__(self, *item_types):\n if len(item_types) == 1 and not isinstance(item_types[0], ResultType):\n item_types = tuple(item_types[0])\n for i, item_type in enumerate(item_types):\n if not isinstance(item_type, ResultType):\n raise TypeError('item_types[%s] is not a type: %s' % (i, item_type))\n self._item_types = item_types", "def create_node(self, node_class, *args, **kwds):\n assert isinstance(node_class, str)\n cls = nodelist.all_nodes[node_class]\n node = cls(*args, **kwds)\n self.add_node(node)\n return node", "def __init__(self, klass = BSTNode):\n self.root = None\n self.klass = 
klass" ]
[ "0.5823575", "0.5784935", "0.57507306", "0.5743493", "0.56789154", "0.55787575", "0.5556027", "0.55545753", "0.5540183", "0.5491495", "0.5489627", "0.548306", "0.5457399", "0.5450769", "0.5444015", "0.5427575", "0.5427209", "0.5423883", "0.54184836", "0.54181236", "0.5371449", "0.53617275", "0.5360223", "0.5343589", "0.53317595", "0.5326886", "0.532427", "0.53187656", "0.53069115", "0.52787846" ]
0.6979888
0
List of the absolute paths of the files to be put in GridFS.
def gridfs_files(self): return self["files"]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def filepaths(self):\n pass", "def list_files(self):\n ret = []\n for fname in self.files:\n ret.append('filename: %s\\t replica locations: %s' %\n (fname, ','.join(self.files[fname])))\n return ret", "def listFiles(self):\n pass", "def getFiles(self):\n\t\treturn os.listdir(self.getPath())", "def filelist(self):\n msg = \"Collection of (str) file paths to mock\"\n raise NotImplementedError(msg)", "def glob_fs(self):\n\n found_files = []\n for pattern in self.glob_patterns:\n found_files += [PathString(present_file)\n for present_file in glob.glob(pattern)]\n return found_files", "def get_files(self) -> list:\n files = []\n for file in os.listdir(self.root):\n if file.endswith(f\".{self.suffix}\"):\n files.append(os.path.join(self.root, file))\n return files", "def locations(self):\n return [part.file for part in self.iterParts() if part]", "def locations(self):\n return [part.file for part in self.iterParts() if part]", "def locations(self):\n return [part.file for part in self.iterParts() if part]", "def _get_files_list(self):\n ts_filepaths = []\n conn_filepaths = []\n ts_filepaths_from_dir = sorted(os.listdir(self.ts_dir))\n conn_filepaths_from_dir = sorted(os.listdir(self.conn_dir))\n for sub_id in self.ids:\n for ts_file in ts_filepaths_from_dir:\n if sub_id in ts_file:\n ts_filepaths += [os.path.join(self.ts_dir, ts_file)]\n ts_filepaths_from_dir.remove(ts_file)\n break\n for conn_file in conn_filepaths_from_dir:\n if sub_id in conn_file:\n conn_filepaths += [os.path.join(self.conn_dir, conn_file)]\n conn_filepaths_from_dir.remove(conn_file)\n break\n\n return ts_filepaths, conn_filepaths", "def getGlobusFiles(self):\n\t\treturn self.transfer_client.operation_ls(self.transfer_client.endpoint_search(DATA_ENDPOINT_NAME)[0]['name'])", "def filenames(self):\n pass", "def local_paths(self) -> List[Path]:\n return self._local_paths", "def get_files(self):\r\n return self._filelist", "def paths(self, toNative=True):\n if self.__mode == E5PathPickerModes.OpenFilesMode:\n return self.path(toNative=toNative).split(\";\")\n else:\n return [self.path(toNative=toNative)]", "def get_files(self) -> tp.Iterable[str]:\n return os.listdir(self.path)", "def _list_of_files(self):\n if self.only_gpw:\n path = 'NOTORIA_GPW_XLSX/'\n securities_list = os.listdir(path)\n else:\n path = 'NOTORIA_NC_XLSX/'\n securities_list = os.listdir(path)\n securities_list = [x for x in securities_list if not x.startswith('.')]\n securities_list.sort()\n self.securities_filenames = securities_list\n self.paths_to_securities_files = [path + x for x in securities_list]\n self.logger.debug('self.securities_filenames, n: {}, [0]: {}'.format(\n str(len(self.securities_filenames)),\n str(self.securities_filenames[0]))\n )\n self.logger.debug('self.paths_to_securities_files, n: {}, [0]: {}'.format(\n str(len(self.paths_to_securities_files)),\n str(self.paths_to_securities_files[0]))\n )", "def get_files_paths(self):\n return self.__files_paths", "def _get_ais_paths(self) -> list:\n ais_files = []\n year = self.year\n end_year = self.year\n for month in range(1, 13):\n end_month = month + 1\n if month == 12:\n end_year += 1\n end_month = 1\n\n for vessel_type in self.vessel_types:\n path_template = f\"{vessel_type}_{year}{month:02}01-{end_year}{end_month:02}01_total.tif\"\n fname = self.dir / path_template\n ais_files.append(fname)\n\n return ais_files", "def get_all_path(self, conf):\n\t\tpass", "def _path_files(self):\n\n if not os.path.exists(self.path):\n return None\n\n directory_content = os.listdir(self.path)\n files = 
[]\n\n while len(directory_content) != 0:\n\n if not directory_content[0].startswith(self.path):\n directory_obj = os.path.join(self.path, directory_content[0])\n else:\n directory_obj = directory_content[0]\n\n if os.path.isfile(directory_obj):\n files.append(directory_obj)\n elif os.path.exists(directory_obj):\n temp_directory_content = os.listdir(directory_obj)\n for obj in temp_directory_content:\n directory_content.append(os.path.join(directory_obj, obj))\n directory_content.pop(0)\n\n return files", "def get_file_list(self):\n try:\n for filename in os.listdir(SHARED_DIR):\n self.file_list.append(filename)\n except Exception as e:\n print \"Error: retriving file list, %s\" % e", "def getExternalFiles(self):\n return []", "def contents(self):\n entries = []\n walk = next(os.walk(self.path))\n entries.extend(LocalFolder(os.path.join(walk[0], f)) for f in walk[1])\n entries.extend(LocalFile(os.path.join(walk[0], f)) for f in walk[2])\n return entries", "def folder(fpath):\n file_paths = glob.glob(fpath + '/*.dat')\n return list(file_paths)", "def files(self):\n try:\n return glob.glob(self.path)\n except (AttributeError, TypeError):\n try:\n return glob.glob(self.alias)\n except (AttributeError, TypeError):\n return []", "def paths(self):\r\n return self._paths", "def output_files(self):\n output_files = []\n for split in self.split_files:\n output_files.extend(split.filepaths)\n return output_files", "def _get_file_paths(self):\n return [os.path.join(self.path, self.mode, 'waveforms', file_name + '.npy') for file_name in self.file_names]" ]
[ "0.70887214", "0.68611246", "0.6793429", "0.666335", "0.6587119", "0.65812594", "0.6568302", "0.65522903", "0.65522903", "0.65522903", "0.6527277", "0.6510298", "0.6502833", "0.64978254", "0.6486751", "0.64842963", "0.64746535", "0.64709234", "0.646373", "0.6423753", "0.6334592", "0.63216835", "0.63130295", "0.6303369", "0.6277523", "0.6242747", "0.6233801", "0.6199236", "0.6194944", "0.61933684" ]
0.73669624
0
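An illustrative sketch of how the gridfs_files mapping above could be pushed into MongoDB GridFS with pymongo; the node object and the .path/.mode attributes of its entries are assumptions based on the surrounding snippets.

import gridfs
from pymongo import MongoClient

def upload_gridfs_files(node, db_name="flows"):
    # node.gridfs_files is assumed to map a key (usually the file extension)
    # to an object carrying .path (absolute path) and .mode ("b" or "t").
    fs = gridfs.GridFS(MongoClient()[db_name])
    oids = {}
    for key, gfile in node.gridfs_files.items():
        with open(gfile.path, "rb") as fh:  # GridFS stores raw bytes
            oids[key] = fs.put(fh, filename=gfile.path,
                               metadata={"key": key, "mode": gfile.mode})
    return oids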
This function registers the files that will be saved in GridFS. kwargs is a dictionary mapping the key associated with the file (usually the extension) to the absolute path. By default, files are assumed to be in binary form; for formatted files one should pass a tuple ("filepath", "t").
def register_gridfs_files(self, **kwargs): d = {} for k, v in kwargs.items(): mode = "b" if isinstance(v, (list, tuple)): v, mode = v d[k] = GridFsFile(path=v, mode=mode) self["files"].update(d) return self
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ugs_save_file_on_filesystem_hook(*args, **kwargs):\n\n if len(args) == 1 and isinstance(args[0], FrappeFileDoc):\n # We are being called from a file-type doc\n ret = args[0].save_file_on_filesystem()\n else:\n ret = frappe.utils.file_manager.save_file_on_filesystem(*args, **kwargs)\n\n file_name = ret['file_name']\n #file_url = ret['file_url'] # Not a consistent file system identifier\n\n if ('is_private' in kwargs) and kwargs['is_private']:\n file_path = os.path.abspath(\n frappe.get_site_path('private', 'files', file_name))\n else:\n file_path = os.path.abspath(\n frappe.get_site_path('public', 'files', file_name))\n\n extension = os.path.splitext(file_name)[1].lower()\n\n if extension in ('.jpg', '.jpeg'):\n # Resize and autoorient this image\n resize_image(file_path)\n\n return ret", "def register_data_files(self, *files, task=None, run=None):\n\n files = [Path(f) for f in files]\n for file in files:\n if file.suffix not in DATA_EXTENSIONS:\n raise ValueError(f'Wrong file format of data {file.suffix}. '\n f'Valid formats are {DATA_EXTENSIONS}')\n\n key = ''\n if task is not None:\n key += f'task_{task}'\n if run is not None:\n key += f'run-{run}'\n\n if key not in self.data:\n self.data[key] = files\n else:\n self.data['key'].extend(files)", "def _config_filehandler(self, **kwargs):\n tool = FileHandlerFactory(self.filehandler_type, **kwargs)\n self._tools.append(tool.to_msg())", "def __init__(self, files, **kwargs):\n self.files = {name: (None, data, 'application/octet-stream', {}) for name, data in files.iteritems()}\n for name, data in kwargs.iteritems():\n name = name.replace('_', '-')\n try:\n options = self.options_by_name[name]\n except KeyError:\n raise ValueError('Unknown option {}'.format(name))\n self.files[name] = (None, data, options.content_type, options.headers)", "def store(self, filename):", "def _upload_to_gcs(self, files_to_upload):\n # Compose mime_type using file format passed as param\n mime_type = 'application/' + self.export_format['file_format']\n hook = GoogleCloudStorageHook(\n google_cloud_storage_conn_id=self.google_cloud_storage_conn_id,\n delegate_to=self.delegate_to)\n for object, tmp_file_handle in files_to_upload.items():\n hook.upload(self.bucket, object, tmp_file_handle.name, mime_type)", "def registerFileType(self, extensions, classname):\r\n\r\n # keep track of all reader classes\r\n self._all_readers.append(classname)\r\n\r\n # iterate over all extensions\r\n for e in extensions:\r\n e_lower = e.lower()\r\n if e_lower not in self._extension_map:\r\n self._extension_map[e_lower] = []\r\n\r\n self._extension_map[e_lower].append(\r\n (extensions[e] + ' file', classname))", "def add_files_and_directories_rename(self, **kwargs):\n self.rename_files_or_directories_objects.append(\n RenameFilesOrDirectories(\n root_directory=self.root_directory,\n exclude_directories=self.exclude_directories,\n exclude_files=self.exclude_files,\n **kwargs\n )\n )", "def add_file(self, path):\n pass", "def _add_files(self, category, files, session, bucket=None):\n\n with session[category].make_commit('master') as commit:\n for filename, content in files.items():\n if bucket:\n commit.put_file_url(\n filename,\n 's3://%s/%s' % (bucket, content)\n )\n else:\n commit.put_file_bytes(\n filename,\n content\n )", "def add_files(self, *paths, **kw):\n write_p = self._pointer\n\n block_size = ffi.write_get_bytes_per_block(write_p)\n if block_size <= 0:\n block_size = 10240 # pragma: no cover\n\n with new_archive_entry() as entry_p:\n entry = ArchiveEntry(None, entry_p)\n 
for path in paths:\n with new_archive_read_disk(path, **kw) as read_p:\n while 1:\n r = read_next_header2(read_p, entry_p)\n if r == ARCHIVE_EOF:\n break\n entry.pathname = entry.pathname.lstrip('/')\n read_disk_descend(read_p)\n write_header(write_p, entry_p)\n if entry.isreg:\n with open(entry_sourcepath(entry_p), 'rb') as f:\n while 1:\n data = f.read(block_size)\n if not data:\n break\n write_data(write_p, data, len(data))\n write_finish_entry(write_p)\n entry_clear(entry_p)", "def put(filenames, file_type='auto', history_id=None):\n if type(filenames) is str:\n filenames = [filenames]\n\n history_id = history_id or os.environ['HISTORY_ID']\n gi = get_galaxy_connection(history_id=history_id)\n for filename in filenames:\n log.debug('Uploading gx=%s history=%s localpath=%s ft=%s', gi, history_id, filename, file_type)\n history = gi.histories.get(history_id)\n history.upload_dataset(filename, file_type=file_type)", "def register_model_file(self, filename):\n # TODO: remember about locking&reading when doing the atomic stuff\n self.model_files.append(filename)\n self.serialize()", "def to_files(self, gen, filenames=None):\n\n if filenames:\n self.filenames = filenames\n\n for f, arr in zip(self.pathgen, gen):\n np.save(f, arr)", "def set_file_storage(source='local', **kwargs):\n pass", "def writeFiles(cls, argDict):\n for obj in cls.Instances.values():\n path = obj.write(argDict)\n argDict[obj.varName] = path", "def _write_files_(self, arrs, filepath):\n\t\t# If there's a dot in the filepath prepend the filepath\n\t\tif '.' in filepath:\n\t\t\tfilename, ext = gen_io.remove_file_extension(filepath)\n\t\t\tarrs = {f\"{filename}_{key}.{ext}\": arrs[key] for key in arrs}\n\t\t\tprint(arrs.keys())\n\t\t\n\t\t# Else just assume it's a folder\n\t\telse:\n\t\t\tif not os.path.isdir(filepath):\n\t\t\t\tos.makedirs(filepath)\n\n\t\t\tarrs = {f\"{filepath}/{key}.npy\": arrs[key] for key in arrs}\n\n\t\t# Now write the files.\n\t\tfor key in arrs:\n\t\t\tfilepath = gen_io.create_unique_filepath(key)\n\t\t\tnp.save(key, arrs[key])", "def add_files(self, paths):\n for path in paths:\n self.add_file(path)", "def write_file(*args, **kwargs): # real signature unknown\n pass", "def export(**kwargs):\n\n import os\n\n interface = None # Holds the actual FileInterface for the specified data format\n vertex_index_to_file_key_map = None\n element_index_to_file_key_map = None\n\n if 'file_name' in kwargs:\n fname = kwargs['file_name']\n else:\n raise ValueError(\"file_name must be specified.\")\n \n extension = os.path.splitext(fname)[1].lower()\n\n if extension=='.msh':\n from bempp.api.file_interfaces import gmsh\n interface = gmsh.GmshInterface()\n \n if int('grid' in kwargs) + int('grid_function' in kwargs) != 1:\n raise ValueError(\"Exactly one of 'grid' or 'grid_function' must be specified\")\n\n if 'grid' in kwargs:\n grid = kwargs['grid']\n elif 'grid_function' in kwargs:\n grid = kwargs['grid_function'].grid\n\n number_of_vertices = grid.leaf_view.entity_count(2)\n number_of_elements = grid.leaf_view.entity_count(0)\n\n offset = interface.index_offset\n\n if 'vertex_index_to_file_key_map' in kwargs:\n vertex_index_to_file_key_map = kwargs['vertex_index_to_file_key_map']\n else:\n vertex_index_to_file_key_map = range(offset,number_of_vertices+offset)\n if 'element_index_to_file_key_map' in kwargs:\n element_index_to_file_key_map = kwargs['element_index_to_file_key_map']\n else:\n element_index_to_file_key_map = range(offset,number_of_elements+offset)\n\n # Create the vertex and element structure\n\n 
from collections import OrderedDict\n\n vertex_iterator = grid.leaf_view.entity_iterator(2)\n element_iterator = grid.leaf_view.entity_iterator(0)\n index_set = grid.leaf_view.index_set()\n\n vertices = OrderedDict([(vertex_index_to_file_key_map[index_set.entity_index(vertex)],vertex.geometry.corners[:,0])\n for vertex in vertex_iterator])\n elements = OrderedDict([(element_index_to_file_key_map[index_set.entity_index(element)],\n {'data':[vertex_index_to_file_key_map[index_set.sub_entity_index(element,n,2)] for n in range(3)],\n 'domain_index':element.domain}) for element in element_iterator])\n\n interface.add_grid_data(vertices,elements)\n\n # Evaluate data\n\n if 'grid_function' in kwargs:\n fun = kwargs['grid_function']\n data_type = kwargs.get('data_type',interface.default_data_type)\n\n if 'transformation' in kwargs:\n transformation = kwargs['transformation']\n else:\n transformation = lambda x: x\n\n index_set = grid.leaf_view.index_set()\n\n if data_type == 'element_node':\n local_coordinates = _np.array([[0,1,0],[0,0,1]])\n data = OrderedDict.fromkeys(element_index_to_file_key_map)\n\n for element in grid.leaf_view.entity_iterator(0):\n data[element_index_to_file_key_map[index_set.entity_index(element)]] = transformation(\n fun.evaluate(element,local_coordinates))\n interface.add_element_node_data(data,kwargs.get('label','element_node_data'))\n elif data_type == 'node':\n local_coordinates = _np.array([[0,1,0],[0,0,1]])\n data = OrderedDict.fromkeys(vertex_index_to_file_key_map)\n for element in grid.leaf_view.entity_iterator(0):\n local_data = transformation(fun.evaluate(element,local_coordinates))\n for i in range(3):\n data[vertex_index_to_file_key_map[index_set.sub_entity_index(element,i,2)]] = local_data[:,i]\n interface.add_node_data(data,kwargs.get('label','node_data'))\n elif data_type == 'element':\n local_coordinates = _np.array([[1./3],[1./3]])\n data = OrderedDict.fromkeys(element_index_to_file_key_map)\n\n for element in grid.leaf_view.entity_iterator(0):\n data[element_index_to_file_key_map[index_set.entity_index(element)]] = transformation(\n fun.evaluate(element,local_coordinates).ravel())\n interface.add_element_data(data,kwargs.get('label','element_data'))\n else:\n raise ValueError(\"data_type must be one of 'node', 'element', or 'element_node'\")\n\n interface.write(kwargs['file_name'])", "def update_args_with_file(files, args):\n args['files'] = {}\n for file_name in files:\n file = files[file_name]\n filename = file.filename\n args['files'][file_name] = filename\n return args", "def addFilenameFilter(call, args=(), kwargs={}, nodeClass='*'):", "def _store_sequencing_files(\n self,\n flow_cell_id: str,\n sequencing_files: List[str],\n tag_name: str,\n sample_id: Optional[str] = None,\n ) -> None:\n bundle_name: str = sample_id or flow_cell_id\n hk_bundle: Optional[Bundle] = self.hk.bundle(bundle_name)\n if not hk_bundle:\n hk_bundle: Bundle = self.hk.create_new_bundle_and_version(name=bundle_name)\n\n with self.hk.session_no_autoflush():\n for file in sequencing_files:\n if self._check_if_sequencing_file_is_on_bundle(file=file, bundle=hk_bundle):\n LOG.info(f\"Found file: {file}.\")\n LOG.info(\"Skipping file\")\n else:\n LOG.info(f\"Found new file: {file}.\")\n LOG.info(f\"Adding file using tag: {tag_name}\")\n self.hk.add_and_include_file_to_latest_version(\n bundle_name=bundle_name, file=Path(file), tags=[tag_name, flow_cell_id]\n )", "def register(file_format, extensions, reader, writer=None):\n register_format(\n fmt=file_format,\n 
ext_to_fmt=_extension_to_filetype,\n reader_map=_reader_map,\n writer_map=_writer_map,\n extensions=extensions,\n reader=reader,\n writer=writer,\n )", "def register(file_format, extensions, reader, writer=None):\n register_format(\n fmt=file_format,\n ext_to_fmt=_extension_to_filetype,\n reader_map=_reader_map,\n writer_map=_writer_map,\n extensions=extensions,\n reader=reader,\n writer=writer,\n )", "def handle_upload(f, attrs):\n\n # chunked = False\n dest_folder = os.path.join(app.config['UPLOAD_DIRECTORY'], attrs['qquuid'])\n dest = os.path.join(dest_folder, attrs['qqfilename'])\n save_upload(f, dest)", "def process_reg_file(filename, tracking_id, args):\n file_path = os.path.join(app.config['DRS_UPLOADS'], '{0}'.format(tracking_id), filename)\n processor = Processor(file_path, args)\n response = processor.process('registration')\n return response", "def add(self, *args, **kwargs):\n self.zipfile.write(*args, **kwargs)", "def __init__(self, filenames, **kwargs):\n self.filenames = filenames\n self.meta = assign_default_kwargs(self, kwargs, self.defaults)\n # Grab logger\n self.log = get_logger(__name__)\n\n # Performance tracking\n self.errors = []\n self.uploaded = 0\n\n # Grab db using credentials\n self.log.info('Accessing Database {}'.format(self.db_name))\n engine, self.session = get_db(self.db_name, credentials=self.credentials)\n\n self.log.info('Preparing to upload {} files...'.format(len(filenames)))", "def upload_file(\n files: List[UploadFile] = File(...),\n # JSON serialized string\n meta: Optional[str] = Form(\"null\"), # type: ignore\n additional_params: Optional[str] = Form(\"null\"), # type: ignore\n fileconverter_params: FileConverterParams = Depends(FileConverterParams.as_form), # type: ignore\n preprocessor_params: PreprocessorParams = Depends(PreprocessorParams.as_form), # type: ignore\n keep_files: Optional[bool] = False,\n):\n if not indexing_pipeline:\n raise HTTPException(status_code=501, detail=\"Indexing Pipeline is not configured.\")\n\n file_paths: list = []\n file_metas: list = []\n\n meta_form = json.loads(meta) or {} # type: ignore\n if not isinstance(meta_form, dict):\n raise HTTPException(status_code=500, detail=f\"The meta field must be a dict or None, not {type(meta_form)}\")\n\n for file in files:\n try:\n file_path = Path(FILE_UPLOAD_PATH) / f\"{uuid.uuid4().hex}_{file.filename}\"\n with file_path.open(\"wb\") as buffer:\n shutil.copyfileobj(file.file, buffer)\n\n file_paths.append(file_path)\n meta_form[\"name\"] = file.filename\n file_metas.append(meta_form)\n finally:\n file.file.close()\n\n params = json.loads(additional_params) or {} # type: ignore\n\n # Find nodes names\n converters = indexing_pipeline.get_nodes_by_class(BaseConverter)\n preprocessors = indexing_pipeline.get_nodes_by_class(PreProcessor)\n\n for converter in converters:\n params[converter.name] = fileconverter_params.dict()\n for preprocessor in preprocessors:\n params[preprocessor.name] = preprocessor_params.dict()\n\n indexing_pipeline.run(file_paths=file_paths, meta=file_metas, params=params)\n\n # Clean up indexed files\n if not keep_files:\n for p in file_paths:\n p.unlink()" ]
[ "0.61494565", "0.57136476", "0.55193377", "0.5482238", "0.5418756", "0.5412003", "0.5402928", "0.53940064", "0.53582376", "0.53195375", "0.52941066", "0.5292922", "0.5292611", "0.5275253", "0.5264585", "0.5251641", "0.52443796", "0.5214584", "0.52058154", "0.5199464", "0.5187732", "0.51728237", "0.51376677", "0.51020926", "0.51020926", "0.50119", "0.499675", "0.49698716", "0.49293357", "0.49280578" ]
0.7732723
0
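A minimal usage sketch for register_gridfs_files; the task object and file paths are hypothetical, while the default binary mode and the ("filepath", "t") tuple form follow the docstring above.

task.register_gridfs_files(
    gsr="/abs/workdir/out_GSR.nc",      # stored with mode "b" (binary, the default)
    log=("/abs/workdir/run.log", "t"),  # tuple form selects formatted/text mode
)
assert task.gridfs_files["gsr"].path == "/abs/workdir/out_GSR.nc"
assert task.gridfs_files["log"].mode == "t"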
Check whether the node is an instance of `class_or_string`. Unlike the standard isinstance builtin, the method accepts either a class or a string. In the latter case, the string is compared with self.__class__.__name__ (case-insensitive).
def isinstance(self, class_or_string): if class_or_string is None: return False import inspect if inspect.isclass(class_or_string): return isinstance(self, class_or_string) else: return self.__class__.__name__.lower() == class_or_string.lower()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def type_or_class_match(node_a, node_b):\n if isinstance(node_b['node'], type):\n return issubclass(type(node_a['node']), node_b['node'])\n elif isinstance(node_a['node'], type):\n return issubclass(type(node_b['node']), node_a['node'])\n elif isinstance(node_b['node'], xf.PatternNode):\n return isinstance(node_a['node'], node_b['node'].node)\n elif isinstance(node_a['node'], xf.PatternNode):\n return isinstance(node_b['node'], node_a['node'].node)\n return isinstance(node_a['node'], type(node_b['node']))", "def safe_isinstance(obj, class_path_str):\n # this function is copy-paste from the code of the SHAP Python library\n # Copyright (c) 2018 Scott Lundberg\n if isinstance(class_path_str, str):\n class_path_strs = [class_path_str]\n elif isinstance(class_path_str, list) or isinstance(class_path_str, tuple):\n class_path_strs = class_path_str\n else:\n class_path_strs = ['']\n\n # try each module path in order\n for class_path_str in class_path_strs:\n if \".\" not in class_path_str:\n raise ValueError(\"class_path_str must be a string or list of strings specifying a full \\\n module path to a class. Eg, 'sklearn.ensemble.RandomForestRegressor'\")\n\n # Splits on last occurence of \".\"\n module_name, class_name = class_path_str.rsplit(\".\", 1)\n\n # here we don't check further if the model is not imported, since we shouldn't have\n # an object of that types passed to us if the model the type is from has never been\n # imported. (and we don't want to import lots of new modules for no reason)\n if module_name not in sys.modules:\n continue\n\n module = sys.modules[module_name]\n\n #Get class\n _class = getattr(module, class_name, None)\n\n if _class is None:\n continue\n\n if isinstance(obj, _class):\n return True\n\n return False", "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "def is_kind_of_class(obj, a_class):\n return isinstance(obj, a_class)", "def is_kind_of_class(obj, a_class):\n return(isinstance(obj, a_class))", "def is_kind_of_class(obj, a_class):\n\n return isinstance(obj, a_class)", "def match(self, cls):\n return isinstance(self, cls)", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n else:\n return False", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n else:\n return False", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class):\n return True\n return False", "def _is_string(self, obj):\n return isinstance(obj, unicode) or isinstance(obj, str)", "def is_kind_of_class(obj, a_class):\n return (isinstance(obj, a_class))", "def isClass(self, className):\n return self.characterClass == className or self.baseClass == className", "def _isinstancetype(an_obj):\n if an_obj is None: return False\n if not PY3K:\n return isinstance(an_obj, types.InstanceType)\n typstr = str(type(an_obj))\n # the following logic works, as PyRAF users expect, in both v2 and v3\n return typstr==\"<type 'instance'>\" or \\\n (typstr.startswith(\"<class '\") and ('.' 
in typstr))", "def is_kind_of_class(obj, a_class):\n\n return (isinstance(obj, a_class))", "def is_kind_of_class(obj, a_class):\n\n if isinstance(obj, a_class):\n return True\n else:\n return False", "def is_kind_of_class(obj, a_class):\n if isinstance(obj, a_class) is True:\n return True\n else:\n return False", "def isclass(object):\r\n return isinstance(object, (type, types.ClassType))", "def isNodeType(self, t):\n return isinstance(self, t)", "def is_same_class(obj, a_class):\n return isinstance(obj, a_class)", "def is_of_type(cls, value) -> bool:\n # UTF8 = 'utf-8'\n # UTF16 = 'utf-16'\n # UTF32 = 'utf-32'\n # ASCII = 'ascii'\n # BINARY = 'binary'\n # OCTAL = 'octal'\n # HEXADECIMAL = 'hexadecimal'\n # CP1252 = 'cp1252'\n # WINDOWS1252 = 'windows-1252'\n # UNICODEESCAPE = 'unicode-escape'\n\n v = None\n if cls == cls.UTF8 or cls == cls.UTF16 or cls == cls.UTF32 or cls == cls.UNICODEESCAPE:\n try:\n v = bytes(value)\n except:\n return False\n\n if cls == cls.ASCII:\n try:\n v = ascii(value)\n except:\n return False\n\n if cls == cls.BINARY:\n try:\n v = bin(value)\n except:\n return False\n\n if cls == cls.OCTAL:\n try:\n v = oct(value)\n except:\n return False\n\n if cls == cls.HEXADECIMAL:\n try:\n v = hex(value)\n except:\n return False\n\n if cls == cls.WINDOWS1252 or cls == cls.CP1252:\n try:\n v = str(value)\n except:\n return False\n return True", "def _check_type(item, types, item_name=None):\n check_types = sum(\n (\n (type(None),)\n if type_ is None\n else (type_,)\n if not isinstance(type_, str)\n else _types[type_]\n for type_ in types\n ),\n (),\n )\n\n if not isinstance(item, check_types):\n type_name = [\n \"None\"\n if cls_ is None\n else cls_.__name__\n if not isinstance(cls_, str)\n else cls_\n for cls_ in types\n ]\n if len(type_name) == 1:\n type_name = type_name[0]\n elif len(type_name) == 2:\n type_name = \" or \".join(type_name)\n else:\n type_name[-1] = \"or \" + type_name[-1]\n type_name = \", \".join(type_name)\n item_name = \"Item\" if item_name is None else \"'%s'\" % item_name\n raise TypeError(\n f\"{item_name} must be an instance of {type_name}, \"\n f\"got {type(item)} instead.\"\n )\n\n return item", "def is_string(obj):\n return isinstance(obj, basestring)", "def typeIsString(obj):\n return type(obj) is str or _haveTypeUnicode and type(obj) is unicode", "def is_string(obj):\n return isinstance(obj, str)", "def class_is_type(cls, *seg_type: str) -> bool:\n # Use set intersection\n if cls._class_types.intersection(seg_type):\n return True\n return False" ]
[ "0.6436535", "0.5984461", "0.58243", "0.58243", "0.58243", "0.58243", "0.58243", "0.58243", "0.5785469", "0.5769264", "0.57563615", "0.57536185", "0.57536185", "0.57398254", "0.57315344", "0.5721359", "0.57201725", "0.5707082", "0.5687554", "0.5687518", "0.56612915", "0.56455106", "0.5589657", "0.54924095", "0.5458112", "0.5453892", "0.5452248", "0.5438209", "0.5412567", "0.5403363" ]
0.8296064
0
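A short sketch of the class-or-string behaviour, assuming a hypothetical Node subclass named ScfTask.

node = ScfTask()                 # hypothetical Node subclass, for illustration only
node.isinstance(ScfTask)         # True  -- ordinary isinstance check on the class
node.isinstance("scftask")       # True  -- string compared with __class__.__name__, case-insensitively
node.isinstance("ScfTask")       # True  -- casing does not matter
node.isinstance("nsctask")       # False
node.isinstance(None)            # False -- None short-circuits to False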
Convert obj into a Node instance.
def as_node(cls, obj): if isinstance(obj, cls): return obj elif is_string(obj): # Assume filepath. return FileNode(obj) elif obj is None: return obj else: raise TypeError("Don't know how to convert %s to Node instance." % obj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fromObj(self, obj):\n for k in BaseNode.SERIALIZABLE_PROPERTIES:\n if k in obj:\n # work around for migrate nodeInfo class\n if k == \"nodeInfo\":\n if isinstance(obj[k], dict):\n obj[k] = NodeInfo(obj[k][\"showInputs\"], obj[k][\"showOutputs\"], obj[k]\n [\"showLabel\"], obj[k][\"showBorder\"], obj[k][\"fill\"], obj[k][\"useNodeFont\"])\n\n setattr(self, k, obj[k])", "def __init__(self, obj):\n from lxml import objectify\n try:\n self.root = objectify.fromstring(obj.data)\n except:\n # try something else\n self.root = objectify.fromstring(obj)\n self.obj = obj", "def _to_node(self, data):\n return Node(\n id = data['ps'],\n name = data['ps'],\n state = NodeState.UNKNOWN,\n public_ip = [data['ip']],\n private_ip = [],\n driver = self.connection.driver,\n extra = {\n 'current_size' : data['memory_mb'],\n 'account_id' : data['account_id'],\n 'type' : data['type']\n }\n )", "def to_model(self):\r\n node = Node.objects.get_or_create(\r\n name=self.name,\r\n description=self.description\r\n )[0]\r\n \r\n return node", "def atom_from_aif(obj: aif.Node, config: Config) -> AtomNode:\n timestamp = dt.from_format(obj.get(\"timestamp\"), aif.DATE_FORMAT) or pendulum.now()\n\n return config.AtomNodeClass(\n id=obj[\"nodeID\"],\n metadata=config.MetadataClass(timestamp, timestamp),\n text=utils.parse(obj[\"text\"], config.nlp),\n )", "def add_node(self, obj, typ_sofi, layer):\n\n n = Node(obj)\n n.layer = layer\n\n self.nodes.add(n)", "def fromObject(cls, obj, decode=None):\n if obj.__doc__ is None:\n return cls(u'' if decode else '')\n r = cls.fromString(obj.__doc__, decode=decode)\n return r", "def push(self,obj):\n self.head = Node(obj,0,self.head)", "def convertNode(self, builder, typeName, data):\n\t\tif typeName not in self.nodeTypeMap:\n\t\t\traise Exception('Node type \"' + typeName + '\" hasn\\'t been registered.')\n\n\t\tconvertedData = self.nodeTypeMap[typeName](self, data)\n\n\t\ttypeNameOffset = builder.CreateString(typeName)\n\t\tdataOffset = builder.CreateByteVector(convertedData)\n\n\t\tObjectData.Start(builder)\n\t\tObjectData.AddType(builder, typeNameOffset)\n\t\tObjectData.AddData(builder, dataOffset)\n\t\treturn ObjectData.End(builder)", "def get_nodes_from_biolink_object(biolink_named_thing: NamedThing) -> Node:\n node = None\n if biolink_named_thing is not None:\n node = Node(biolink_named_thing.__class__.__name__, **biolink_named_thing.__dict__)\n return node", "def __init__(self, obj, datamodel=None):\n with RecursiveConverter.in_progress:\n self.obj = obj\n self.class_name = obj.__class__.__name__\n self.datamodel = datamodel\n self.is_root = datamodel is None\n if self.is_root:\n RecursiveConverter.converted_modules = {}\n RecursiveConverter.typedefs = []\n self.datamodel = VHDLModule('-', obj)\n\n # recursively convert all child modules\n self.childs = []\n\n def conv(self, node):\n if isinstance(node, VHDLList):\n if node.elements_compatible_typed:\n if isinstance(node.elems[0], VHDLModule):\n if self.is_compatible_with_converted_module(node.elems[0]):\n return\n self.childs.append(RecursiveConverter(node.elems[0].current, node.elems[0]))\n\n else:\n # dynamic list..need to convert all modules\n for x in node.elems:\n if isinstance(x, VHDLModule):\n if self.is_compatible_with_converted_module(x):\n return\n self.childs.append(RecursiveConverter(x.current, x))\n elif isinstance(node, VHDLModule):\n if self.is_compatible_with_converted_module(node):\n return\n self.childs.append(RecursiveConverter(node.current, node))\n\n if self.is_root:\n logger.info(f'Creating 
top.vhd ...')\n self.top_vhdl = TopGenerator(obj)\n\n # maybe some input/output is a convertible module?\n for node in self.inputs:\n conv(self, node)\n\n for node in self.outputs:\n conv(self, node)\n\n # iterate all functions and discover local variables that may need to be converted\n for x in self.obj.__dict__.values():\n if isinstance(x, PyhaFunc):\n for key, val in x.get_local_types().items():\n if isinstance(val, Hardware):\n node = init_vhdl_type(key, val)\n conv(self, node)\n\n # convert instance elements before the instance itself, recursive\n for node in self.datamodel.elems:\n conv(self, node)\n\n self.red_node = get_objects_rednode(obj)\n convert_name = self.get_module_converted_name(self.datamodel)\n logger.info(f'{convert_name} to VHDL ...')\n\n self.conv = convert(self.red_node, obj) # actual conversion happens here\n\n self.vhdl_conversion = str(self.conv)\n RecursiveConverter.converted_modules[convert_name] = (self.datamodel, self.vhdl_conversion)\n RecursiveConverter.typedefs.extend(self.conv.build_typedefs())", "def convertNode(cls, node):\n if isinstance(node, cls):\n if len(node) == 1:\n return cls.NodeProxy(node[0])\n return node\n elif isinstance(node, list):\n if len(node) > 1:\n return cls(node)\n else:\n return cls.NodeProxy(node[0])\n else:\n return cls.NodeProxy(node)", "def get_object(sv, expr):\r\n if expr in sv.Object: \r\n nod=sv.Object[expr] # object exists\r\n else: \r\n nod=add_object(sv, expr) # create node for intermediate expression\r\n nod.isvirtual=True # temporary node\r\n return nod", "def deserialize(self, obj):\n raise NotImplementedError", "def create_ninode(b_obj=None):\n # when no b_obj is passed, it means we create a root node\n if not b_obj:\n return block_store.create_block(\"NiNode\")\n\n # get node type - some are stored as custom property of the b_obj\n try:\n n_node_type = b_obj[\"type\"]\n except KeyError:\n n_node_type = \"NiNode\"\n\n # ...others by presence of constraints\n if has_track(b_obj):\n n_node_type = \"NiBillboardNode\"\n\n # now create the node\n n_node = block_store.create_block(n_node_type, b_obj)\n\n # customize the node data, depending on type\n if n_node_type == \"NiLODNode\":\n export_range_lod_data(n_node, b_obj)\n\n return n_node", "def visit(self, obj):\n pass", "def new(self, obj):\n pass", "def build(self, obj):\n if isinstance(obj, self.art_type):\n return obj\n elif isinstance(obj, (tuple, list, dict, set)):\n if obj.__class__ is tuple:\n return self.build_tuple(obj)\n elif obj.__class__ is dict:\n return self.build_dict(obj)\n elif obj.__class__ is list:\n return self.build_list(obj)\n else:\n return self.build_set(obj)\n elif isinstance(obj, SageObject):\n return self.build_from_magic_method(obj)\n else:\n return self.build_from_string(obj)", "def from_raw(self, robj: RawObject) -> RootNode:\n cooked = self.schema.from_raw(robj)\n return RootNode(cooked, self.schema, cooked.timestamp)", "def serialize(self, obj):\n return obj", "def shift(self, obj):\n new_node = SingleLinkedListNode(obj, None)\n if self.begin is None:\n self.begin = new_node\n self.end = self.begin\n else:\n new_node.next = self.begin\n self.begin = new_node", "def add(self, obj):\n if self._element is None: # If the node is empty, tree below is empty\n self._element = obj # Add the object into the node\n elif self._element is obj:\n return None # If the object is already in the tree\n elif obj < self._element:\n if not self._leftchild:\n new = BSTNode(obj) # Create a new node for the object\n self._leftchild = new # Link current node to 
new node\n new._parent = self\n self._rebalance() # Self is the parent, so rebalance it\n else:\n self._leftchild.add(obj)\n elif obj > self._element:\n if not self._rightchild:\n new = BSTNode(obj)\n self._rightchild = new\n new._parent = self\n self._rebalance()\n else:\n self._rightchild.add(obj)\n return obj", "def _bddnode(root, lo, hi):\n\t# print(\"_bddnode\")\n\tif lo is hi:\n\t\tnode = lo\n\telse:\n\t\tkey = (root, lo, hi)\n\t\ttry:\n\t\t\tnode = _NODES[key]\n\t\texcept KeyError:\n\t\t\tnode = _NODES[key] = BDDNode(*key)\n\treturn node", "def create(self, obj):\r\n request = http.Request('POST', self.get_url(), self.wrap_object(obj))\r\n\r\n return request, parsers.parse_json", "def create_node(identifier, *args, **kwargs):\r\n\r\n d = node_dictionary()\r\n node_class = d[identifier]\r\n node = node_class(*args, **kwargs)\r\n return node", "def _object_to_tree(self, adt_object):\n\n objtype = adt_object.objtype\n name = adt_object_to_element_name(adt_object)\n\n root = Element(name)\n declared_ns = self._declare_xmlns(root, objtype.xmlnamespace)\n\n if objtype.code is not None:\n root.add_attribute('adtcore:type', objtype.code)\n\n self._build_tree(root, adt_object, declared_ns)\n return root", "def create_node(self, hx, data):\n return Node(hx, data)", "def new(self, obj):\n if obj:\n key = obj.__class__.__name__ + \".\" + obj.id\n self.__objects[key] = obj", "def _to_node(self, data, ex_cloud_service_name=None, virtual_ips=None):\n\n remote_desktop_port = \"\"\n ssh_port = \"\"\n public_ips = virtual_ips or []\n\n if data.instance_endpoints is not None:\n if len(data.instance_endpoints) >= 1:\n public_ips = [data.instance_endpoints[0].vip]\n\n for port in data.instance_endpoints:\n if port.name == \"Remote Desktop\":\n remote_desktop_port = port.public_port\n\n if port.name == \"SSH\":\n ssh_port = port.public_port\n\n return Node(\n id=data.role_name,\n name=data.role_name,\n state=self.NODE_STATE_MAP.get(data.instance_status, NodeState.UNKNOWN),\n public_ips=public_ips,\n private_ips=[data.ip_address],\n driver=self.connection.driver,\n extra={\n \"instance_endpoints\": data.instance_endpoints,\n \"remote_desktop_port\": remote_desktop_port,\n \"ssh_port\": ssh_port,\n \"power_state\": data.power_state,\n \"instance_size\": data.instance_size,\n \"ex_cloud_service_name\": ex_cloud_service_name,\n },\n )", "def neo4j_to_node(neo4j_node: neo4j.graph.Node) -> Node:\n props = dict(neo4j_node)\n node_id = props.pop(\"id\")\n db_ns, db_id = process_identifier(node_id)\n return Node(db_ns, db_id, neo4j_node.labels, props)" ]
[ "0.6805639", "0.61491376", "0.5969413", "0.58747584", "0.5704297", "0.5641489", "0.56377715", "0.5610259", "0.55141354", "0.54903036", "0.547982", "0.54648393", "0.5442755", "0.5429253", "0.5429165", "0.53956425", "0.5392377", "0.5380667", "0.5366155", "0.53627306", "0.53557754", "0.5344963", "0.53410476", "0.5326461", "0.52719915", "0.5268806", "0.5268212", "0.52482843", "0.5242376", "0.52222675" ]
0.8197142
0
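A compact summary of the branches in as_node; existing_node stands for any object that is already a Node.

Node.as_node(existing_node)          # returns existing_node unchanged
Node.as_node("/abs/path/out_DDB")    # a string is treated as a filepath and wrapped in a FileNode
Node.as_node(None)                   # None passes through as None
Node.as_node(42)                     # raises TypeError: don't know how to convert ...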
Return a relative version of the workdir.
def relworkdir(self): if getattr(self, "workdir", None) is None: return None try: return os.path.relpath(self.workdir) except OSError: # current working directory may not be defined! return self.workdir
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rel_cwd():\n return os.path.relpath(os.getcwd(), git_toplevel())", "def build_relpath(self):\n return join_path(\"..\", self.build_dirname)", "def pathtofolder():\n return os.getcwd()", "def relDir(self, cwd=None, root=None):\n return os.path.dirname(self.relName(cwd, root)) or \".\"", "def get_path() -> str:\n return os.path.dirname(os.path.realpath(__file__))", "def localdir():\n root = __file__\n if os.path.islink(root):\n root = os.path.realpath(root)\n directory = os.path.dirname(os.path.abspath(root))\n return os.path.normpath(os.path.join(directory, \"../settings/\"))", "def work_dir(self):\n return self._work_dir", "def basepath():\n return os.path.abspath(\n os.path.join(\n os.path.dirname(__file__),\n '..'\n )\n )", "def get_base_dir(self):\n dir_of_this_file = os.path.dirname(os.path.abspath(__file__))\n return os.path.dirname(dir_of_this_file)", "def workdir(self) -> str:\n return self._workdir", "def _get_reporoot():\n from os import path\n import acorn\n medpath = path.abspath(acorn.__file__)\n return path.dirname(path.dirname(medpath))", "def get_working_dir():\n working_dir = os.path.dirname(os.path.abspath(__file__))\n return working_dir", "def get_workdir() -> str:\n Config.__get()\n assert Config.__config is not None\n return get_abspath(Config.__config.get('wsgi', 'workdir').strip())", "def workDir(self):\n self.debug.printHeader()\n #if hasattr(self.settings, \"workDir\"): toret=self.settings.workDir # 025 todo 143\n if self.settings.config.has_section(\"files\") and self.settings.config.has_option(\"files\",\"workDir\"):\n # toret=self.settings.get(\"files\",\"workDir\") 025\n toret=self.settings.workDir\n else: toret=os.environ['HOME']+'/xxz'\n # Also could write workdir back to settings.\n return toret", "def get_path():\n return path.abspath(path.dirname(path.dirname(__file__)))", "def get_main_dir():\n return os.path.dirname(os.getcwd())", "def base_dir(self):\n return self.cm.get(YAML_CONFIG_WORKING_REPO)", "def this_folder():\n if getattr(sys, 'frozen', False):\n # The application is frozen\n return os.path.dirname(sys.executable)\n else:\n # The application is not frozen\n return os.path.dirname(__file__)", "def directory_root():\n\timport os\n\treturn os.path.join(os.path.dirname(__file__), '../..')", "def get_relative_path(self, source: str) -> str:\n abs_top_level_dir = os.path.normcase(\n os.path.normpath(self.get_top_level_directory()))\n abs_working_dir = os.path.normcase(\n os.path.normpath(os.path.join(os.getcwd(), source)))\n\n if not abs_working_dir.startswith(abs_top_level_dir):\n logger.debug(\n \"Repository top level directory is '{}'. 
Specified working directory is '{}'\".format(\n abs_top_level_dir, {abs_working_dir}))\n raise Exception(\n \"Experiment file is not inside current \"\n + self.get_type() + \" directory.\")\n\n result = abs_working_dir.replace(abs_top_level_dir, \"\")\n return self.norm_to_posix_path(result)", "def srcdir(path):\n if not workflow.included_stack:\n return None\n return workflow.current_basedir.join(path).get_path_or_uri()", "def root_dir():\r\n return Path(__file__).parent.parent", "def _GetRelPath(self, filename):\r\n absname = os.path.join(self.repo_dir, filename)\r\n return os.path.relpath(absname)", "def in_rwd(path):\n return os.path.join(env.remote_workdir, path)", "def get_relative_pathname(self):\n return os.path.join(Exam.EXAM_FILES_LOCATION,\n str(self.unique_id)[0:2],\n str(self.unique_id) + self.file_ext)", "def rel_path(self) -> str:\n return os.path.dirname(self._filepath_oracle.filepath())", "def getGitPath() -> osp:\n current_dir = osp.dirname(osp.realpath(__file__))\n git_dir = osp.dirname(osp.dirname(current_dir))\n return git_dir", "def dir(self) -> str:\n return f'{os.path.dirname(self.path)}/'.lstrip('/')", "def cwd_in_path():\n ...", "def relative_path(filename):\n length = len(os.path.abspath(DOC_BUILD_DIR)) + 1\n return os.path.abspath(filename)[length:]" ]
[ "0.7572184", "0.74179256", "0.71759975", "0.7152729", "0.7115556", "0.71097624", "0.7082853", "0.7065006", "0.70645964", "0.7047222", "0.70466393", "0.7030355", "0.70125103", "0.70114654", "0.70030177", "0.70021874", "0.6992148", "0.69542587", "0.69249356", "0.6917377", "0.6879868", "0.68758994", "0.6853022", "0.68491805", "0.68436795", "0.68429345", "0.68405414", "0.68310916", "0.68211806", "0.681429" ]
0.751337
1
Set the node identifier. Use it carefully!
def set_node_id(self, node_id): self._node_id = node_id
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def node_id(self, node_id: int):\r\n self._node_id = node_id", "def set_id(self, refobj, identifier):\n cmds.setAttr(\"%s.identifier\" %refobj, identifier)", "def node_id(self, node_id):\n\n self._node_id = node_id", "def setNodeId(self, recId):\n if self.cursor:\n self.cursor.nodeId = recId", "def setNodeIdFromTitle(self, node_id):\n calcEngine = CalcEngine.factory(self.client_session)\n new_id = calcEngine.setNodeIdFromTitle(node_id)\n return new_id[\"node_id\"]", "def setId(self, identifier):\n self.identifier = identifier", "def set_node(self, uri, info):\n\t\tself.node_uri = uri\n\t\tself.node_info = info", "def ion_node_id(self, ion_node_id):\n\n self._ion_node_id = ion_node_id", "def set_identifier(self, uid):\n\n self.uid = uid\n\n self.set_unique_metadata('DC', 'identifier', self.uid, {'id': self.IDENTIFIER_ID})", "def _set_id(self):\n raise NotImplementedError()", "def setIdentifier(self, identifier):\n self._config['identifier'] = identifier", "def setIdentifier(self, identifier):\n self._config['identifier'] = identifier", "def _set_id(self, value):\n pass", "def set_node(self, node_id):\n info = self._get_info(self.EXPECTED)\n if node_id in info:\n self._node_id = node_id\n return True\n return False", "def identifier(self, identifier):\n self._identifier = identifier", "def identifier(self, identifier):\n self._identifier = identifier", "def set_node(self, node):\n self.__node = node", "def set_node(self, n, value):\n node = self.get_node(n)\n if node:\n node.value = value", "def identifier(self, identifier):\n\n self._identifier = identifier", "def identifier(self, identifier):\n\n self._identifier = identifier", "def identifier(self, identifier):\n\n self._identifier = identifier", "def identifier(self, identifier):\n\n self._identifier = identifier", "def set_tid(self, tid):\n self.__tid = tid", "def tree_id(self, value):\n self._tree_id = value", "def set_ID(self, x):\n x = str(x)\n if self.ID != x:\n self.ID = x", "def setup_label(self, node):\n self._options['id'] = node.argument", "def identifier(self, identifier: str):\n\n self._identifier = identifier", "def set_ident(self) -> int:\n return self._set_ident", "def readjust_node_id(self, lowerbound = 1):\n for i in range(lowerbound, len(self.nodes)):\n if self.nodes[i]:\n self.nodes[i].node_id = i", "def _auto_name(self):\n return \"node_\"+str(self._id)" ]
[ "0.8026486", "0.77016544", "0.7650171", "0.7358727", "0.70628834", "0.7047012", "0.6896155", "0.6876746", "0.6874612", "0.6835747", "0.67891157", "0.67891157", "0.67755765", "0.67752916", "0.67473626", "0.67473626", "0.6714017", "0.6624899", "0.6569984", "0.6569984", "0.6569984", "0.6569984", "0.6555837", "0.6548094", "0.6543438", "0.6532563", "0.6516263", "0.64785933", "0.64654016", "0.64544934" ]
0.778731
1
True if this node is a file
def is_file(self): return isinstance(self, FileNode)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_file(self):\n return self.type == \"file\"", "def is_file(self):\n return self.tipo == 'file' or self.tipo is None", "def is_eficas_file(node):\n return (node.get_attr(Type).read() == EficasFile.ftype)", "def is_file(self):\n return not self.is_directory", "def isfile(self):\n return not self.isdir()", "def isfile(self):\n return os.path.isfile(self.path)", "def isfile (self, path):\r\n pass", "def is_file(field):\n return isinstance(field.field.widget, forms.FileInput)", "def is_discord_file(obj):\n return (obj.__class__.__name__) == \"File\"", "def is_file (self, path=None, ttype=None) :\n if path : return self._adaptor.is_file (path, ttype=ttype)\n else : return self._adaptor.is_file_self ( ttype=ttype)", "def is_file(file_to_test):\r\n return all(hasattr(file_to_test, method) for method in ['read', 'name'])", "def is_file(self, path: PathLike):", "def is_file_o(value):\n if not (type(value) is str and os.path.split(value)[0]):\n return False\n else:\n return True", "def _IsFile(self, file_message):\n message_type = file_message.message_type\n return (message_type == FileMessage.FILE_DOWNLOAD or\n message_type == FileMessage.FILE_UPLOAD or\n message_type == FileMessage.FILE_CLOUD_COPY or\n message_type == FileMessage.FILE_DAISY_COPY or\n message_type == FileMessage.FILE_LOCAL_COPY or\n message_type == FileMessage.FILE_REWRITE or\n message_type == FileMessage.FILE_HASH)", "def has_file(self, name):\n return bool(self.input(name).__class__.__name__ == 'cgi_FieldStorage')", "def is_file(self):\n\n url_path = self.url.split('/')\n if re.match(r\".+\\.\\w+\", url_path[-1]):\n # Find <file_name>.<extension>\n return True\n return False", "def is_file_i(value):\n if not (type(value) is str and os.path.isfile(value)):\n return False\n else:\n return True", "def is_input_file(self):\r\n return self.depth == 0", "def is_multi_file(self):\n return 'files' in self.torrent['info']", "def on_disk(self):\n return isinstance(self._subarray, FileArray)", "def isFileObject(fileObj):\n if sys.version_info[0] == 2:\n return isinstance(fileObj, file)\n else:\n # for python 3:\n # has read() method for:\n # io.IOBase\n # io.BytesIO\n # io.StringIO\n # io.RawIOBase\n return hasattr(fileObj, 'read')", "def is_declaring_file(self, address, file_path):", "def has_file(self) -> bool:\n return self._file is not None", "def isfile(self, path):\n return os.path.isfile(path)", "def has_file(self, doc):\n return len(doc.package.files) != 0", "def _is_file(value: str) -> bool:\n file_in = os.path.expanduser(value)\n return os.path.isfile(file_in) and os.access(file_in, os.R_OK)", "def is_fs_file(pathname: Union[str, os.PathLike]) -> bool:\n return os.path.isfile(pathname)", "def isfile(path):\n return get_instance(path).isfile(path)", "def is_file_type_error(self):\n return self._tag == 'file_type_error'", "def is_separate_file(self):\n return self.uri is not None and not self.has_data_uri" ]
[ "0.823697", "0.8187442", "0.79591274", "0.7515635", "0.7338623", "0.72390836", "0.7206723", "0.7115239", "0.70083743", "0.69566363", "0.68873036", "0.6885846", "0.68853", "0.68246317", "0.6806279", "0.67822725", "0.6668802", "0.66520804", "0.6606101", "0.660243", "0.65713197", "0.6551837", "0.65241444", "0.6508636", "0.6471253", "0.64587367", "0.64171296", "0.641677", "0.63863117", "0.63547033" ]
0.90234685
0
True if this node is a Task
def is_task(self): from .tasks import Task return isinstance(self, Task)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_task_stagnant(task):", "def is_task(self, task_id, tasks):\r\n for t in tasks:\r\n if t.id == task_id:\r\n return True\r\n return False", "def _is_python_task(task, pidstr):\n if str(task.pid) != pidstr:\n return False\n else:\n return True", "def isNodeType(self, t):\n return isinstance(self, t)", "def __contains__(self, task):\n return task in self._tasks", "def is_task_in_schedule(self, tid: str) -> bool:\n return tid in self.__tasks", "def can_run(self, task: \"TaskView\") -> Union[bool, str]:\n return True", "def _verify_task(self, task_type: str = None) -> bool:\n\n return task_type in [\n self.BINARY_CLASSIFICATION, self.CATEGORICAL_CLASSIFICATION,\n self.REGRESSION\n ]", "def task_type(self):\n pass", "def is_task_runnable(self, task: Task) -> bool:\n if any([self._task_status_dict[_] == self.FAIL for _ in task.dependencies]):\n self._task_status_dict[task] = self.FAIL\n return False\n ret = self._task_status_dict[task] == self.TODO and reduce(\n lambda a, b: a and b,\n [self._task_status_dict[_] == self.SUCCESS for _ in task.dependencies],\n True\n )\n return ret", "def __contains__(self, name):\n return name in self._tasks", "def test_task_instance(self) -> None:\n self.assertTrue(isinstance(self.test_task, Tasks))", "def _is_task_visible(context, task):\n # Is admin == task visible\n if context.is_admin:\n return True\n\n # No owner == task visible\n if task['owner'] is None:\n return True\n\n # Perform tests based on whether we have an owner\n if context.owner is not None:\n if context.owner == task['owner']:\n return True\n\n return False", "def is_connecting(self):\n return self._task is not None", "def is_cont_node():\n return False", "def _check_task(self, task: Task) -> bool:\n try:\n extents = list(fiemap(task.path, sync=task.frequency > 1))\n except OSError:\n self.logger.error('Error#%d %s', task.id, task.path, exc_info=True)\n return False\n\n if not extents:\n return False\n\n planner = Planner(self.planner_params, extents)\n clusters = planner.result()\n\n if not clusters:\n return False\n\n task.extents = extents\n task.clusters = clusters\n\n return True", "def is_goal(self, node):\n # print(\"is {} in {}\".format(node, self.goal_nodes))\n if node in self.goal_nodes:\n return True", "def _task_is_running(course_id, task_type, task_key):\r\n running_tasks = InstructorTask.objects.filter(\r\n course_id=course_id, task_type=task_type, task_key=task_key\r\n )\r\n # exclude states that are \"ready\" (i.e. not \"running\", e.g. 
failure, success, revoked):\r\n for state in READY_STATES:\r\n running_tasks = running_tasks.exclude(task_state=state)\r\n return len(running_tasks) > 0", "def task_type(cls):\r\n raise NotImplementedError()", "def is_task_complete(self):\n if self._task is None:\n raise UserWarning(\"No task registered\")\n\n return self._task.is_complete()", "def is_registered(task_name):\n if tasks.find({'name': task_name}).count() > 0:\n return True\n else:\n return False", "def should_run(self, task: \"TaskView\") -> Union[bool, Priority]:\n return self.configSpec.should_run()", "def is_async(self) -> bool:", "def check(self, *args, **kwargs):\n test, traceback = super(ContinueTask, self).check(*args, **kwargs)\n\n if not isinstance(self.parent, (LoopTask, WhileTask)):\n test = False\n mess = 'Incorrect parent type: {}, expected LoopTask or WhileTask.'\n traceback[self.path + '/' + self.name + '-parent'] = \\\n mess.format(self.parent.task_id)\n\n return test, traceback", "def needs_root_task(expr: Any) -> bool:\n if not isinstance(expr, TaskExpression) or isinstance(expr, SchedulerExpression):\n return True\n\n return any(\n isinstance(arg, TaskExpression) for arg in iter_nested_value((expr.args, expr.kwargs))\n )", "def _include_task(self, task_or_block, loop_counter, play_vars, graph, node_name_prefix, color, parent_node_id,\n parent_node_name):\n\n self.display.vv(\"Adding the task '{}' to the graph\".format(task_or_block.get_name()))\n\n if not task_or_block.evaluate_tags(only_tags=self.options.tags, skip_tags=self.options.skip_tags,\n all_vars=play_vars):\n self.display.vv(\"The task '{}' is skipped due to the tags.\".format(task_or_block.get_name()))\n return False\n\n task_edge_label = str(loop_counter)\n if len(task_or_block.when) > 0:\n when = \"\".join(map(str, task_or_block.when))\n task_edge_label += \" [when: \" + when + \"]\"\n\n task_name = clean_name(node_name_prefix + self.template(task_or_block.get_name(), play_vars))\n # get prefix id from node_name\n id_prefix = node_name_prefix.replace(\"[\", \"\").replace(\"]\", \"\").replace(\" \", \"_\")\n task_id = id_prefix + str(uuid.uuid4())\n edge_id = \"edge_\" + str(uuid.uuid4())\n\n graph.node(task_id, label=task_name, shape=\"octagon\", id=task_id)\n graph.edge(parent_node_name, task_id, label=task_edge_label, color=color, fontcolor=color, style=\"bold\",\n id=edge_id)\n self.graph_representation.add_link(parent_node_id, edge_id)\n self.graph_representation.add_link(edge_id, task_id)\n\n return True", "def is_resource_node(self):\n return self.camera is not None or self.mesh is not None", "def is_await(node):\n return (expr_check(KW_AWAIT, 1, 1, node) or\n isinstance(node, Await))", "def isTimeForTask(self, task_times):\n if self.run_type.startswith('timed'):\n time_since_start = (time.time() - self.start_times['run'])\n remaining_time = self.max_time * 60 - time_since_start\n mean_task_time = np.mean(task_times)\n self.tee(\" projected task time: %s, remaining time: %s\"%(\\\n HMStime(mean_task_time), HMStime(remaining_time)), process=process)\n if mean_task_time > remaining_time:\n return False\n else:\n return True", "def task_type(cls):\n raise NotImplementedError()" ]
[ "0.7443283", "0.6947819", "0.6818511", "0.6777766", "0.6695782", "0.6610532", "0.65350896", "0.6530874", "0.64364797", "0.6271983", "0.622673", "0.62005734", "0.6075867", "0.5960482", "0.59500843", "0.5931306", "0.5922021", "0.58936006", "0.58829546", "0.5872167", "0.5845682", "0.5838681", "0.5837312", "0.581809", "0.5802476", "0.57993716", "0.5796483", "0.5786035", "0.5782532", "0.5764993" ]
0.8342237
0
True if this node is a Work
def is_work(self):
        from .works import Work
        return isinstance(self, Work)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isNodeType(self, t):\n return isinstance(self, t)", "def workable(self) -> bool:\n return self._strategy.workable", "def is_worker_thread():\n try:\n return worker_thread_data.is_worker_thread\n except AttributeError:\n return False", "def is_task(self):\n from .tasks import Task\n return isinstance(self, Task)", "def test_is_work_data(self):\n self.assertEqual(self.connector.is_work_data(self.work_data), True)\n self.assertEqual(self.connector.is_work_data(self.edition_data), False)", "def is_on(self):\n return self.state == WORKING_STATE", "def is_on(self):\n return self.state == WORKING_STATE", "def is_on(self):\n return self.state == WORKING_STATE", "def is_cont_node():\n return False", "def is_at_work(cls, sim_info: SimInfo) -> bool:\n for career in cls.get_all_careers_for_sim_gen(sim_info):\n if career.currently_at_work:\n return True\n return False", "def subectIsSelf():", "def is_node_support(self, node):\n return self.node_attribute(key=node, name=\"type\") == \"support\"", "def node_inode(self):\n return False", "def node_inode(self):\n return False", "def test_workdays_is_workdays(self):\n self.assertTrue(isinstance(self.expander._workdays, Workdays))", "def is_leaf(self):\n return isinstance(self, Leaf)", "def subectIsSelf():\n return (isinstance(subject, PartyProxy))", "def inside_itself(self):\n for i in range(2, len(self.nodes)):\n if self.nodes[0] == self.nodes[i]:\n return True\n return False", "def _is_working():\n global _worker\n return _worker is not None and _worker.is_alive()", "def is_root(self) -> bool:\n parent_type = self.parent_id.split(\"_\", 1)[0]\n return parent_type == self._reddit.config.kinds[\"submission\"]", "def is_resource_node(self):\n return self.camera is not None or self.mesh is not None", "def is_working(self):\n if not self.__th:\n return False\n return self.__th.is_alive()", "def has_node(self, ntype, nid):\n return ntype in self._node_index and nid in self._node_index[ntype]", "def is_under_main_root(self, workunit):\r\n return workunit.root() == self._main_root_workunit", "def leaf(self, node: object) -> bool:\n if node.left is None and node.right is None:\n return True\n\n else:\n return False", "def __contains__ (self, item):\n if isinstance(item, Node):\n item = item.id\n return item in self.network", "def workload_management_network(self) -> pulumi.Output[bool]:\n return pulumi.get(self, \"workload_management_network\")", "def is_leaf(self, node: object) -> bool:\n if node.left == None and node.right == None:\n return True\n else:\n return False", "def isLeaf(self):\n return self.left is None and self.right is None", "def isNodeLeaf ( self ):\n return self.nodes is None or len ( self.nodes ) == 0\n # End isNodeLeaf" ]
[ "0.63182133", "0.6092663", "0.59042835", "0.5891577", "0.57657576", "0.5746107", "0.5746107", "0.5746107", "0.57207537", "0.56583154", "0.5650686", "0.56354994", "0.562447", "0.562447", "0.56225556", "0.55459434", "0.55150414", "0.5466359", "0.54564786", "0.544544", "0.5416866", "0.5393417", "0.53698546", "0.5343783", "0.5336834", "0.5334566", "0.53315276", "0.53120005", "0.5309009", "0.5288901" ]
0.80919564
0
True if this node is a Flow
def is_flow(self):
        from .flows import Flow
        return isinstance(self, Flow)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isFlow(self) -> bool:\n ...", "def isNodeType(self, t):\n return isinstance(self, t)", "def _is_sink() -> bool:\n\n def _is_inplace(n: Node):\n \"\"\"Get the inplace argument from ``torch.fx.Node``\n \"\"\"\n inplace = False\n if n.op == \"call_function\":\n inplace = n.kwargs.get(\"inplace\", False)\n elif n.op == \"call_module\":\n inplace = getattr(n.graph.owning_module.get_submodule(n.target), \"inplace\", False)\n return inplace\n\n def _is_shape_consistency(n: Node):\n \"\"\"Check if this node is shape-consistency node (i.e. ``runtime_apply`` or ``runtime_apply_for_iterable_object``)\n \"\"\"\n return n.target in [runtime_apply, runtime_apply_for_iterable_object, runtime_comm_spec_apply]\n\n return not sum([v for _, v in deps.items()]) and not any(map(_is_inplace, n.users)) and not any(\n map(_is_shape_consistency, n.users))", "def has_data_flow(self) -> bool:\n return self.graph_count and not self.data_flow_null_count", "def __is_tree_node(self, node):\n if not node.input:\n if len(node.output) > 1:\n return False\n\n if len(node.output) > 1:\n return False\n\n for input_node in node.input:\n cls = self.__is_tree_node(input_node)\n if not cls:\n return False\n return True", "def isSink(self):\n return (len(self.children()) == 0)", "def is_task(self):\n from .tasks import Task\n return isinstance(self, Task)", "def is_dag(self):\n if nx.is_directed_acyclic_graph(Node.G):\n return True\n else:\n return False", "def is_event(g, node):\n if node not in g.nodes():\n print('Not a node in the graph')\n return False\n else:\n if g.node[node]['type'] == 'event':\n return True\n else:\n return False", "def is_file(self):\n return isinstance(self, FileNode)", "def is_tcp(self) -> bool:\n return self.proto == IP_TCP", "def is_in_normal_flow(self):\n return not (\n self.is_floated() or self.is_absolutely_positioned() or\n self.is_running() or self.is_footnote())", "def isSource(self):\n return (len(self.parents()) == 0)", "def is_color_flow_enabled(self) -> bool:\n return int(self._color_flow) == ACTIVE_COLOR_FLOWING", "def has_node(self, name):\n return self.source_net.has_node(name)", "def is_node_output_tensor(node: pippy.fx.Node) -> bool:\n type_ = node.meta.get(\"type\", None)\n return type_ is not None and issubclass(type_, torch.Tensor)", "def has_data_flow(self) -> bool:\n return self.data_flow_steps is not None", "def isframe(object):\r\n return isinstance(object, types.FrameType)", "def dstflow(self):\n if self.name in conditional_branch + unconditional_branch:\n return True\n return False", "def is_frontier_node(node):\r\n if isinstance(node, ast.Return):\r\n return True\r\n elif isinstance(node, ast.If):\r\n fields = dict(ast.iter_fields(node))\r\n body = fields['body']\r\n orelse = fields['orelse']\r\n if (ModuleVisitor.is_frontier_node(body) and \r\n ModuleVisitor.is_frontier_node(orelse)):\r\n return True\r\n else: \r\n return False\r\n elif isinstance(node, ast.For):\r\n fields = dict(ast.iter_fields(node))\r\n body = fields['body']\r\n if ModuleVisitor.is_frontier_node(body):\r\n return True\r\n else:\r\n return False\r\n elif isinstance(node, ast.While):\r\n fields = dict(ast.iter_fields(node))\r\n body = fields['body']\r\n if ModuleVisitor.is_frontier_node(body):\r\n return True\r\n else:\r\n return False\r\n elif isinstance(node, list):\r\n num_frontiers = [ModuleVisitor.is_frontier_node(c) \r\n for c in node].count(True)\r\n if num_frontiers > 0:\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False", "def is_structural(self):\n\n if self.depth > 
1:\n\n if (self.path[0] == \"input\") and (self.path[1] in gs.all_elements):\n\n return True\n\n return False", "def is_directed(self) -> bool:\n return True", "def is_vertex(self):\n return True", "def is_vertex(self):\n return True", "def is_graph_isomorphic(self):\n out=True\n for node in self.node_names:\n self.move_to_node(node)\n if not self.check_closed_path:\n out=False\n return out", "def type_match(graph_node, pattern_node):\n if isinstance(pattern_node['node'], xf.PatternNode):\n return isinstance(graph_node['node'], pattern_node['node'].node)\n return isinstance(graph_node['node'], type(pattern_node['node']))", "def IsFather(self, *args):\n return _XCAFDoc.XCAFDoc_GraphNode_IsFather(self, *args)", "def isWellFormedNode(self, *args):\n return _libsbml.ASTBasePlugin_isWellFormedNode(self, *args)", "def flow(self):\n return self._flow", "def is_bare (self):\n # If there is no VNF\n if len([v for v in self.nfs]) == 0:\n fr_sum = sum([sum(1 for fr in i.ports.flowrules) for i in self.infras])\n # And there is no flowrule in the ports\n if fr_sum == 0:\n sg_sum = len([sg for sg in self.sg_hops])\n # And there is not SG hop\n if sg_sum == 0:\n e2e_sum = len([sg for sg in self.reqs])\n if e2e_sum == 0:\n return True\n return False" ]
[ "0.82888186", "0.6625373", "0.6044185", "0.5894704", "0.58589464", "0.5831481", "0.58194137", "0.58000535", "0.5776091", "0.57718563", "0.5769203", "0.5670171", "0.5609499", "0.56082875", "0.5608028", "0.55946344", "0.5508543", "0.5454714", "0.5452838", "0.5450906", "0.5432121", "0.54209226", "0.541516", "0.541516", "0.5395914", "0.5388364", "0.53865606", "0.53820974", "0.53630066", "0.5362882" ]
0.82321334
1
Returns a list with the status of the dependencies.
def deps_status(self):
        if not self.deps:
            return [self.S_OK]

        return [d.status for d in self.deps]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getDependenciesList(self) -> List[Mapping[Any, Any]]:\n if self._dependencyList is not None:\n return self._dependencyList\n\n chartfile = self.getChartFile()\n if chartfile['apiVersion'] == 'v2':\n if 'dependencies' in chartfile:\n self._dependencyList = chartfile['dependencies']\n else:\n self._dependencyList = []\n elif chartfile['apiVersion'] == 'v1':\n self.readArchiveFiles()\n if self._archiveFiles is not None and 'requirements.yaml' in self._archiveFiles:\n self._dependencyList = self._getFile('requirements.yaml')['dependencies']\n else:\n self._dependencyList = []\n else:\n raise ConfigurationError('Unknown chart file version: {}'.format(chartfile))\n return self._dependencyList", "def dependencies(self) -> typing.Optional[typing.List[aws_cdk.core.IDependable]]:\n return self._values.get('dependencies')", "def dependencies(self) -> List[Bundle]:\n return []", "def getDependencyList(self):\n return self.getDocumentedObject().getDependencyList()", "def git_status():\n\tl = []\n\tdebug(\"Not implemented\",1)\n\n\treturn l", "def dependencies(self):\n return self._dependency_analyzer.GetDependencies(\n [self.stacktrace.crash_stack] if self.stacktrace else [])", "def get_dependencies(self):\n return [\"make\", \"g++\", \"gcc\", \"cmake-2.8.12.1\", \"boost_1_56_0\"]", "def get_dependencies(self):\n raise NotImplementedError()", "def wait_for_dependencies(self, depends_on):\n\n while any(self._name_result[d] is None for d in depends_on):\n with self._done_event:\n self._done_event.wait()\n return [d for d in depends_on if not self._name_result[d]]", "def list_dependencies(self, value):\n try:\n self.dependency_re = self.dependency_re or re.compile(r\"\\${\\w*}\")\n matched = self.dependency_re.findall(value)\n if matched:\n dependencies = [match[2:-1] for match in matched if match[2:-1] != self.name]\n return list(set(dependencies))\n except:\n pass\n return []", "def get_dependencies(self):\n return [[\"uuid\", \"ossp-uuid\"]]", "def dependencies(self, dep_context):\n if self.strict_deps:\n return self.target.strict_dependencies(dep_context)\n else:\n return list(self.target.closure(bfs=True, **dep_context.target_closure_kwargs))", "def get_dependencies(self):\n dependencies = self._dependencies\n if self.ansible is not None:\n dependencies.append(\"ansible=={}.*\".format(self.ansible))\n else:\n dependencies.append(\"ansible\")\n # Drivers can have their own dependencies\n if self.scenario.driver is not None \\\n and self.scenario.driver in DRIVER_DEPENDENCIES.keys():\n dependencies.extend(DRIVER_DEPENDENCIES[self.scenario.driver])\n # Scenarios can specify a requirements.txt\n if self.scenario.requirements is not None:\n dependencies.append(\"-r\" + self.scenario.requirements)\n return dependencies", "def str_deps(self):\n lines = []\n app = lines.append\n\n app(\"Dependencies of node %s:\" % str(self))\n for i, dep in enumerate(self.deps):\n app(\"%d) %s, status=%s\" % (i, dep.info, str(dep.status)))\n\n return \"\\n\".join(lines)", "def get_dependencies():\n return config.check_driver_dependencies(\n __virtualname__, {\"profitbricks\": HAS_PROFITBRICKS}\n )", "def test_get_dependency_list(client, dependency):\n headers = {\"Accept\": \"application/json\"}\n response = client.open(\"/dependency\", method=\"GET\", headers=headers)\n assert response.status_code == 200\n assert len(response.json[\"dependencies\"]) == 1\n assert (\n response.json[\"dependencies\"][0][\"component_version_id\"] == dependency.component_version_id\n )\n assert (\n 
response.json[\"dependencies\"][0][\"dependency_version_id\"]\n == dependency.dependency_version_id\n )", "def dependencies(project_name):\n deps = []\n logging.info('Locating {}'.format(project_name))\n located = distlib.locators.locate(project_name, prereleases=True)\n if located is None:\n logging.warn('{} not found'.format(project_name))\n return []\n for dep in located.run_requires:\n # Drop any version details from the dependency name.\n deps.append(just_name(dep))\n return deps", "def get_fsleyes_deps():\n\n # The dependency list is stored in requirements.txt\n with open(op.join(basedir, 'requirements.txt'), 'rt') as f:\n install_requires = f.readlines()\n\n return [i.strip() for i in install_requires]", "def dependency_list(\n *dependencies: Optional[SlurmJobID],\n) -> Sequence[SlurmJobID]:\n return tuple(dependency for dependency in dependencies if dependency is not None)", "def _get_dependencies():\n return config.check_driver_dependencies(__virtualname__, {\"XenAPI\": HAS_XEN_API})", "def _list_dependencies_info(\n out: Callable, ljust: int, package: str, dependencies: List[Requirement]\n):\n unicode = sys.stdout.encoding.lower().startswith(\"utf\")\n if unicode:\n ljust += 1\n\n not_found: List[Requirement] = list()\n for dep in dependencies:\n if dep.name == package:\n continue\n try:\n version_ = version(dep.name)\n except Exception:\n not_found.append(dep)\n continue\n\n # build the output string step by step\n output = f\"✔︎ {dep.name}\" if unicode else dep.name\n # handle version specifiers\n if len(dep.specifier) != 0:\n output += f\" ({str(dep.specifier)})\"\n output += \":\"\n output = output.ljust(ljust) + version_\n\n # handle special dependencies with backends, C dep, ..\n if dep.name in (\"matplotlib\", \"seaborn\") and version_ != \"Not found.\":\n try:\n from matplotlib import pyplot as plt\n\n backend = plt.get_backend()\n except Exception:\n backend = \"Not found\"\n\n output += f\" (backend: {backend})\"\n out(output + \"\\n\")\n\n if len(not_found) != 0:\n not_found = [\n f\"{dep.name} ({str(dep.specifier)})\"\n if len(dep.specifier) != 0\n else dep.name\n for dep in not_found\n ]\n if unicode:\n out(f\"✘ Not installed: {', '.join(not_found)}\\n\")\n else:\n out(f\"Not installed: {', '.join(not_found)}\\n\")", "def _get_dependencies(requirements_file: Path) -> List[str]:\n lines = requirements_file.read_text().strip().split('\\n')\n return [line for line in lines if not line.startswith('#')]", "def get_dependencies(self, resource):\n\n rel_path = resource.relative_path\n deps = self.deps[rel_path] if rel_path in self.deps \\\n else self.update_deps(resource)\n return deps", "def get_dependency_configurations(self):\n deps = []\n\n for variant in self.resolve_variants():\n # Note: the variants have already been resolved\n # This for loop simply needs to resolve the dependencies one\n # by one, potentially overwriding earlier ones\n name, value = next(iter(variant.items()))\n if 'requires' in value and value['requires'] is not None:\n requires = value['requires']\n for req_name, req_config in requires.items():\n deps.append((req_name, req_config['version']))\n\n return deps", "def plugin_get_dependency():\n return []", "def get_dependencies(self, revision: Dict) -> List[Dict]:\n dependency_ids = revision['auxiliary']['phabricator:depends-on']\n revisions = self.get_revisions(phids=dependency_ids)\n result = []\n for r in revisions:\n result.append(r)\n sub = self.get_dependencies(r)\n result.extend(sub)\n return result", "def get_class_dependencies(cls):\n if 
hasattr(cls, _dependencies):\n deps = list(itertools.chain(*[d.unpack() for d in getattr(cls, _dependencies)]))\n else:\n deps = []\n return deps", "def dependencies(self, dep_context):\n if self.strict_deps:\n return strict_dependencies(self.target, dep_context)\n else:\n return all_dependencies(self.target, dep_context)", "def initial_dependencies(self) -> List[str]:\n return self.options[\"general\"][\"dependencies\"]", "def status():\n return jsonify(service='scwr-api-requirements', status='ok')" ]
[ "0.68876165", "0.6757046", "0.66798264", "0.65741026", "0.6442077", "0.64086777", "0.6375406", "0.6356302", "0.6338329", "0.629271", "0.6290646", "0.6248928", "0.61763686", "0.6158371", "0.6153964", "0.6139203", "0.613682", "0.61277705", "0.61259836", "0.61249816", "0.608106", "0.60771054", "0.6063798", "0.60606945", "0.60583353", "0.604128", "0.60250294", "0.6021626", "0.5997954", "0.59564424" ]
0.8783236
0
True if this node depends on the other node.
def depends_on(self, other):
        return other in [d.node for d in self.deps]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def depends((a, b)):\r\n return (any(bout in a.inputs for bout in b.outputs)\r\n or any(depends((ainp.owner, b)) for ainp in a.inputs\r\n if ainp.owner))", "def equiv(self, other):\n # FUTURE: once the PiplelineState nodes attribute stores multiple modules,\n # we may need to check that the ordering is consistent. Currently,\n # CAST and the AnnnotatedCast nodes only have a single module, so this is not a concern\n for i, node in enumerate(self.nodes):\n if not node.equiv(other.nodes[i]):\n # printing diff to help locating difference\n # because we do not overwrite the __str__() methods, \n # this has limited usefullness, but its better than nothing\n print(f\"AnnCast equiv failed:\")\n self_lines = str(node).splitlines()\n other_lines = str(other.nodes[i]).splitlines()\n for i, diff in enumerate(difflib.ndiff(self_lines, other_lines)):\n if diff[0]==' ': \n continue\n print(f\"Line {i}: {diff}\")\n\n return False\n \n return True", "def independent(self) -> bool:\n parent = self._parent()\n if parent is None:\n return True\n connections = parent._graph.connections\n path = self._path\n lp = len(path)\n for con in connections:\n if con[\"type\"] == \"connection\":\n if con[\"target\"][:lp] == path:\n return False\n return True", "def __le__(self, other):\n if self.head_vertex > other.head_vertex:\n return False\n elif self.tail_vertex > other.tail_vertex:\n return False\n elif self.weight > other.weight:\n return False\n return True", "def __le__(self, other):\n if self.head_vertex > other.head_vertex:\n return False\n elif self.tail_vertex > other.tail_vertex:\n return False\n elif self.weight > other.weight:\n return False\n return True", "def has_relationship(self, source_node: Node, target_node: Node) -> bool: # pylint: disable=no-self-use\n return source_node.node_id in target_node.in_nodes_ids", "def dependence(a, b):\r\n if depends((a, b)):\r\n return 1\r\n if depends((b, a)):\r\n return -1\r\n return 0", "def is_predecessor(self, parent_node: Node, child_node: Node) -> bool:\n return self.has_relationship(parent_node, child_node)", "def __ge__(self, other):\n if self.head_vertex < other.head_vertex:\n return False\n elif self.tail_vertex < other.tail_vertex:\n return False\n elif self.weight < other.weight:\n return False\n return True", "def __ge__(self, other):\n if self.head_vertex < other.head_vertex:\n return False\n elif self.tail_vertex < other.tail_vertex:\n return False\n elif self.weight < other.weight:\n return False\n return True", "def __gt__(self, other):\n if self.head_vertex <= other.head_vertex:\n return False\n elif self.tail_vertex <= other.tail_vertex:\n return False\n elif self.weight <= other.weight:\n return False\n return True", "def __gt__(self, other):\n if self.head_vertex <= other.head_vertex:\n return False\n elif self.tail_vertex <= other.tail_vertex:\n return False\n elif self.weight <= other.weight:\n return False\n return True", "def dependsOn(self, dep):\n if isinstance(dep, PartitionDevice) and dep.isExtended and \\\n self.isLogical and self.disk == dep.disk:\n return True\n\n return Device.dependsOn(self, dep)", "def compare(self, node) -> bool:\n\t\t# No conflicts, Return True\n\t\treturn True", "def __ge__(self, other):\n return self.head_vertex >= other.head_vertex and self.tail_vertex >= other.tail_vertex", "def make_dependence_cmp():\r\n\r\n depends = make_depends()\r\n\r\n def dependence(a, b):\r\n \"\"\" A cmp function for nodes in a graph - does a depend on b?\r\n\r\n Returns positive number if a depends on b\r\n Returns negative 
number if b depends on a\r\n Returns 0 otherwise\r\n \"\"\"\r\n if depends((a, b)):\r\n return 1\r\n if depends((b, a)):\r\n return -1\r\n return 0\r\n\r\n return dependence", "def __le__(self, other):\n return self.head_vertex <= other.head_vertex and self.tail_vertex <= other.tail_vertex", "def compare(self, t2) -> bool:\n return True if self.get_edge(t2) >= 0 else False", "def is_linked(self, other):\n for edge in self._edges_list:\n if other.index == edge.linked[1].index:\n return True\n return False", "def __gt__(self, other):\n return self.head_vertex > other.head_vertex and self.tail_vertex > other.tail_vertex", "def __le__(self, other):\n if self.assumptions >= other.assumptions and self.guarantees <= other.guarantees:\n return True\n else:\n return False", "def _has_parents(self, node: CFNode) -> bool:\n return bool(self._graph._backedges[node])", "def is_relation(self, rel_name):\n return rel_name in self._declaration", "def isDecendentOf(self, node):\n if (self in node.children()):\n return True\n elif (not node.isSink()):\n return reduce(lambda x,y: x or y, [self.isDecendentOf(x) for x in node.children()])\n else:\n return False", "def IsFather(self, *args):\n return _XCAFDoc.XCAFDoc_GraphNode_IsFather(self, *args)", "def has_edge(self, otherNode):\n\t\t\treturn otherNode in self.edges", "def allow_relation(self, obj1, obj2, **hints):\n return self._route_by_model_type(obj1) == self._route_by_model_type(obj2)", "def is_connected(self, node1, node2):\r\n\r\n return node1 in self.graph and node2 in self.graph[node1]", "def __lt__(self, other):\n if self.head_vertex >= other.head_vertex:\n return False\n elif self.tail_vertex >= other.tail_vertex:\n return False\n elif self.weight >= other.weight:\n return False\n return True", "def __lt__(self, other):\n if self.head_vertex >= other.head_vertex:\n return False\n elif self.tail_vertex >= other.tail_vertex:\n return False\n elif self.weight >= other.weight:\n return False\n return True" ]
[ "0.63062394", "0.62011176", "0.61985177", "0.6183282", "0.6183282", "0.6178362", "0.61306155", "0.60825515", "0.6065093", "0.6065093", "0.6033321", "0.6033321", "0.6020656", "0.6002114", "0.5981491", "0.5935776", "0.5914485", "0.5895294", "0.5885986", "0.5883382", "0.58295155", "0.5798338", "0.5789004", "0.5760179", "0.5715226", "0.565326", "0.56521565", "0.56471795", "0.564065", "0.564065" ]
0.805279
0
Return the string representation of the dependencies of the node.
def str_deps(self):
        lines = []
        app = lines.append

        app("Dependencies of node %s:" % str(self))
        for i, dep in enumerate(self.deps):
            app("%d) %s, status=%s" % (i, dep.info, str(dep.status)))

        return "\n".join(lines)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def print_deps(self):\n\t\tprint self.deps, '\\n'", "def __str__(self):\n stringRepresentation = []\n for node in self.getNodes():\n stringRepresentation.append(\"->\".join(\n (str(node), str(self.graph[node]))))\n\n return str(stringRepresentation)", "def component_dependencies_tag():\n\n rendered_dependencies = []\n for component in get_components_from_registry(registry):\n rendered_dependencies.append(component.render_dependencies())\n\n return mark_safe(\"\\n\".join(rendered_dependencies))", "def __str__(self):\n s = ''\n for node in self.nodes:\n s += '\\n\\n'+str(node)+'\\n\\t'\n edges = node.getChildren()\n keys = edges.keys()\n keys.sort()\n for key in keys:\n bounds = edges[key].getSuffix()\n s += str(edges[key])+' '\n for i in xrange(bounds[0], bounds[1]):\n s += self.target[i]\n s += '\\n\\t'\n return s", "def __str__(self):\n s = \"--\\n\"\n for node in self:\n s += node.__str__() + \"\\n\"\n return s + \"--\"", "def textree(self):\n\t\ttextree = '\\\\begin{dependency}[theme=simple]\\n\\\\begin{deptext}[column sep=.5cm, row sep=.1ex]\\n'\n\t\tsentence = self.reconstruct_sentence()\n\t\ts = '\\\\&'.join(sentence[1:])+'\\\\\\\\\\n'\n\t\tn = '\\\\&'.join(map(str,range(len(sentence)))) + '\\\\\\\\\\n'\n\t\ttextree = textree + s + n +'\\\\end{deptext}\\n'\n\t\ttextree = textree + '\\\\deproot{%s}{}\\n' % str(self.head_pos)\n\t\tfor head in self.deps:\n\t\t\tfor dependent in self.deps[head]:\n\t\t\t\ttextree = textree + '\\\\depedge{%s}{%s}{%s}\\n' % (head, dependent[0], dependent[1])\n\t\ttextree = textree + '\\\\end{dependency}'\n\t\treturn textree", "def __str__(self):\n result = [] \n node = self.head\n while node is not None:\n result.append(str(node.value))\n node = node.next_node \n return '[' + ', '.join(result) + ']'", "def __str__(self):\n result = [] \n node = self.head\n while node is not None:\n result.append(str(node.value))\n node = node.next_node \n return '[' + ', '.join(result) + ']'", "def __str__(self):\n values = \"\"\n node = self.head\n while node:\n values = values + \"{} \".format(node.__str__())\n node = node.next\n return values", "def dumps(self):\n result = []\n pkg_options_dumps = self._package_options.dumps()\n if pkg_options_dumps:\n result.append(pkg_options_dumps)\n for pkg_pattern, pkg_option in sorted(self._deps_package_options.items()):\n dep_pkg_option = pkg_option.dumps(scope=pkg_pattern)\n if dep_pkg_option:\n result.append(dep_pkg_option)\n return \"\\n\".join(result)", "def __str__(self):\n # string representation includes values of all inner fields\n return \\\n \"Node Name: \" + str(self.name) + \"\\n\" + \\\n \"Node Attributes: \" + str(self.attributes) + \"\\n\" + \\\n \"Incident Edges: \" + \"\\n\".join([edge.__str__() for edge in self.incident_edges]) + \"\\n\"", "def __str__(self):\n # string representation includes values of all inner fields\n return \\\n \"Node Name: \" + str(self.name) + \"\\n\" + \\\n \"Node Attributes: \" + str(self.attributes) + \"\\n\" + \\\n \"Incident Edges: \" + \"\\n\".join([edge.__str__() for edge in self.incident_edges]) + \"\\n\"", "def __str__(self):\n return \"NODE: \" + str(self.num_childs) + \" \" + str(self.num_metadata)", "def __repr__(self):\n ret = \"\"\n if is_relation(self.root):\n ret += self.root + '('\n for index, obj in enumerate(self.arguments):\n ret += str(obj)\n if index != len(self.arguments)-1:\n ret += ','\n ret += ')'\n elif is_equality(self.root):\n ret = str(self.first) + self.root + str(self.second)\n elif is_quantifier(self.root):\n ret = self.root + str(self.variable) + '[' + 
str(self.predicate) + ']'\n elif is_unary(self.root):\n ret = self.root + str(self.first)\n elif is_binary(self.root):\n ret = '(' + str(self.first) + self.root + str(self.second) + ')'\n return ret\n # Task 7.2", "def __str__(self):\n return np.array2string(self.graph.toarray())", "def __str__(self):\n name_str = \"node name is %s\\n\" % self.__name\n label_str = \"labels are %s\\n\" % str(self.__labels)\n propety_str = \"properties are %s\\n\" % str(self.__props)\n return name_str + label_str + propety_str", "def _list_dependencies_info(\n out: Callable, ljust: int, package: str, dependencies: List[Requirement]\n):\n unicode = sys.stdout.encoding.lower().startswith(\"utf\")\n if unicode:\n ljust += 1\n\n not_found: List[Requirement] = list()\n for dep in dependencies:\n if dep.name == package:\n continue\n try:\n version_ = version(dep.name)\n except Exception:\n not_found.append(dep)\n continue\n\n # build the output string step by step\n output = f\"✔︎ {dep.name}\" if unicode else dep.name\n # handle version specifiers\n if len(dep.specifier) != 0:\n output += f\" ({str(dep.specifier)})\"\n output += \":\"\n output = output.ljust(ljust) + version_\n\n # handle special dependencies with backends, C dep, ..\n if dep.name in (\"matplotlib\", \"seaborn\") and version_ != \"Not found.\":\n try:\n from matplotlib import pyplot as plt\n\n backend = plt.get_backend()\n except Exception:\n backend = \"Not found\"\n\n output += f\" (backend: {backend})\"\n out(output + \"\\n\")\n\n if len(not_found) != 0:\n not_found = [\n f\"{dep.name} ({str(dep.specifier)})\"\n if len(dep.specifier) != 0\n else dep.name\n for dep in not_found\n ]\n if unicode:\n out(f\"✘ Not installed: {', '.join(not_found)}\\n\")\n else:\n out(f\"Not installed: {', '.join(not_found)}\\n\")", "def __str__(self):\n vList = []\n for vertex in self:\n vList.append(vertex.name)\n gStr = \"The DiGraph contains _vertices: {0}\".format(\" \".join(vList))\n return gStr", "def __repr__(self):\n return str(self.nodes)", "def __str__(self):\n string = \"\"\n cur_node = self.head\n while cur_node is not None:\n string += cur_node.data.__str__()\n cur_node = cur_node.next\n return string", "def cycles(self) -> str:\n if self.inheritanceCycles:\n return (\n ', '.join(\n ('<'.join(c.name for c in cycle))\n for cycle in self.inheritanceCycles))\n else:\n return None", "def __str__(self):\n ret_str = \"\"\n for element_type in ('nodes', 'edges', 'layers'):\n elements = getattr(self, element_type)\n subtype_counts = defaultdict(int)\n ret_str += \"{0} {1}:\\n\".format(len(elements), element_type)\n for element in elements:\n subtype_counts[type(element).__name__] += 1\n for subtype in subtype_counts:\n ret_str += \"\\t{0}: {1}\\n\".format(subtype,\n subtype_counts[subtype])\n if element_type == 'layers':\n layer_names = [layer.name for layer in self.layers]\n ret_str += \"\\t\\t\" + \", \".join(layer_names)\n ret_str += \"\\n\"\n return ret_str", "def node_repr(node):\n\n result = History.name(node)\n if History.children(node):\n result += ':[' + ', '.join(map(History.node_repr, History.children(node))) + ']'\n return result", "def __str__(self) -> str:\n values = []\n self._str_helper(self.root, values)\n return \"TREE in order { \" + \", \".join(values) + \" }\"", "def __str__(self) -> str:\n values = []\n self._str_helper(self.root, values)\n return \"TREE in order { \" + \", \".join(values) + \" }\"", "def __str__(self):\n _str = \"\"\n current_node = self._head\n while(current_node != None):\n _str += str(current_node.value)\n _str += \" 
-> \"\n current_node = current_node.next\n _str += \"None\"\n return _str", "def __str__(self):\n pList = []\n for vertex in self._parents:\n pList.append(vertex.name)\n vStr = (\"Vertex {0} has value: {1} and is a child of vertices:\"\n \" {2}\".format(self.name, str(self.data), \" \".join(pList)))\n return vStr", "def show_rev_deps(self, package):\n return self.show_deps(package, \"show-rev-deps\")", "def __str__(self):\n return \"->\".join([str(n.data) for n in self.as_list()])", "def __str__(self):\n built_string = \"Graph(\"\n built_string += str(self.get_nodes())\n built_string += \", \"\n built_string += str(self.get_edges())\n built_string += \")\"\n return built_string" ]
[ "0.668987", "0.66223913", "0.6401555", "0.6358125", "0.62786525", "0.62584203", "0.62463415", "0.62463415", "0.6243726", "0.62194383", "0.6207523", "0.6207523", "0.6144341", "0.6044976", "0.60416764", "0.60378635", "0.60351294", "0.6028271", "0.6021468", "0.6000513", "0.5981035", "0.59764487", "0.5965553", "0.595328", "0.595328", "0.59526753", "0.5950818", "0.5942885", "0.5926009", "0.5924397" ]
0.8653706
0
Return pandas DataFrame with the value of the variables specified in `varnames`. Can be used for task/works/flow. It's recursive!
def get_vars_dataframe(self, *varnames):
        import pandas as pd
        if self.is_task:
            df = pd.DataFrame([{v: self.input.get(v, None) for v in varnames}], index=[self.name], columns=varnames)
            df["class"] = self.__class__.__name__
            return df
        elif self.is_work:
            frames = [task.get_vars_dataframe(*varnames) for task in self]
            return pd.concat(frames)
        elif self.is_flow:
            frames = [work.get_vars_dataframe(*varnames) for work in self]
            return pd.concat(frames)
        else:
            #print("Ignoring node of type: `%s`" % type(self))
            return pd.DataFrame(index=[self.name])
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_df(raw_data, variables):\n vars = []\n for v in requirement_order(variables):\n vars.append(v(raw_data))\n return pd.concat(vars, axis=1)", "def convert_variables_to_dataframe(self, variables: Variables):\n records = []\n for (year, variable_name), variable in variables.items():\n variable['year'] = year\n records.append(variable)\n dataframe = DataFrame.from_records(records)\n\n # Drop/rename columns, clean up label/concept values\n extra_columns = {'attributes', 'group', 'limit', 'predicateType'} & set(dataframe.columns)\n dataframe = dataframe.drop(columns=extra_columns).rename(columns={'name': 'variable'})\n\n return dataframe", "def _combine_vars(self, obj_types='all', var_keys='all'):\n\n # Retrieve variables\n if 'variables' in self:\n vs = self['variables']\n else:\n return None\n if isinstance(vs, pd.DataFrame):\n return vs # Return df if vs is already a df\n elif isinstance(vs, DataDict) and len(vs.keys()) == 1:\n return list(vs.values())[0] # Return df if vs has only one entry\n elif isinstance(vs, (dict,DataDict)):\n df_dict = dict(vs) # Convert to dict if vs is DataDict\n else:\n raise TypeError(\"DataDict.variables must be of type dict,\"\n \"agentpy.DataDict, or pandas.DataFrame.\")\n\n # Remove dataframes that don't include any of the selected var_keys\n if var_keys != 'all':\n df_dict = {k: v for k, v in df_dict.items()\n if any(x in v.columns for x in make_list(var_keys))}\n\n # Select object types\n if obj_types != 'all':\n df_dict = {k: v for k, v in df_dict.items()\n if k in make_list(obj_types)}\n\n # Add 'obj_id' before 't' for model df\n model_type = self.log['model_type']\n if model_type in list(df_dict.keys()):\n df = df_dict[model_type]\n df['obj_id'] = 0\n indexes = list(df.index.names)\n indexes.insert(-1, 'obj_id')\n df = df.reset_index()\n df = df.set_index(indexes)\n df_dict[model_type] = df\n\n # Return none if empty\n if df_dict == {}:\n return None\n\n # Create dataframe\n df = pd.concat(df_dict) # Dict keys (obj_type) will be added to index\n df.index = df.index.set_names('obj_type', level=0) # Rename new index\n\n # Select var_keys\n if var_keys != 'all':\n # make_list prevents conversion to pd.Series for single value\n df = df[make_list(var_keys)]\n\n return df", "def select_columns(variables):\n return relevant_raw_data_df[variables]", "def extract_var_data(self, var_names):\n variable_dict = {} # Declaring dictionary used to store key-val pairs, var_name as key and the array as the value\n try:\n for var in var_names:\n variable_dict[var] = self.dataset[var].values\n return variable_dict\n except Exception as e:\n print(\"An Error occured:\", e)\n raise e", "def relation_df(keys, variables, indexes=None):\n relations = []\n for variable_i in range(0, len(variables)):\n variable = variables[variable_i]\n \n print(\"processing: \", indexes[variable_i], \"length: \", len(variable))\n \n relation = relation_map(keys, variable)\n\n assert len(np.unique([len(relation[key]) for key in relation.keys()])) == 1, \"Not Unique Value \" + indexes[variable_i]\n relations.append(pd.DataFrame(relation))\n\n result = pd.concat(relations)\n\n if indexes != None:\n result.index = indexes\n return result", "def as_DF(self, s_names, u_names):\n return pd.DataFrame(self.Q, index=u_names, columns=s_names)", "def df(objs, labels: Optional[List[str]] = None):\n import pandas as pd\n from .objects import DynamicObject\n if objs:\n objs = list(objs)\n obj = objs[0]\n if is_dataclass(obj):\n df = pd.DataFrame.from_records(dataclassAsTuple(o) for o in objs)\n 
df.columns = [field.name for field in fields(obj)]\n elif isinstance(obj, DynamicObject):\n df = pd.DataFrame.from_records(o.__dict__ for o in objs)\n else:\n df = pd.DataFrame.from_records(objs)\n if isinstance(obj, tuple):\n _fields = getattr(obj, '_fields', None)\n if _fields:\n # assume it's a namedtuple\n df.columns = _fields\n else:\n df = None\n if labels:\n exclude = [label for label in df if label not in labels]\n df = df.drop(exclude, axis=1)\n return df", "def GetDataFrame(self, q_string, var_tup=None):\n def map_to_dict( results, field_names):\n res_dict = {}\n for fn in field_names:\n res_dict[fn] = []\n for res in results:\n for fn, f in zip(field_names, res):\n res_dict[fn].append(f)\n return res_dict\n def map_to_df( results, field_names):\n return pandas.DataFrame.from_dict(map_to_dict( results, field_names ))\n cursor = self.GetCursor()\n l_logger.debug(\"Query: %s, %r\" % (q_string,var_tup))\n cursor.execute(q_string,var_tup)\n results = cursor.fetchall()\n field_names = [i[0] for i in cursor.description]\n if len(results) == 0:\n return None\n else:\n return map_to_df( results, field_names )", "def frame(something, name = None):\n \n if isinstance(something, dict):\n res = pd.DataFrame.from_dict(something, orient='index')\n else:\n res = pd.DataFrame(something)\n number_of_columns = len(res.columns)\n if name != None:\n if isinstance(name, list):\n if len(name) >= number_of_columns:\n res.columns = name[:number_of_columns]\n else:\n res.columns = name + list(range(len(name), number_of_columns))\n else:\n res.columns = [name] + list(range(1, number_of_columns))\n return res", "def variables(names, **kwargs):\n return symbols(names, cls=Variable, seq=True, **kwargs)", "def get_variable_history(\n self,\n building_id: str,\n service_name: str,\n variable_name: str,\n start: datetime = None,\n end: datetime = None,\n ) -> DataFrame:\n start = _infer_datetime(start)\n end = _infer_datetime(end)\n try:\n raw = self._get_variable_history(\n building_id, service_name, variable_name, start, end\n )\n if raw:\n df = read_json(\n json.dumps(raw), dtype=float, convert_dates=False\n ).set_index(\"date\")\n df.index = map(datetime.fromtimestamp, df.index / 1000)\n else:\n raise IOError(\"Empty response from web request.\")\n except (json.JSONDecodeError, KeyError, IOError):\n df = DataFrame(columns=[\"value\"], dtype=float)\n df.index.name = \"date\"\n\n return df", "def get_df(self, n, named_cols=False):\n try:\n import pandas as pd\n except ImportError:\n print('Install pandas')\n raise ImportError\n\n df = pd.DataFrame(self.get_n(n))\n if named_cols:\n df.columns = ['x_{}'.format(c) for c in df.columns]\n return df", "def get_pandas(self, name):\n val = self.get(name)\n if isinstance(val, dict):\n df = pandas.DataFrame(val)\n return df", "def generate_df(js_dict, naming, value=\"value\"):\n\n values = []\n dimensions, dim_names = get_dimensions(js_dict, naming)\n values = get_values(js_dict, value=value)\n output = pd.DataFrame([category + [values[i]]\n for i, category in\n enumerate(get_df_row(dimensions, naming))])\n output.columns = dim_names + [value]\n output.index = range(0, len(values))\n return output", "def complete_one_hot_variables(df: pd.DataFrame, var_names: list) -> pd.DataFrame:\n\n df = df.copy()\n for var in var_names:\n # Create a column of 0s\n df[var] = 0\n return df", "def to_dataframe(data, names, **kwargs):\n return Component(\n \"ToDataframe\",\n arguments={\n 'data': Component.of(data),\n 'names': Component.of(names)\n },\n options={\n \n },\n 
constraints=kwargs)", "def State(**variables):\n return pd.Series(variables, name='state')", "def get_frame_from_query(the_query, colnames):\n df = DataFrame.from_records(list(the_query), columns=colnames)\n return df", "def data_frame_names(da_locals):\n frames = []\n for key, value in da_locals.items():\n if isinstance(value, pd.DataFrame):\n if key.startswith(\"_\") is False:\n frames.append(key)\n return frames", "def get_variables_by_id(\n self,\n ids: Optional[AnyGenericIdentifier] = None,\n variables: Optional[AnyGenericIdentifier] = None,\n ) -> Union[pd.Series, pd.DataFrame, str, int]:\n if ids is None:\n target_ids = self.xsid\n else:\n target_ids = np.asarray(ids)\n if variables is None:\n target_vars = self.variables\n else:\n target_vars = np.asarray(variables)\n if (\n self.__internal_samples.index.isin(target_ids).sum() <= len(target_ids)\n ) and (\n self.__internal_samples.columns.isin(target_vars).sum() <= len(target_vars)\n ):\n return self.__internal_samples.loc[target_ids, target_vars]\n else:\n raise ValueError(\"Invalid sample ids or variables are provided.\")", "def subset(self, names):\n vld = VarLookupDict(self._namespaces)\n new_ns = dict((name, vld[name]) for name in names)\n return EvalEnvironment([new_ns])", "def get_variable_values(self, vars):\n raise NotImplementedError()", "def var(name, indices=None, namespace=None):\n return ExprVariable(name, indices, namespace)", "def dict2pd(statdict, labelname):\n var_dfs = []\n for key, value in statdict.items():\n var_df = pd.Series(value.flatten())\n var_df.index = ttab.create_flat_names(key, value.shape)\n var_dfs.append(var_df)\n statpd = pd.concat(var_dfs, axis=0)\n statpd = statpd.rename(labelname)\n return statpd", "def var(\n self, values: pdarray, skipna: bool = True, ddof: int_scalars = 1\n ) -> Tuple[groupable, pdarray]:\n k, v = self.aggregate(values, \"var\", skipna, ddof)\n return k, cast(pdarray, v)", "def variable_extractor(variable: str, df, frequency: str):\n\t#Extract list of unique symbols from dataset\n\tsymbols = list(df['symbol'].unique())\n\tdf_master=pd.DataFrame()\n\tfor symbol in symbols:\n\t\tprint(symbol)\n\t\tsubset = df.loc[df['symbol'] == symbol]\n\t\t#Drop random duplicates in data\n\t\tsubset.drop_duplicates(inplace=True)\n\t\t#Set dataframe index and frequency\n\t\tsubset = subset.set_index('datetime')\n\t\tsubset = subset.asfreq('T')\n\t\t#Resample data\n\t\tresampled_subset = resample(subset, frequency)\n\t\tcol_of_interest = pd.DataFrame(resampled_subset[variable])\n\t\tcol_of_interest = col_of_interest.rename(columns = {variable: str(symbol)})\n\t\tdf_master = pd.concat([df_master, col_of_interest], axis = 1)\n\treturn df_master", "def _compute_variables(df: EDAFrame, cfg: Config) -> Dict[str, Any]:\n data: Dict[str, Any] = {}\n # variables\n if cfg.variables.enable:\n for col in df.columns:\n try:\n dtype = df.get_eda_dtype(col)\n # Since it will throw error if a numerical column is all-nan,\n # we transform it to categorical column.\n # We also transform to categorical for small cardinality numerical column.\n if df.get_missing_cnt(col) == df.shape[0]:\n srs = df.get_col_as_str(col, na_as_str=True)\n data[col] = nom_comps(srs, cfg)\n elif isinstance(dtype, (Nominal, GeoGraphy, GeoPoint)):\n data[col] = nom_comps(df.frame[col], cfg)\n elif isinstance(dtype, SmallCardNum):\n srs = df.get_col_as_str(col, na_as_str=False)\n data[col] = nom_comps(srs, cfg)\n elif isinstance(dtype, Continuous):\n data[col] = cont_comps(df.frame[col], cfg)\n # elif isinstance(dtype, DateTime):\n # 
data[col] = {}\n # data[col][\"stats\"] = calc_stats_dt(df.frame[col])\n # data[col][\"line\"] = dask.delayed(_calc_line_dt)(df.frame[[col]], \"auto\")\n else:\n raise ValueError(f\"unprocessed type in column{col}:{dtype}\")\n except:\n print(f\"error happended in column:{col}\", file=sys.stderr)\n raise\n return data", "def make_data_frame(words, years, feature_dict):\n\n temp = collections.defaultdict(list)\n feature_dict[\"word\"] = lambda word, year : word\n feature_dict[\"year\"] = lambda word, year : year\n for word in words:\n for year in years:\n for feature, feature_func in feature_dict.iteritems():\n temp[feature].append(feature_func(word, year))\n df = pd.DataFrame(temp)\n df = df.replace([np.inf, -np.inf], np.nan)\n df = df.dropna()\n return df", "def request_var(kid, var):\n code = \"import pandas as pd\\nimport numpy as np\\nif type(\" + var + \") \" \\\n \"is pd.DataFrame or type(\" + var + \") is np.ndarray or type(\" + var + \") is list:\\n\"\n code = code + \"\\tprint(\" + var + \".to_json(orient='split', index = False))\\n\"\n return exec_code(kid, var, code)" ]
[ "0.634334", "0.6127457", "0.54215074", "0.53402454", "0.5336286", "0.53129846", "0.52937585", "0.52844363", "0.5279209", "0.52531093", "0.52418345", "0.51818794", "0.5146553", "0.5134265", "0.50864273", "0.50787354", "0.5029223", "0.50172925", "0.5016772", "0.49703383", "0.4967813", "0.4965583", "0.49541882", "0.49390823", "0.4924998", "0.49212375", "0.48886093", "0.4841367", "0.48399496", "0.48384714" ]
0.74703014
0
Set the garbage collector.
def set_gc(self, gc):
        assert isinstance(gc, GarbageCollector)
        self._gc = gc
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def garbage_collectors(self, garbage_collectors):\n\n self._garbage_collectors = garbage_collectors", "def __init__(self, gc):\n self.gc = gc", "def device_gc():\n safe_call(backend.get().af_device_gc())", "def collect_garbage(self) -> None:\n pass", "def _run_garbage_collection():\n sleep_time = 0.5\n done = False\n while not done:\n collected = gc.collect(2)\n logger.info(\"{} objects collected\".format(collected))\n if collected:\n logger.info(\"Sleeping for {} seconds\".format(sleep_time))\n time.sleep(sleep_time)\n else:\n done = True", "def nomadgc():\n nomad.gc()", "def memory_free(self, memory_free: int):\r\n self._memory_free = memory_free", "def registerCollector(collector):\n assert isinstance(collector, Collector), \"collector=%r\" % (collector,)\n # store it in the global list and initiate a kill for anybody with the\n # same name\n if collector.name in COLLECTORS:\n col = COLLECTORS[collector.name]\n if col.proc is not None:\n LOG.error('%s still has a process (pid=%d) and is being reset,'\n ' terminating', col.name, col.proc.pid)\n col.shutdown()\n LOG.debug('Register collector : %s', collector.name)\n COLLECTORS[collector.name] = collector", "def gc_enable():\n raise NotImplementedError()", "def release(self):\n del self.ref\n self.ref = None\n gc.collect()", "def gc(self):\n if self.verbose:\n t0=time.time()\n self.data = None\n self.data_orig = None\n self.weights = None\n self.weights_orig = None\n if self.verbose:\n t1=time.time()\n print(\"Unload time: %0.2f s\" % (t1-t0))\n g.collect()", "def __del__(self):\n\n ipset.ipset_free(self.set)", "def __del__(self):\n self.release()", "def __del__(self):\n self.release()", "def __del__(self):\n self.release()", "def __del__(self):\n self.release()", "def __del__(self):\n self.release()", "def __del__(self):\n self.release()", "def gc_collect():\n\n # XXX Tests of GC and cleanup behavior are generally flaky and icky,\n # especially when you target all of Python 2.7, 3.5+ and PyPy. Their result\n # quickly depends on other tests, the arguments to the test runner and the\n # computer running the tests. 
This skips them all for now.\n raise unittest.SkipTest\n\n if platform.python_implementation() == \"PyPy\":\n # Since PyPy use garbage collection instead of reference counting\n # objects are not finalized before the next major GC collection.\n # Currently, the best way we have to ensure a major GC collection has\n # run is to call gc.collect() a number of times.\n [gc.collect() for _ in range(10)]\n else:\n gc.collect()", "def gc_disable():\n raise NotImplementedError()", "def yapasGarbageCollector(self):\r\n core.FW_conf['connection'].sendCommonMsg([0x00,0x00,0x10,0x38,0x00,0x06,0x00,0x01,0x01,0x5e,0x00,0x00]) #UI_FORCE_GARBAGE_COLLECTION_REQ\r\n if not core.FW_conf['connection'].recvMsg():\r\n raise GraniteConnectionException('Failed to receive UI_FORCE_GARBAGE_COLLECTION_RESP')", "def __del__(self):\r\n self.release()", "def __del__(self):\n self.shutdown()", "def __del__(self):\n\t\tself._pc.gid_clear()", "def gc(self):\n try:\n return self._gc\n except AttributeError:\n #if not self.is_flow and self.flow.gc: return self.flow.gc\n return None", "def gc(self):\n self._complete_grid = None", "def __del__(self):\n # Free the memory in the remote process's address space\n self.CleanUp()", "def free(self):\n pass", "def free(self):\n pass", "def release(self):\n self.__release_server_heap()\n if self.__cols is not None:\n for cname in self.__cols:\n del self.__dict__[cname]\n self.__fdata = None\n self.__cols = None\n self.__types = None\n self.__p_cols = None\n self.__p_types = None" ]
[ "0.72043836", "0.6208546", "0.61036986", "0.6040764", "0.5938195", "0.5892014", "0.56413096", "0.56100863", "0.55091774", "0.5441686", "0.5405389", "0.5392927", "0.5261957", "0.5261957", "0.5261957", "0.5261957", "0.5261957", "0.5261957", "0.52601856", "0.5254886", "0.524453", "0.5243451", "0.522404", "0.5191707", "0.5191053", "0.5172426", "0.51582164", "0.5153854", "0.5153854", "0.5120777" ]
0.7417656
0
Garbage collector. None if garbage collection is deactivated. Use flow.set_garbage_collector to initialize the object.
def gc(self):
        try:
            return self._gc
        except AttributeError:
            #if not self.is_flow and self.flow.gc: return self.flow.gc
            return None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def collect_garbage(self) -> None:\n pass", "def __init__(self, gc):\n self.gc = gc", "def set_gc(self, gc):\n assert isinstance(gc, GarbageCollector)\n self._gc = gc", "def device_gc():\n safe_call(backend.get().af_device_gc())", "def garbage_collectors(self, garbage_collectors):\n\n self._garbage_collectors = garbage_collectors", "def yapasGarbageCollector(self):\r\n core.FW_conf['connection'].sendCommonMsg([0x00,0x00,0x10,0x38,0x00,0x06,0x00,0x01,0x01,0x5e,0x00,0x00]) #UI_FORCE_GARBAGE_COLLECTION_REQ\r\n if not core.FW_conf['connection'].recvMsg():\r\n raise GraniteConnectionException('Failed to receive UI_FORCE_GARBAGE_COLLECTION_RESP')", "def gc(self):\n if self.verbose:\n t0=time.time()\n self.data = None\n self.data_orig = None\n self.weights = None\n self.weights_orig = None\n if self.verbose:\n t1=time.time()\n print(\"Unload time: %0.2f s\" % (t1-t0))\n g.collect()", "def _run_garbage_collection():\n sleep_time = 0.5\n done = False\n while not done:\n collected = gc.collect(2)\n logger.info(\"{} objects collected\".format(collected))\n if collected:\n logger.info(\"Sleeping for {} seconds\".format(sleep_time))\n time.sleep(sleep_time)\n else:\n done = True", "def yapasGarbageCollectorReq(self):\r\n if core.FW_conf['tracing_enabled']:\r\n core.FW_conf['trace'].yapasGarbageCollector()", "def nomadgc():\n nomad.gc()", "def gc_enable():\n raise NotImplementedError()", "def gc_collect_cycles():\n raise NotImplementedError()", "def collect_garbage(self, required_inputs):", "def __init__(self, allow_gc=None, use_cloop=False, callback=None,\r\n lazy=None, schedule=None):\r\n # Note: if more parameters are added to __init__, make sure to forward\r\n # them in the \"type(self)(...)\" call in the \"accept\" method below.\r\n if allow_gc is None:\r\n allow_gc = config.allow_gc\r\n self.fgraph = None\r\n self.allow_gc = allow_gc\r\n self.use_cloop = use_cloop\r\n self.callback = callback\r\n self.lazy = lazy\r\n self.updated_vars = {}\r\n if schedule:\r\n self.schedule = schedule", "def gc_collect():\n\n # XXX Tests of GC and cleanup behavior are generally flaky and icky,\n # especially when you target all of Python 2.7, 3.5+ and PyPy. Their result\n # quickly depends on other tests, the arguments to the test runner and the\n # computer running the tests. This skips them all for now.\n raise unittest.SkipTest\n\n if platform.python_implementation() == \"PyPy\":\n # Since PyPy use garbage collection instead of reference counting\n # objects are not finalized before the next major GC collection.\n # Currently, the best way we have to ensure a major GC collection has\n # run is to call gc.collect() a number of times.\n [gc.collect() for _ in range(10)]\n else:\n gc.collect()", "def gc_enabled():\n raise NotImplementedError()", "def collector(self) -> Collector:\n return self._collector", "def __del__(self):\n pass", "def __del__(self):\n pass", "def __del__(self):\n return", "def __init__(self):\n self._maxTime = 0\n self._activeHeap = []\n self._activeDict = {}\n self._graph = _Graph()", "def gc(self):\n self._complete_grid = None", "def gc_disable():\n raise NotImplementedError()", "def release(self):\n del self.ref\n self.ref = None\n gc.collect()", "def free(self):\n pass", "def free(self):\n pass", "def __del__(self):\n self.release()", "def __del__(self):\n self.release()", "def __del__(self):\n self.release()", "def __del__(self):\n self.release()" ]
[ "0.68191624", "0.669241", "0.64943933", "0.63076013", "0.6256203", "0.6121257", "0.61030954", "0.6062495", "0.5842175", "0.57076174", "0.57047695", "0.5668936", "0.5641246", "0.56166315", "0.55038536", "0.54899156", "0.5474436", "0.5462704", "0.5462704", "0.5421219", "0.54023755", "0.53925735", "0.5363085", "0.5347418", "0.5342831", "0.5342831", "0.53414774", "0.53414774", "0.53414774", "0.53414774" ]
0.7802732
0
Install the `EventHandlers` for this `Node`. If no argument is provided, the default list of handlers is installed.
def install_event_handlers(self, categories=None, handlers=None):
        if categories is not None and handlers is not None:
            raise ValueError("categories and handlers are mutually exclusive!")

        from .events import get_event_handler_classes
        if categories:
            raise NotImplementedError()
            handlers = [cls() for cls in get_event_handler_classes(categories=categories)]
        else:
            handlers = handlers or [cls() for cls in get_event_handler_classes()]

        self._event_handlers = handlers
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def addHandlers(self, handlers):\n self._eventHandlers.update(handlers)\n keys = self._eventHandlers.keys()\n pygame.event.set_allowed(keys)", "def setupInputEventHandlers(self):\n\n default.Script.setupInputEventHandlers(self)\n self.inputEventHandlers.update(\n self.structuralNavigation.inputEventHandlers)\n\n self.inputEventHandlers[\"sayAllHandler\"] = \\\n input_event.InputEventHandler(\n Script.sayAll,\n cmdnames.SAY_ALL)\n\n self.inputEventHandlers[\"panBrailleLeftHandler\"] = \\\n input_event.InputEventHandler(\n Script.panBrailleLeft,\n cmdnames.PAN_BRAILLE_LEFT,\n False) # Do not enable learn mode for this action\n\n self.inputEventHandlers[\"panBrailleRightHandler\"] = \\\n input_event.InputEventHandler(\n Script.panBrailleRight,\n cmdnames.PAN_BRAILLE_RIGHT,\n False) # Do not enable learn mode for this action", "def _register_handlers(self):\n DBG(\"\\nregister handlers\")\n for hook, handler in self.handlers:\n g.registerHandler(hook, handler)\n\n signal_manager.connect(self.c, 'body_changed', self._after_body_key)", "def register(self, events=[]):\n self.events = events\n if not self in manager.handler:\n manager.handler.append(self)", "def add_handlers(self, host_pattern, host_handlers):\n pass", "def load_handlers(self):\n\t\tself.handlers = []\n\t\tfor mod in os.listdir('classes/handlers'):\n\t\t\tif mod == '__init__.py' or mod[-3:] != '.py':\n\t\t\t\tcontinue\n\t\t\tlib = __import__(mod[:-3], locals(), globals())\n\t\t\tself.handlers.append(lib)\n\t\t#\n\t\tself.handlers.sort(key=lambda x: x.order, reverse=False)\n\t\tprint(\"Loaded handlers: \", ', '.join([x.tag for x in self.handlers]) )\n\t\tassert len(self.handlers)>0", "def _register_handlers(self):\n self.jm.register_handler(\"move_node\", self.move_node)\n self.jm.register_handler(\"copy_node\", self.copy_node)\n self.jm.register_handler(\"push_to_vospace\", self.push_to_vospace)\n self.jm.register_handler(\"push_from_vospace\", self.push_from_vospace)\n self.jm.register_handler(\"pull_to_vospace\", self.pull_to_vospace)\n self.jm.register_handler(\"pull_from_vospace\", self.pull_from_vospace)", "def _register_handlers(self):\n import handlers as th\n import inspect\n for name, class_type in inspect.getmembers(th, predicate=inspect.isclass):\n if class_type is th.ZMQTopicHandlerBase:\n continue\n handler = class_type()\n topic = handler.get_topic()\n if topic in self._topic_handlers:\n self._topic_handlers.append(handler)\n else:\n self._topic_handlers[topic] = [handler]", "def event_handlers(self):\n if self._event_handlers is not None:\n return self._event_handlers\n\n # Get event handlers for self\n ordered = []\n unordered = []\n cls = type(self)\n for cls_name in dir(cls):\n cls_item = getattr(cls, cls_name, None)\n if isinstance(cls_item, HandlerDecorator):\n bound_handler = getattr(self, cls_name)\n if cls_item.priority is not None:\n ordered.append((cls_item, bound_handler))\n else:\n unordered.append((cls_item, bound_handler))\n ordered.sort(key=lambda h: h[0].priority)\n\n # get parent event handlers\n try:\n parent = self.parent.acquire.event_handlers\n except AttributeError:\n parent = []\n\n # Combine, cache and return\n handlers = [*ordered, *unordered, *parent]\n self._event_handlers = handlers\n return handlers", "def fileHandlers(self, handlers):\n for handler in handlers:\n self.logger.addHandler(handler)", "def connect(self, handler, event=None):\n if event:\n self.event_handlers[event].append(handler)\n else:\n for event in self.event_handlers:\n if hasattr(handler, event):\n 
self.connect(getattr(handler, event), event)", "def handlers(self, handlers):\n return self._set_list_field(\"handlers\", handlers)", "def get_handlers(self):\n raise NotImplementedError()", "def _handlers(self):\n settings = self.get_settings(prefix='tangled.app.handler.')\n # System handler chain\n handlers = [settings['exc']]\n if self.has_any('static_directory'):\n # Only enable static file handler if there's at least one\n # local static directory registered.\n dirs = self.get_all('static_directory')\n if any(isinstance(d, LocalDirectory) for d in dirs):\n handlers.append(settings['static_files'])\n handlers.append(settings['tweaker'])\n handlers.append(settings['notifier'])\n handlers.append(settings['resource_finder'])\n if self.get_setting('csrf.enabled'):\n handlers.append(settings['csrf'])\n if 'auth' in settings:\n handlers.append(settings['auth'])\n # Handlers added by extensions and applications\n handlers += self.get_all(abcs.AHandler, [])\n if self.get_setting('cors.enabled'):\n handlers.append(settings['cors'])\n # Main handler\n handlers.append(settings['main'])\n # Wrap handlers\n wrapped_handlers = []\n next_handler = None\n for handler in reversed(handlers):\n handler = HandlerWrapper(handler, next_handler)\n wrapped_handlers.append(handler)\n next_handler = handler\n wrapped_handlers.reverse()\n return wrapped_handlers", "def setup_signal_handlers(self):\n signal.signal(signal.SIGUSR1, self.handle_logging_signal)\n signal.signal(signal.SIGUSR2, self.handle_logging_signal)", "def _initChangeHandlers(self, handlers):\n if hasattr(self, \"_changeHandlerSet\") :\n return\n if isinstance(handlers, BaseChangeHandler):\n self._changeHandlerSet = set([handlers])\n elif hasattr(handlers, '__iter__'):\n self._changeHandlerSet = set(\n [h for h in handlers if isinstance(h, BaseChangeHandler)])\n else: \n self._changeHandlerSet = set()", "def create_default_events(self):\n self.events.register_class(\"commands\", LineEvent)\n self.events.register_class(\"commands_out\", LineEvent)\n self.events.register_class(\"hooks\", HookEvent)", "def _update_handlers(self):\n handler_map = defaultdict(list)\n for i, obj in enumerate(self.handlers):\n for dummy, handler in inspect.getmembers(obj, callable):\n if not hasattr(handler, \"_pyxmpp_event_handled\"):\n continue\n # pylint: disable-msg=W0212\n event_class = handler._pyxmpp_event_handled\n handler_map[event_class].append( (i, handler) )\n self._handler_map = handler_map", "def get_handlers_for_event(self, event):\n pass # pragma: no cover", "def setup_logging(name, handlers=None, level=None):\n handlers = handlers or []\n log = logging.getLogger(name)\n if len(log.handlers) == 0 and not handlers:\n h = logging.NullHandler()\n log.addHandler(h)\n for h in handlers:\n log.addHandler(h)\n if level:\n log.setLevel(level)\n return log", "def register_handlers(dp, di_container: di.Container):\n general.router.register_handlers(dp)\n\n di_container.wire(packages=[sys.modules[__name__]])", "def import_handlers(self):\n if not self._import_handlers:\n self._initialize_handlers()\n\n return self._import_handlers", "def setup_signal_handlers():\n # type: () -> None\n for signum in [signal.SIGINT, signal.SIGTERM]:\n signal.signal(signum, log_and_exit_handler)\n\n signal.signal(signal.SIGUSR1, dump_thread_handler)", "async def setup(self):\n\t\tlogging.config.dictConfig(self.log_settings['log'])\n\t\tself.logger = logging.getLogger('Responder3')\n\t\tself.create_dir_strucutre()\n\n\t\tif 'handlers' in self.log_settings:\n\t\t\tasync for 
handlerclass, handler in self.get_handlers():\n\t\t\t\tawait self.start_extension(handlerclass, self.log_settings[self.log_settings['handlers'][handler]])", "def show_event_handlers(self, stream=sys.stdout, verbose=0):\n lines = [\"List of event handlers installed:\"]\n for handler in self.event_handlers:\n if verbose:\n lines.extend(handler.__class__.cls2str().split(\"\\n\"))\n else:\n lines.extend(str(handler).split(\"\\n\"))\n\n stream.write(\"\\n\".join(lines))\n stream.write(\"\\n\")", "def setup(self):\n\t\tif self.hasSignalModule and not self.signalsRegistered:\n\t\t\t# Jython does not support all signals, so we only use\n\t\t\t# the available ones\n\t\t\tsignals = ['SIGINT', 'SIGHUP', 'SIGABRT', 'SIGQUIT', 'SIGTERM']\n\t\t\timport signal\n\t\t\tfor sig in signals:\n\t\t\t\ttry:\n\t\t\t\t\tsignal.signal(getattr(signal, sig), self._shutdown)\n\t\t\t\t\tself.signalsRegistered.append(sig)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tLogger.Err(\"[!] monitoring.Signals._registerSignals:%s %s\\n\" % (sig, e))", "def u2handlers(self):\n return []", "def init_handlers(self, root_logger, default_stream='stderr'):\n\n if default_stream == 'stdout':\n default_stream = self.stdout\n elif default_stream == 'stderr':\n default_stream = self.stderr\n\n # default handler for display to terminal\n default_handler = TerminalHandler(self, strm=default_stream)\n if config.verbose_output:\n default_handler.setLevel(VERBOSE)\n else:\n default_handler.setLevel(INFO)\n # this handler ignores levels above INPUT\n default_handler.addFilter(MaxLevelFilter(INPUT))\n default_handler.setFormatter(\n TerminalFormatter(fmt=\"%(message)s%(newline)s\"))\n root_logger.addHandler(default_handler)\n\n # handler for level STDOUT\n output_handler = TerminalHandler(self, strm=self.stdout)\n output_handler.setLevel(STDOUT)\n output_handler.addFilter(MaxLevelFilter(STDOUT))\n output_handler.setFormatter(\n TerminalFormatter(fmt=\"%(message)s%(newline)s\"))\n root_logger.addHandler(output_handler)\n\n # handler for levels WARNING and higher\n warning_handler = TerminalHandler(self, strm=self.stderr)\n warning_handler.setLevel(logging.WARNING)\n warning_handler.setFormatter(\n TerminalFormatter(fmt=\"%(levelname)s: %(message)s%(newline)s\"))\n root_logger.addHandler(warning_handler)", "def register_handlers(path = EXPLOIT_FOLDER):\n\n exploit_folder = './{}/{}'.format(os.path.dirname(__file__), path)\n handlers = []\n\n for module in os.listdir(exploit_folder):\n\n if not module.endswith(\".py\") or module == \"__init__.py\":\n continue\n\n # Execute the script\n # We assume that each executed script registers himself to the handlers dictionary.\n try:\n execfile('./{}/{}'.format(path, module))\n except Exception as e:\n log.failure(\"Could not register handler '{}' : {}\".format(module, e))\n\n log.info(\"Registered {} handler(s).\".format(len(handlers)))\n for handler in handlers:\n\n handler_name = handler.__name__\n log.info(\"- Registered '{}' handler\".format(handler_name))\n\n return handlers", "def cacheHandlers(self):\n\n def collect_handlers(module):\n\n def wanted(member):\n return (isclass(member) and\n issubclass(member, handlers.HandlerBase) and\n member.__name__.endswith('Handler'))\n\n m = {}\n for name, obj in getmembers(module, wanted):\n m[name] = obj(self.skype)\n m[name].init()\n return m\n\n self.handlers = collect_handlers(handlers)\n if custom_handlers:\n self.handlers.update(collect_handlers(custom_handlers))" ]
[ "0.6414835", "0.6037302", "0.5909204", "0.59006244", "0.57668835", "0.57453233", "0.57121414", "0.5681006", "0.5673216", "0.5600916", "0.5572513", "0.5570844", "0.5547228", "0.5545235", "0.55310273", "0.5432351", "0.5400709", "0.53988016", "0.5386339", "0.53185946", "0.5306746", "0.5246401", "0.5237647", "0.522001", "0.51805776", "0.5146257", "0.51201403", "0.51196223", "0.5119045", "0.51187974" ]
0.7356163
0
Print to `stream` the event handlers installed for this flow.
def show_event_handlers(self, stream=sys.stdout, verbose=0): lines = ["List of event handlers installed:"] for handler in self.event_handlers: if verbose: lines.extend(handler.__class__.cls2str().split("\n")) else: lines.extend(str(handler).split("\n")) stream.write("\n".join(lines)) stream.write("\n")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_stream_handler(self):\n sh = logging.StreamHandler()\n sh.setFormatter(logging.Formatter(self.fmt, datefmt=self.date_fmt))\n self.addHandler(sh)", "def set_stream(self, stream):\n\n for handler in self.handlers[:]:\n if isinstance(handler, logging.StreamHandler):\n self.handlers.remove(handler)\n\n if stream is not None:\n stream_handler = logging.StreamHandler(stream)\n stream_handler.addFilter(_StreamHandlerEchoFilter())\n stream_handler.setFormatter(logging.Formatter('%(message)s'))\n self.addHandler(stream_handler)\n\n self.stream = stream", "def add_stream_to_event(self,stream):\n assert isinstance(stream,Stream)", "def to_event_stream(self, event_stream):\n while self.reading_log:\n event = self.read_event()\n if event is not None:\n event_stream.append(event)", "def printIns(self, stream):\n print(' ', str(self), file=stream)", "def _event(self, level=None, message=None):\n for i in eventhandlers:\n if level == 'write':\n i.write( object_definition=self, message=message )\n else:\n i.debug( object_definition=self, message=message )", "def do_input_events(self):\r\n for event in EventStream.allNext(self.streams):\r\n if self.handler.event(event) and self.unhandledHandler:\r\n self.unhandledHandler(event)", "def __show_all_events(self):\n for event in self.events_list:\n self.__print_events_info(event)\n print()", "async def _user_stream_event_listener(self):\n count = 0\n async for event_message in self._iter_user_event_queue():\n logging.info(event_message)\n if count > 5:\n return\n count += 1", "def stream(self, replay=False):\n self.__streaming = True\n try:\n yield (lambda e: self._publish(e, replay=replay))\n finally:\n self.__streaming = False", "def listen(self, debug=True):\n if len(self._handlers) == 0:\n warnings.warn('No event handler has been added.')\n\n events = self._events_fetcher()\n if debug and len(events) > 0:\n print events\n\n for e in events:\n if self._handlers.has_key(e):\n self._handlers[e]()", "def remove_handlers():\n handlers = []\n for handler in logging.root.handlers:\n if not isinstance(handler, logging.StreamHandler):\n handlers.append(handler)\n logging.root.handlers = handlers", "def set_stream(self, stream=sys.stdout):\n if self._have_logged:\n raise RuntimeError('Cannot configure after logging has started.')\n\n self._stream = stream", "def stream():\n return flask.Response(event_stream(flask.request.access_route[0]),\n mimetype='text/event-stream')", "def show_entries_stream():\n pass", "def init_handlers(self, root_logger, default_stream='stderr'):\n\n if default_stream == 'stdout':\n default_stream = self.stdout\n elif default_stream == 'stderr':\n default_stream = self.stderr\n\n # default handler for display to terminal\n default_handler = TerminalHandler(self, strm=default_stream)\n if config.verbose_output:\n default_handler.setLevel(VERBOSE)\n else:\n default_handler.setLevel(INFO)\n # this handler ignores levels above INPUT\n default_handler.addFilter(MaxLevelFilter(INPUT))\n default_handler.setFormatter(\n TerminalFormatter(fmt=\"%(message)s%(newline)s\"))\n root_logger.addHandler(default_handler)\n\n # handler for level STDOUT\n output_handler = TerminalHandler(self, strm=self.stdout)\n output_handler.setLevel(STDOUT)\n output_handler.addFilter(MaxLevelFilter(STDOUT))\n output_handler.setFormatter(\n TerminalFormatter(fmt=\"%(message)s%(newline)s\"))\n root_logger.addHandler(output_handler)\n\n # handler for levels WARNING and higher\n warning_handler = TerminalHandler(self, strm=self.stderr)\n 
warning_handler.setLevel(logging.WARNING)\n warning_handler.setFormatter(\n TerminalFormatter(fmt=\"%(levelname)s: %(message)s%(newline)s\"))\n root_logger.addHandler(warning_handler)", "def event_stream(self):\n for message in self.subscribe():\n event = message_to_sse(message[\"data\"])\n yield event", "def flush(self):\n\n for handler in self.handlers:\n handler.flush()", "def start_stream(self):\n pass", "def event_handlers(self):\n if self.is_flow:\n return self._event_handlers\n\n try:\n return self._event_handlers\n except AttributeError:\n return self.flow._event_handlers", "def __iter__(self):\n return _iterEvents(self._eventHandlers)", "def setOutputStream(self, stream):\n self.stream = stream", "def events(bot, event, *args):\n yield from _printEventList(bot, event)", "def fileHandlers(self, handlers):\n for handler in handlers:\n self.logger.addHandler(handler)", "def __logger_console(self):\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(self.__formatter)\n console_handler.setLevel(logging.INFO)\n self.__logger.addHandler(console_handler)", "def on(self):\n self._current_stream = self._stdout", "def on_connect(self):\n log.info(\"Stream connected\")", "def stream_created(self,stream):\n pass", "def stream_stream_event(behavior):\n return _MethodImplementation(\n cardinality.Cardinality.STREAM_STREAM, style.Service.EVENT, None, None,\n None, None, None, None, None, behavior)", "def stream(self, stream):\n\n self._stream = stream" ]
[ "0.61495626", "0.61448324", "0.5951412", "0.5731857", "0.5662934", "0.5529747", "0.5442177", "0.54411954", "0.5413522", "0.5389675", "0.5352938", "0.5295455", "0.52764785", "0.52754617", "0.5269171", "0.5219142", "0.52166575", "0.5141776", "0.5137113", "0.51314557", "0.5105221", "0.5088004", "0.50705534", "0.506929", "0.506498", "0.5061024", "0.5050374", "0.5035604", "0.5034685", "0.5016827" ]
0.81538296
0
Basename of the file.
def basename(self): return os.path.basename(self.filepath)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def basename(self):\n return get_basename(self.filename)", "def basename(self):\n return self.name.basename", "def get_file_basename(self):\n return self._basename[:]", "def basename(self) -> str:\n return self._basename", "def basename(self):\n return self._basename", "def get_basename(self):\n return self._basename", "def base_filename(self):\n return self.filename.split('.')[0]", "def base_name(self):\n return \".\".join(posixpath.basename(self.file_name).split(\".\")[:-1])", "def basename(self):\n return self._getbyspec(\"basename\")[0]", "def purebasename(self):\n return self.namebase", "def name(self) -> str:\n if '/' in self.path.strip('/'):\n basename: str = os.path.basename(self.path)\n return basename\n return self.path", "def filename(self):\n filename, ext = os.path.splitext(self.file.name)\n return filename.split('/')[-1]", "def getBaseName(filepath):\n return os.path.basename(filepath)", "def basename(file_path):\n return os.path.basename(file_path)", "def base(self):\n return os.path.basename(self.path)", "def get_basename(absolute_file_path):\r\n return absolute_file_path.split('/')[-1]", "def get_file_name(self):\n return self.path.name[6:]", "def getCurrentFileName(self):\n return os.path.basename(self.filePath)", "def GetFileName(self):\n return self.file.GetPath()", "def purebasename(self):\n return self._getbyspec(\"purebasename\")[0]", "def get_basename(file: Union[str, FileStorage]) -> str:\n filename = _retrieve_filename(file)\n # split will split at the final part of the path(image.jpg) and everything\n # before it is at index 0\n return os.path.split(filename)[1]", "def get_file_name(self):\n return str(self.get_file())", "def basename(self, filename):\n return filename.replace(self.remote_path, '', 1).lstrip(sep)", "def filename(self):\n # TODO(aron): write tests for this\n\n return os.path.basename(self.file_on_disk.name)", "def get_file_name(self):\n return self.upload.name[6:]", "def GcsBasename(path):\n return os.path.basename(path)", "def base_name(path):\n return os.path.basename(path)", "def basename(path: str) -> str:\n pass", "def get_file_name(self):\n return self.__file_name", "def getInputFileBasename(inputFilename = None):\n\n curInputFilename = getInputFilename()\n\n if inputFilename :\n curInputFilename = inputFilename\n\n # print \"curInputFilename=%s\"%(curInputFilename)\n inputBasename = getBasename(curInputFilename)\n # print \"inputBasename=%s\"%(inputBasename)\n return inputBasename" ]
[ "0.82301086", "0.798624", "0.79570544", "0.7948017", "0.7867619", "0.7821296", "0.77957064", "0.77522737", "0.7751202", "0.7596237", "0.7540688", "0.75255793", "0.750992", "0.74643725", "0.73821795", "0.7343527", "0.7268083", "0.7236085", "0.7234416", "0.7201531", "0.7180847", "0.7171925", "0.7140815", "0.7088621", "0.70869726", "0.7079834", "0.70371443", "0.7014952", "0.70014083", "0.69937474" ]
0.82075614
1
Add a node (usually Task) to the children of this FileNode.
def add_filechild(self, node): self._filechildren.append(node)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_child(self, node):\n self.children.append(node)", "def add_child(self, node):\n self.children.append(node)", "def add_child(self, node):\n self.children.append(node)", "def _add_child(self, node):\n self.children.update({\n node.name: node\n })\n node.path = self._sep.join([self.path, node.name])\n node.parent = self", "def add_node(self, node: Node) -> None:\n with scandir(node.path) as it:\n for entry in it:\n if entry.name.startswith('.') or entry.name.startswith('__'):\n continue\n if entry.is_dir():\n if len(node.children) > 50:\n pass\n else:\n node.children.append(Node(node, entry))\n else:\n node.files.append(entry)\n for child in node.children:\n self.add_node(child)\n if child.depth > self.depth:\n self.depth = child.depth", "def add_child(self, node):\n if isinstance(node, _Node):\n self.__children.append(node)\n else:\n raise ValueError(\"Please provide a valid node to append\")", "def add_child(self, node):\n if node not in self.children: #If the node isn't already a child of Node,\n self.children.append(node) #Add it to the end of the list of children", "def append_node(self, p_node):\n p_node.parent = self\n self.children.append(p_node)", "def add_children(self,node):\n\n node.parent_id = self.id\n node.level = self.level + 1\n node.path = node._create_path()\n node.save()", "def add_node(self, **kwargs):\n self._content.append(Node(**kwargs))", "def add_child(self, node):\n\n\t\tif Defaults == node.__class__:\n\t\t\tself.__defaults = node\n\t\telif Variables == node.__class__:\n\t\t\tself.__variables = node\n\t\telif Servers == node.__class__:\n\t\t\tself.__servers = node\n\t\telif FileSets == node.__class__:\n\t\t\tself.__filesets = node\n\t\telif Targets == node.__class__:\n\t\t\tself.__targets = node\n\t\telse:\n\t\t\traise DepFileParsingError()\n\n\t\treturn True", "def addChild(node):", "def add_child(self, nodo):\n if nodo not in self.children:\n self.children.append(nodo)", "def add_node(self, node):\n self.nodes.add(node)", "def addChild(self, node):\n self._children.append(node)\n self.updateDepth(node.depth)", "def add_node(self, node):\n self.nodes.append(node)", "def addNode (self, node):\n self.__nodes.add(node)", "def add_node(parent_name, child_name, node):\n if node.name == parent_name:\n return node.add(Node(child_name))\n else:\n for child in node.children:\n add_node(parent_name, child_name, child)", "def add_node(self, node):", "def add_child(self, child):\r\n self.children.append(child)", "def addNode(self, node: Node):\n self.nodes.append(node)", "def add(self, value):\n self.children.append(Node(value))", "def add_child(self, node):\n if self is node:\n parent_id = \"\"\n _nodeid=\"N_\"+str(0)\n else:\n if not issubclass(node.__class__, Node):\n raise TypeError(\"{}.add_child: arg «node»=«{}», type {} not valid.\".format(self.__class__.__name__, node, type(node)))\n self.childs.append(node)\n node.parent = self\n parent_id = self.TV.selection()[0]\n _nodeid=\"N_\"+str(self.node_count)\n # parent = self.rootnode.get_node_by_id(parent_id)\n # if parent is None:\n # return None\n\n # print(\"self.TV.insert node._nodeid\", node._nodeid)\n # print(\"self.TV.insert node.data\", node.data)\n \n self.TV.insert(parent_id, 'end', _nodeid, text=node.name)\n\n # parent_id = self.TreeView.selection()[0]\n # node_name = askstring(\"New Child\", prompt=\"Enter the node name\", initialvalue=\"\")\n # if not node_name:\n # node_name = \"no-name-node\"\n # # self.TV.insert(item, 'end', 'LC_'+str(self.TVleafref), \n # # text='Load case 
'+str(self.TVleafref))\n # #self.node_count += 1\n \n # self.TreeView.insert(parent_id, 'end', self._nodeid, text=self.name)\n\n return node", "def add_child(self, child):\r\n self.children.append(child)", "def add_node(self, node):\n self._nodes.add(node)", "def add_node(self, node):\n self._nodes.add(node)", "def add_node(self, node):\n self.nodes[node.name] = node\n self.dirty = True", "def AddNode(self, node):\n self.nodes.append(node)\n return node", "def addChild(self, node):\n if IElement.providedBy(node):\n node.parent = self\n self.children.append(node)\n return node", "def add_node(self, node) -> None:\n\t\tnode.nested = True\n\t\tsuper(Node, self).add_node(node)" ]
[ "0.7421095", "0.7421095", "0.7421095", "0.7351981", "0.7321027", "0.71562696", "0.70877254", "0.6899852", "0.68100643", "0.67405903", "0.6732843", "0.6729451", "0.66640645", "0.66608375", "0.6646832", "0.6642311", "0.65983003", "0.65927315", "0.6577577", "0.65509397", "0.6517182", "0.6477539", "0.6415489", "0.6411834", "0.63959366", "0.63959366", "0.63890666", "0.6377247", "0.6344195", "0.6343943" ]
0.8034507
0
List with the children (nodes) of this FileNode.
def filechildren(self): return self._filechildren
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def children(self):\n return list(self._children)", "def children(self) -> List[str]:\n return self._children", "def get_children(self):\n return []", "def get_children(self):\n\n pass", "def children(self):\n \n return self._children", "def children(self):\n return self._children", "def children(self):\n return self._children", "def get_children(self):\n std = self._std\n bld = self._bld\n cls = self.__class__\n\n root = self.get_sobj()\n cit = std.NewChildIterator(root)\n cit.InitEx(0)\n\n children = []\n while cit.More():\n node = cls(std, bld, cit.Value().GetID(), self)\n if node.is_alive():\n children.append(node)\n cit.Next()\n return children", "def get_children(self):\r\n return self._children", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children", "def get_children(self):\n return self.children", "def get_children(self):\r\n return self.children", "def get_children(self):\n return [node for node in self._children.values()]", "def get_children(self):\n return self._children", "def getChildren(self):\n return []", "def getChildren(self):\n \n return self._children", "def get_children(self):\n if not self.FileInfo:\n raise StopIteration(\"No children\")\n offset = self.offset_pad(self.FileInfo.obj_offset + self.ValueLength)\n return self._recurse_children(offset)", "def get_children(self):\n raise NotImplementedError()", "def GetChildren(self):\r\n\r\n return self._children", "def getchildren(self):\n return self.root.getchildren()", "def children(self):\n return self.contents", "def children(self):\n return self.contents", "def children(self):\n l = []\n n = self.node.firstChild\n while n:\n l.append(XmlWrapper(n))\n n = n.nextSibling\n return l", "def children(self):\n l = []\n n = self.node.firstChild\n while n:\n l.append(XmlWrapper(n))\n n = n.nextSibling\n return l", "def children(self) -> Iterable[Heirarchical]:\n return []", "def get_node_children(self, node):\n return node.children", "def get_children(self) -> typing.List[\"AbstractNode\"]:\n return list(self._children)", "def GetChildren( self ):\n children = [\n cWrpr \n for cWrpr in GameNodePath.GetChildren( self ) \n if not cWrpr.data.getPythonTag( TAG_IGNORE )\n ]\n return children", "def get_children(self):\n return self.children" ]
[ "0.7844669", "0.7693783", "0.7595761", "0.75668776", "0.7560855", "0.7555366", "0.7555366", "0.75442153", "0.75434875", "0.754045", "0.754045", "0.754045", "0.7537011", "0.7498799", "0.74978864", "0.74409", "0.741199", "0.74040014", "0.73515093", "0.7317118", "0.7292258", "0.728772", "0.728772", "0.728385", "0.728385", "0.72601575", "0.72377133", "0.7155889", "0.71017826", "0.7088351" ]
0.799188
0
Log 'msg % args' with the critical severity level
def critical(self, msg, *args, **kwargs): self._log("CRITICAL", msg, args, kwargs)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def critical(self, *args, **kwargs):\n self.msg(logging.CRITICAL, *args, **kwargs)", "def critical(self, msg, *args, **kwargs):\n self._logger.critical(msg, *args, **kwargs)", "def critical(self, msg, *args):\n if self.lvl<=logging.CRITICAL: return self._log(msg, *args)", "def critical(self, msg, *args, **kwargs):\n logger = self.__get_logger()\n logger.critical(msg, *args, **kwargs)", "def critical(msg):\n log_msg(CRITICAL, msg)", "def critical(message: str, *args: Any) -> None: # pragma: no cover\n Logger.log(logging.CRITICAL, message, *args)", "def critical(self, message, *args, **kwargs):\n\n self.logger.critical(message, *args, **kwargs)", "def logcritical(self, msg):\n self.logger.critical(msg)", "def critical(self, msg):\r\n self.logger.critical(msg)", "def critical(self, msg, *args, **kwargs):\n pass", "def critical(self, msg: str):\n self._logger.critical(msg)", "def critical(self, msg):\n self.__logger.critical(msg)", "def fatal(self, *args):\n self.mylog.critical(*args)\n sys.exit(1)", "def print_critical(self, message: str=\"\", src_file: str=\"\") -> None:\n if self._verbosity_level >= int(VerbosityLevel.VERBOSITY_LEVEL1):\n _mes = src_file + \": \" + message\n if self._print_statements_enabled:\n print(\"CRITICAL \\t- \", src_file + \": \\t\" + message)\n logging.critical(_mes)", "def exception(*messages):\n Logger.log('CRITICAL', *messages)", "def print_critical(msg):\n print('CRITICAL - %s' % (msg))\n sys.exit(2)", "def critical(self, *args, **kwargs):", "def critical(*args, **kwargs) -> None:\n # Assert that there are some active loggers\n assert Log.active_loggers\n\n for logger in Log.active_loggers:\n logger.critical(*args, **kwargs)", "def critical(\n self,\n msg,\n color=None,\n light=None\n ) -> None:\n self.write(msg, level=logging.CRITICAL, color=color, light=light)", "def critical(log):\n write(syslog.LOG_CRIT, 'critical', '{log}'.format(log=log))", "def critical(self, log_msg, tags=None):\n now = datetime.datetime.now()\n log_level = \"critical\"\n datestamp = self.create_datestamp(now)\n timestamp = self.create_timestamp(now)\n hrtimestemp = self.create_human_readable_timestamp(now)\n tags = json.dumps(self.tags)\n log_body = self.log_builder(log_level, hrtimestemp, datestamp, timestamp, log_msg, tags)\n self.logger.critical(log_body)", "def log_message(self, fmt, *args):\n pass", "def critical(self, message: str):\n self.log(Level.CRITICAL, message)", "def w(msg):\n logging.warning('##### %s' % repr(msg))", "def log_message(self, format, *args):", "def plain(self, *args):\n self.mylog.log(logging.INFO + 1, *args)", "def critical(self, msg):\n\n self(msg, CRITICAL)", "def report_critical(self, message, prefix='CRITICAL: '):\n if self.verbose > 50:\n return self\n if self.print_colors:\n self.print(_RED + prefix + _ENDC + str(message))\n else:\n self.print(prefix + str(message))", "def warning(self, *args, **kwargs):\n self.msg(logging.WARNING, *args, **kwargs)", "def log(self, level, msg, *args, **kwargs):\n pass" ]
[ "0.83006346", "0.7913172", "0.78587925", "0.7817943", "0.763236", "0.75481474", "0.7516018", "0.7500672", "0.7429482", "0.73415476", "0.727257", "0.71751595", "0.69875693", "0.6929796", "0.6856171", "0.6827632", "0.6792363", "0.6784813", "0.6758549", "0.6689472", "0.6678434", "0.6674557", "0.6655427", "0.65496594", "0.6478481", "0.6446599", "0.6429734", "0.640622", "0.64022857", "0.6399329" ]
0.812759
1
Save the id of the last node created.
def save_lastnode_id(): init_counter() with FileLock(_COUNTER_FILE): with AtomicFile(_COUNTER_FILE, mode="w") as fh: fh.write("%d\n" % _COUNTER)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __save_node(self):\n print(self._encoder.encode(self._current_node))\n self._count += 1", "def save_node(self, node: Node):", "def save_node(self):\n # save node in path2node\n if self.full_path in self.file.path2node:\n print \"** Error, created node with path twice:\\n%s\" % self.full_path\n traceback.print_stack()\n sys.exit(1)\n self.file.path2node[self.full_path] = self \n # save node in id_lookups\n id = self.sdef['id']\n ns = self.sdef['ns']\n type = self.sdef['type']\n custom = 'custom' in self.sdef and self.sdef['custom']\n if self.parent is None and self.sdef['df'] and not custom:\n # structure (not custom) created at top level, save in id_lookups\n if id not in self.file.id_lookups[ns]:\n print \"** Error: Unable to find id '%s' in id_lookups when saving node\" % id\n traceback.print_stack()\n sys.exit(1)\n if self.path not in self.file.id_lookups[ns][id]:\n print (\"** Error: Unable to find path '%s' in id_lookups when\"\n \" saving node %s\") % (self.path, id)\n print \"self.sdef['df'] is:\"\n pp.pprint (self.sdef['df'])\n traceback.print_stack()\n sys.exit(1)\n self.file.id_lookups[ns][id][self.path]['created'].append(self)\n # save node in all_nodes, either at top level (if no parent) or inside\n # mstats structure of parent node\n if self.parent is None:\n if self.path in self.file.all_nodes:\n self.file.all_nodes[self.path].append(self)\n else:\n self.file.all_nodes[self.path] = [self, ]\n else:\n if id not in self.parent.mstats:\n if custom:\n # custom node created, add id to mstats of parent\n self.parent.mstats[id] = { 'df': {}, 'type':type, 'ns': ns,\n 'created': [ self, ], 'qty':'?' }\n else:\n print \"** Error: Unable to find key '%s' in parent mstats\" % id\n print \"self.parent.mstats is\"\n pp.pprint (self.parent.mstats)\n traceback.print_stack()\n sys.exit(1)\n else: \n # append node to parent created mstats \n self.parent.mstats[id]['created'].append(self)", "def save(self, node):\n if node:\n nextId = node.nref.nodeId if node.nref else None\n record = dict(nextId=nextId, childId=node.childId, label=node.label)\n if not node.nodeId:\n node.nodeId = self.db.insert(self.tableName, record)\n self.save(node.pref)\n else:\n self.db.update(node.nodeId, self.tableName, record)", "def save(self)->None:\n item = database.cursor.fetchone()\n if item:\n self.id = item['id']\n database.connection.commit()", "def store_last_seen_id(last_seen_id, file_name):\n\n f_write = open(file_name, 'w')\n f_write.write(str(last_seen_id))\n f_write.close()\n return", "def getid_saveifneeded(self):\n #if (not hasattr(self,'id') or self.id == None):\n if (self.id == None):\n self.save()\n return self.id", "def newId():\n global lastId\n lastId += 1\n return 'id%d' % lastId", "def save(self):\n if not self.id:\n self.id = uuid4()\n DataStore.add_instance(self)", "def on_create(self):\n self.contentnode.make_content_id_unique()", "def persistent_id(self):\n return '{0}/{1}'.format(self.model_label(), self.id)", "def store_last_replied_id(last_replied_id, file):\n f = open(file, 'w')\n f.write(str(last_replied_id))\n f.close()\n return", "def last_node(self):\n return \"last_node\"", "def save_token(self):\n db.session.add(self)\n db.session.commit()", "def save(self, data):\n data['id'] = self.id\n\n self.db.append(data)", "def save_current_post(entry):\n return current.insert_one(entry).inserted_id", "def save_to_db(self):\n result = self.db.newsdb.insert_one({\"name\": self.name})\n self.id = str(result.inserted_id)", "def on_update(self):\n 
self.contentnode.make_content_id_unique()", "def save(self):\n\n data = super().save('name, type', self.name, self.type)\n\n self.id = data.get('id')\n return data", "def node_id(self) -> int:\r\n return self._node_id", "def save(self):\n if self.id is None:\n self._insert()\n else:\n self._update()", "def getLastObjectId(self):\n return self.objId", "def new_id(self):\n self.next += 1\n return self.next", "def save(self):\n if self.id:\n self.update()\n else:\n self.create()", "def record_node(self, elements: frozenset) -> int:\n logging.debug('get node id from elements %s', str(elements))\n if elements not in self.elems2node:\n logging.debug('get node id from elements %s. new node! %s', str(elements), self.next_id)\n logging.debug('Clusters =%s ', str(self.clusters))\n self.elems2node[elements] = self.next_id\n self.clusters[self.next_id] = elements\n if len(elements)>1:\n # print('element in elements=', [element for element in elements])\n # print(\"momentum =\", np.asarray([self.momentum[frozenset({elem})] for elem in elements]))\n self.momentum[elements]= sum(np.asarray([self.momentum[frozenset({elem})] for elem in elements])) # Add the momentum of the leaves that compose the node\n # self.invariant_mass[self.next_id] =\n # elif len(elements)==1:\n # self.momentum[elements]= self.leaves_momentum[list(elements)[0]]\n\n self.next_id += 1\n return self.next_id - 1\n else:\n return self.elems2node[elements]", "def node_create(self, parent, path):\n\n q = (\"insert into nodes (parent, path) \"\n \"values (?, ?)\")\n props = (parent, path)\n return self.execute(q, props).lastrowid", "def save (self):\n if self.newobj:\n using_sequence = self.sequence ()\n self.keyvals['id'] = using_sequence\n self.seq = using_sequence\n else:\n using_sequence = self.seq\n for key, val in self.keyvals.items ():\n r_key = self.prepare_key (key, using_sequence)\n r.set (r_key, val)\n self.keyvals = {}\n self.newobj = False", "def saveHeadId(self, headId):\n self.headId = headId\n record = dict(nextId=None, childId=headId, label='head pointer')\n self.db.update(1, self.tableName, record)", "def save(self, *args, **kwargs):\n if not self.id:\n self.last_msg_time = timezone.now()\n super(WeixinUser, self).save(*args, **kwargs)", "def last_id(self):\n rows = self.db.query(\"\"\"\n SELECT LAST_INSERT_ID() AS id\n \"\"\")\n for row in rows:\n return row['id']" ]
[ "0.6517922", "0.65173435", "0.64769125", "0.6426654", "0.614686", "0.6133911", "0.608231", "0.6079285", "0.60323066", "0.60263675", "0.6023729", "0.6021255", "0.5956886", "0.59333175", "0.5909313", "0.58732355", "0.58603567", "0.5828876", "0.57975936", "0.57755256", "0.57753974", "0.57546324", "0.5745675", "0.5734858", "0.57274026", "0.5718242", "0.56941617", "0.5683691", "0.56831276", "0.5623424" ]
0.723087
0